id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
232,900
biocommons/hgvs
hgvs/alignmentmapper.py
AlignmentMapper._map
def _map(self, from_pos, to_pos, pos, base):
    """Map a position between two aligned sequences.

    Positions in this function are 0-based.

    :param from_pos: per-CIGAR-element start positions on the source sequence;
        indexed up to ``pos_i + 1``, so it presumably carries one terminal
        entry beyond ``self.cigar_op`` -- verify against the builder
    :param to_pos: corresponding per-element start positions on the target
    :param pos: 0-based source position to map
    :param base: "start" or "end" -- which side of an interval this position
        represents; selects the flank returned inside D/I elements
    :returns: (mapped position, offset, CIGAR op at the mapped position)
    :raises HGVSInvalidIntervalError: if pos lies outside the alignment
    """
    # Locate the CIGAR element whose source span contains pos.
    pos_i = -1
    while pos_i < len(self.cigar_op) and pos >= from_pos[pos_i + 1]:
        pos_i += 1
    if pos_i == -1 or pos_i == len(self.cigar_op):
        raise HGVSInvalidIntervalError("Position is beyond the bounds of transcript record")

    if self.cigar_op[pos_i] in "=MX":
        # Aligned element (match or mismatch): direct positional translation.
        mapped_pos = to_pos[pos_i] + (pos - from_pos[pos_i])
        mapped_pos_offset = 0
    elif self.cigar_op[pos_i] in "DI":
        # Deletion/insertion: no 1:1 target position exists, so snap to the
        # element flank chosen by `base`.
        # NOTE(review): assumes base is "start" or "end"; any other value
        # would leave mapped_pos unbound here -- confirm callers.
        if base == "start":
            mapped_pos = to_pos[pos_i] - 1
        elif base == "end":
            mapped_pos = to_pos[pos_i]
        mapped_pos_offset = 0
    elif self.cigar_op[pos_i] == "N":
        # Intron-like gap: return the nearer flanking aligned position with a
        # signed offset into the gap (positive from the left, negative from
        # the right).
        if pos - from_pos[pos_i] + 1 <= from_pos[pos_i + 1] - pos:
            mapped_pos = to_pos[pos_i] - 1
            mapped_pos_offset = pos - from_pos[pos_i] + 1
        else:
            mapped_pos = to_pos[pos_i]
            mapped_pos_offset = -(from_pos[pos_i + 1] - pos)
    return mapped_pos, mapped_pos_offset, self.cigar_op[pos_i]
python
def _map(self, from_pos, to_pos, pos, base): pos_i = -1 while pos_i < len(self.cigar_op) and pos >= from_pos[pos_i + 1]: pos_i += 1 if pos_i == -1 or pos_i == len(self.cigar_op): raise HGVSInvalidIntervalError("Position is beyond the bounds of transcript record") if self.cigar_op[pos_i] in "=MX": mapped_pos = to_pos[pos_i] + (pos - from_pos[pos_i]) mapped_pos_offset = 0 elif self.cigar_op[pos_i] in "DI": if base == "start": mapped_pos = to_pos[pos_i] - 1 elif base == "end": mapped_pos = to_pos[pos_i] mapped_pos_offset = 0 elif self.cigar_op[pos_i] == "N": if pos - from_pos[pos_i] + 1 <= from_pos[pos_i + 1] - pos: mapped_pos = to_pos[pos_i] - 1 mapped_pos_offset = pos - from_pos[pos_i] + 1 else: mapped_pos = to_pos[pos_i] mapped_pos_offset = -(from_pos[pos_i + 1] - pos) return mapped_pos, mapped_pos_offset, self.cigar_op[pos_i]
[ "def", "_map", "(", "self", ",", "from_pos", ",", "to_pos", ",", "pos", ",", "base", ")", ":", "pos_i", "=", "-", "1", "while", "pos_i", "<", "len", "(", "self", ".", "cigar_op", ")", "and", "pos", ">=", "from_pos", "[", "pos_i", "+", "1", "]", ...
Map position between aligned sequences Positions in this function are 0-based.
[ "Map", "position", "between", "aligned", "sequences" ]
4d16efb475e1802b2531a2f1c373e8819d8e533b
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/alignmentmapper.py#L115-L144
232,901
biocommons/hgvs
hgvs/utils/__init__.py
build_tx_cigar
def build_tx_cigar(exons, strand):
    """Build a single CIGAR string describing the alignment of a transcript
    to a reference sequence, with introns included as N elements.

    The input exons are expected to be in transcript order, and the resulting
    CIGAR is also in transcript order.

    >>> build_tx_cigar([], 1) is None
    True
    """
    if not exons:
        return None

    elem_pat = re.compile(r"\d+[=DIMNX]")

    def _flip(cigar):
        # Reverse the order of whole CIGAR elements, not the characters.
        return ''.join(reversed(elem_pat.findall(cigar)))

    # On the minus strand, each per-exon CIGAR must be flipped so that it
    # reads in transcript order.
    if strand == -1:
        cigars = [_flip(exon["cigar"]) for exon in exons]
    else:
        cigars = [exon["cigar"] for exon in exons]

    pieces = [cigars[0]]  # exon 1
    # Interleave an intron (gap between adjacent exons on the reference)
    # before each subsequent exon.
    for prev_exon, exon, cigar in zip(exons, exons[1:], cigars[1:]):
        intron_len = exon["alt_start_i"] - prev_exon["alt_end_i"]
        pieces.append(str(intron_len) + "N")
        pieces.append(cigar)
    return "".join(pieces)
python
def build_tx_cigar(exons, strand): cigarelem_re = re.compile(r"\d+[=DIMNX]") def _reverse_cigar(c): return ''.join(reversed(cigarelem_re.findall(c))) if len(exons) == 0: return None # flip orientation of all CIGARs if on - strand if strand == -1: cigars = [_reverse_cigar(e["cigar"]) for e in exons] else: cigars = [e["cigar"] for e in exons] tx_cigar = [cigars[0]] # exon 1 for i in range(1, len(cigars)): # and intron + exon pairs thereafter intron = str(exons[i]["alt_start_i"] - exons[i - 1]["alt_end_i"]) + "N" tx_cigar += [intron, cigars[i]] tx_cigar_str = "".join(tx_cigar) return tx_cigar_str
[ "def", "build_tx_cigar", "(", "exons", ",", "strand", ")", ":", "cigarelem_re", "=", "re", ".", "compile", "(", "r\"\\d+[=DIMNX]\"", ")", "def", "_reverse_cigar", "(", "c", ")", ":", "return", "''", ".", "join", "(", "reversed", "(", "cigarelem_re", ".", ...
builds a single CIGAR string representing an alignment of the transcript sequence to a reference sequence, including introns. The input exons are expected to be in transcript order, and the resulting CIGAR is also in transcript order. >>> build_tx_cigar([], 1) is None True
[ "builds", "a", "single", "CIGAR", "string", "representing", "an", "alignment", "of", "the", "transcript", "sequence", "to", "a", "reference", "sequence", "including", "introns", ".", "The", "input", "exons", "are", "expected", "to", "be", "in", "transcript", "...
4d16efb475e1802b2531a2f1c373e8819d8e533b
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/utils/__init__.py#L9-L39
232,902
biocommons/hgvs
hgvs/utils/altseq_to_hgvsp.py
AltSeqToHgvsp._check_if_ins_is_dup
def _check_if_ins_is_dup(self, start, insertion):
    """Helper to identify an insertion as a duplicate of the reference
    sequence immediately preceding the insertion point.

    :param start: 1-based insertion start
    :type start: int
    :param insertion: inserted sequence
    :type insertion: str
    :return: (is duplicate, 1-based variant start or None)
    :rtype: (bool, int)
    """
    dup_candidate_start = start - len(insertion) - 1
    if dup_candidate_start < 0:
        # The candidate region would extend beyond the start of the
        # reference, so there is nothing to duplicate.  The previous code
        # relied on Python's negative-index slice semantics here, which
        # happened to yield a too-short (never-equal) candidate; make the
        # out-of-bounds case explicit instead.
        return False, None
    dup_candidate = self._ref_seq[dup_candidate_start:dup_candidate_start + len(insertion)]
    if insertion == dup_candidate:
        return True, dup_candidate_start + 1
    return False, None
python
def _check_if_ins_is_dup(self, start, insertion): is_dup = False # assume no variant_start = None dup_candidate_start = start - len(insertion) - 1 dup_candidate = self._ref_seq[dup_candidate_start:dup_candidate_start + len(insertion)] if insertion == dup_candidate: is_dup = True variant_start = dup_candidate_start + 1 return is_dup, variant_start
[ "def", "_check_if_ins_is_dup", "(", "self", ",", "start", ",", "insertion", ")", ":", "is_dup", "=", "False", "# assume no", "variant_start", "=", "None", "dup_candidate_start", "=", "start", "-", "len", "(", "insertion", ")", "-", "1", "dup_candidate", "=", ...
Helper to identify an insertion as a duplicate :param start: 1-based insertion start :type start: int :param insertion: sequence :type insertion: str :return (is duplicate, variant start) :rtype (bool, int)
[ "Helper", "to", "identify", "an", "insertion", "as", "a", "duplicate" ]
4d16efb475e1802b2531a2f1c373e8819d8e533b
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/utils/altseq_to_hgvsp.py#L299-L318
232,903
biocommons/hgvs
hgvs/utils/altseq_to_hgvsp.py
AltSeqToHgvsp._create_variant
def _create_variant(self, start, end, ref, alt, fsext_len=None, is_dup=False, acc=None,
                    is_ambiguous=False, is_sub=False, is_ext=False, is_no_protein=False,
                    is_init_met=False):
    """Creates a SequenceVariant object (protein-level, type 'p').

    :param start: interval start passed to Interval
    :param end: interval end passed to Interval
    :param ref: reference amino-acid sequence (edit-dependent meaning)
    :param alt: alternate amino-acid sequence (edit-dependent meaning)
    :param fsext_len: length used for frameshift (AAFs) or extension (AAExt)
    :param is_dup: emit a Dup edit
    :param acc: accession placed on the resulting SequenceVariant
    :param is_ambiguous: emit a variant with no posedit at all
    :param is_sub: emit an AASub edit
    :param is_ext: emit an AAExt edit (terminal extension, aaterm='*')
    :param is_no_protein: emit the special '0' (no protein) edit
    :param is_init_met: emit an AARefAlt with init_met=True (no Interval)
    """
    if is_init_met:
        # init-met variants carry no positional interval
        posedit = AARefAlt(ref=ref, alt=alt, init_met=True)
    elif is_ambiguous:
        posedit = None
    else:
        interval = Interval(start=start, end=end)
        # Note - order matters: these flags are not mutually exclusive, so
        # the precedence of this chain determines which edit type wins.
        if is_no_protein:
            edit = '0'
        elif is_sub:
            edit = AASub(ref=ref, alt=alt)
        elif is_ext:
            edit = AAExt(ref=ref, alt=alt, aaterm='*', length=fsext_len)
        elif self._is_frameshift:
            edit = AAFs(ref=ref, alt=alt, length=fsext_len)
        elif is_dup:
            edit = Dup()
        elif ref == alt == '':
            edit = AARefAlt(ref='', alt='')
        else:
            edit = AARefAlt(ref=ref, alt=alt)
        posedit = PosEdit(
            pos=interval, edit=edit, uncertain=hgvs.global_config.mapping.inferred_p_is_uncertain)
    var_p = hgvs.sequencevariant.SequenceVariant(acc, 'p', posedit)
    return var_p
python
def _create_variant(self, start, end, ref, alt, fsext_len=None, is_dup=False, acc=None, is_ambiguous=False, is_sub=False, is_ext=False, is_no_protein=False, is_init_met=False): if is_init_met: posedit = AARefAlt(ref=ref, alt=alt, init_met=True) elif is_ambiguous: posedit = None else: interval = Interval(start=start, end=end) # Note - order matters if is_no_protein: edit = '0' elif is_sub: edit = AASub(ref=ref, alt=alt) elif is_ext: edit = AAExt(ref=ref, alt=alt, aaterm='*', length=fsext_len) elif self._is_frameshift: edit = AAFs(ref=ref, alt=alt, length=fsext_len) elif is_dup: edit = Dup() elif ref == alt == '': edit = AARefAlt(ref='', alt='') else: edit = AARefAlt(ref=ref, alt=alt) posedit = PosEdit( pos=interval, edit=edit, uncertain=hgvs.global_config.mapping.inferred_p_is_uncertain) var_p = hgvs.sequencevariant.SequenceVariant(acc, 'p', posedit) return var_p
[ "def", "_create_variant", "(", "self", ",", "start", ",", "end", ",", "ref", ",", "alt", ",", "fsext_len", "=", "None", ",", "is_dup", "=", "False", ",", "acc", "=", "None", ",", "is_ambiguous", "=", "False", ",", "is_sub", "=", "False", ",", "is_ext...
Creates a SequenceVariant object
[ "Creates", "a", "SequenceVariant", "object" ]
4d16efb475e1802b2531a2f1c373e8819d8e533b
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/utils/altseq_to_hgvsp.py#L320-L361
232,904
biocommons/hgvs
hgvs/dataproviders/uta.py
connect
def connect(db_url=None, pooling=hgvs.global_config.uta.pooling, application_name=None,
            mode=None, cache=None):
    """Connect to a UTA database instance and return a UTA interface instance.

    :param db_url: URL for database connection
    :type db_url: string
    :param pooling: whether to use connection pooling (postgresql only)
    :type pooling: bool
    :param application_name: log application name in connection (useful for
        debugging; PostgreSQL only)
    :type application_name: str
    :param mode: passed through to the backend interface constructor
    :param cache: passed through to the backend interface constructor
    :raises RuntimeError: if the URL scheme is neither sqlite nor postgresql

    When called with an explicit db_url argument, that db_url is used for
    connecting.  When called without an explicit argument, the function
    default is determined by the environment variable UTA_DB_URL if it
    exists, or hgvs.datainterface.uta.public_db_url otherwise.

    >>> hdp = connect()
    >>> hdp.schema_version()
    '1.1'

    The format of the db_url is driver://user:pass@host/database/schema (the
    same as that used by SQLAlchemy).  Examples:

    A remote public postgresql database:
        postgresql://anonymous:anonymous@uta.biocommons.org/uta/uta_20170707'

    A local postgresql database:
        postgresql://localhost/uta_dev/uta_20170707

    For postgresql db_urls, pooling=True causes connect to use a
    psycopg2.pool.ThreadedConnectionPool.
    """
    _logger.debug('connecting to ' + str(db_url) + '...')
    if db_url is None:
        # fall back to the environment/default URL
        db_url = _get_uta_db_url()
    url = _parse_url(db_url)
    # dispatch on the URL scheme to the matching backend
    if url.scheme == 'sqlite':
        conn = UTA_sqlite(url, mode, cache)
    elif url.scheme == 'postgresql':
        conn = UTA_postgresql(
            url=url, pooling=pooling, application_name=application_name, mode=mode, cache=cache)
    else:
        # fell through connection scheme cases
        raise RuntimeError("{url.scheme} in {url} is not currently supported".format(url=url))
    _logger.info('connected to ' + str(db_url) + '...')
    return conn
python
def connect(db_url=None, pooling=hgvs.global_config.uta.pooling, application_name=None, mode=None, cache=None): _logger.debug('connecting to ' + str(db_url) + '...') if db_url is None: db_url = _get_uta_db_url() url = _parse_url(db_url) if url.scheme == 'sqlite': conn = UTA_sqlite(url, mode, cache) elif url.scheme == 'postgresql': conn = UTA_postgresql( url=url, pooling=pooling, application_name=application_name, mode=mode, cache=cache) else: # fell through connection scheme cases raise RuntimeError("{url.scheme} in {url} is not currently supported".format(url=url)) _logger.info('connected to ' + str(db_url) + '...') return conn
[ "def", "connect", "(", "db_url", "=", "None", ",", "pooling", "=", "hgvs", ".", "global_config", ".", "uta", ".", "pooling", ",", "application_name", "=", "None", ",", "mode", "=", "None", ",", "cache", "=", "None", ")", ":", "_logger", ".", "debug", ...
Connect to a UTA database instance and return a UTA interface instance. :param db_url: URL for database connection :type db_url: string :param pooling: whether to use connection pooling (postgresql only) :type pooling: bool :param application_name: log application name in connection (useful for debugging; PostgreSQL only) :type application_name: str When called with an explicit db_url argument, that db_url is used for connecting. When called without an explicit argument, the function default is determined by the environment variable UTA_DB_URL if it exists, or hgvs.datainterface.uta.public_db_url otherwise. >>> hdp = connect() >>> hdp.schema_version() '1.1' The format of the db_url is driver://user:pass@host/database/schema (the same as that used by SQLAlchemy). Examples: A remote public postgresql database: postgresql://anonymous:anonymous@uta.biocommons.org/uta/uta_20170707' A local postgresql database: postgresql://localhost/uta_dev/uta_20170707 For postgresql db_urls, pooling=True causes connect to use a psycopg2.pool.ThreadedConnectionPool.
[ "Connect", "to", "a", "UTA", "database", "instance", "and", "return", "a", "UTA", "interface", "instance", "." ]
4d16efb475e1802b2531a2f1c373e8819d8e533b
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/dataproviders/uta.py#L62-L114
232,905
biocommons/hgvs
hgvs/dataproviders/uta.py
UTABase.get_tx_for_region
def get_tx_for_region(self, alt_ac, alt_aln_method, start_i, end_i):
    """Return transcripts that overlap the given region.

    :param str alt_ac: reference sequence (e.g., NC_000007.13)
    :param str alt_aln_method: alignment method (e.g., splign)
    :param int start_i: 5' bound of region
    :param int end_i: 3' bound of region
    """
    sql = self._queries['tx_for_region']
    params = [alt_ac, alt_aln_method, start_i, end_i]
    return self._fetchall(sql, params)
python
def get_tx_for_region(self, alt_ac, alt_aln_method, start_i, end_i): return self._fetchall(self._queries['tx_for_region'], [alt_ac, alt_aln_method, start_i, end_i])
[ "def", "get_tx_for_region", "(", "self", ",", "alt_ac", ",", "alt_aln_method", ",", "start_i", ",", "end_i", ")", ":", "return", "self", ".", "_fetchall", "(", "self", ".", "_queries", "[", "'tx_for_region'", "]", ",", "[", "alt_ac", ",", "alt_aln_method", ...
return transcripts that overlap given region :param str alt_ac: reference sequence (e.g., NC_000007.13) :param str alt_aln_method: alignment method (e.g., splign) :param int start_i: 5' bound of region :param int end_i: 3' bound of region
[ "return", "transcripts", "that", "overlap", "given", "region" ]
4d16efb475e1802b2531a2f1c373e8819d8e533b
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/dataproviders/uta.py#L330-L340
232,906
biocommons/hgvs
hgvs/dataproviders/uta.py
UTABase.get_tx_identity_info
def get_tx_identity_info(self, tx_ac):
    """Return features associated with a single transcript.

    :param tx_ac: transcript accession with version (e.g., 'NM_199425.2')
    :type tx_ac: str
    :raises HGVSDataNotAvailableError: if the transcript is unknown

    # database output
    -[ RECORD 1 ]--+-------------
    tx_ac          | NM_199425.2
    alt_ac         | NM_199425.2
    alt_aln_method | transcript
    cds_start_i    | 283
    cds_end_i      | 1003
    lengths        | {707,79,410}
    hgnc           | VSX1
    """
    matches = self._fetchall(self._queries['tx_identity_info'], [tx_ac])
    if not matches:
        msg = "No transcript definition for (tx_ac={tx_ac})".format(tx_ac=tx_ac)
        raise HGVSDataNotAvailableError(msg)
    return matches[0]
python
def get_tx_identity_info(self, tx_ac): rows = self._fetchall(self._queries['tx_identity_info'], [tx_ac]) if len(rows) == 0: raise HGVSDataNotAvailableError( "No transcript definition for (tx_ac={tx_ac})".format(tx_ac=tx_ac)) return rows[0]
[ "def", "get_tx_identity_info", "(", "self", ",", "tx_ac", ")", ":", "rows", "=", "self", ".", "_fetchall", "(", "self", ".", "_queries", "[", "'tx_identity_info'", "]", ",", "[", "tx_ac", "]", ")", "if", "len", "(", "rows", ")", "==", "0", ":", "rais...
returns features associated with a single transcript. :param tx_ac: transcript accession with version (e.g., 'NM_199425.2') :type tx_ac: str # database output -[ RECORD 1 ]--+------------- tx_ac | NM_199425.2 alt_ac | NM_199425.2 alt_aln_method | transcript cds_start_i | 283 cds_end_i | 1003 lengths | {707,79,410} hgnc | VSX1
[ "returns", "features", "associated", "with", "a", "single", "transcript", "." ]
4d16efb475e1802b2531a2f1c373e8819d8e533b
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/dataproviders/uta.py#L342-L363
232,907
biocommons/hgvs
hgvs/dataproviders/uta.py
UTABase.get_similar_transcripts
def get_similar_transcripts(self, tx_ac):
    """Return a list of transcripts that are similar to the given
    transcript, with relevant similarity criteria.

    >> sim_tx = hdp.get_similar_transcripts('NM_001285829.1')
    >> dict(sim_tx[0])
    { 'cds_eq': False, 'cds_es_fp_eq': False, 'es_fp_eq': True,
      'tx_ac1': 'NM_001285829.1', 'tx_ac2': 'ENST00000498907' }

    where:

    * cds_eq means that the CDS sequences are identical
    * es_fp_eq means that the full exon structures are identical
      (i.e., incl. UTR)
    * cds_es_fp_eq means that the cds-clipped portions of the exon
      structures are identical (i.e., excluding UTR)
    * Hint: "es" = "exon set", "fp" = "fingerprint", "eq" = "equal"

    "exon structure" refers to the start and end coordinates on a specified
    reference sequence.  Thus, having the same exon structure means that the
    transcripts are defined on the same reference sequence and have the same
    exon spans on that sequence.
    """
    return self._fetchall(self._queries['tx_similar'], [tx_ac])
python
def get_similar_transcripts(self, tx_ac): rows = self._fetchall(self._queries['tx_similar'], [tx_ac]) return rows
[ "def", "get_similar_transcripts", "(", "self", ",", "tx_ac", ")", ":", "rows", "=", "self", ".", "_fetchall", "(", "self", ".", "_queries", "[", "'tx_similar'", "]", ",", "[", "tx_ac", "]", ")", "return", "rows" ]
Return a list of transcripts that are similar to the given transcript, with relevant similarity criteria. >> sim_tx = hdp.get_similar_transcripts('NM_001285829.1') >> dict(sim_tx[0]) { 'cds_eq': False, 'cds_es_fp_eq': False, 'es_fp_eq': True, 'tx_ac1': 'NM_001285829.1', 'tx_ac2': 'ENST00000498907' } where: * cds_eq means that the CDS sequences are identical * es_fp_eq means that the full exon structures are identical (i.e., incl. UTR) * cds_es_fp_eq means that the cds-clipped portions of the exon structures are identical (i.e., ecluding. UTR) * Hint: "es" = "exon set", "fp" = "fingerprint", "eq" = "equal" "exon structure" refers to the start and end coordinates on a specified reference sequence. Thus, having the same exon structure means that the transcripts are defined on the same reference sequence and have the same exon spans on that sequence.
[ "Return", "a", "list", "of", "transcripts", "that", "are", "similar", "to", "the", "given", "transcript", "with", "relevant", "similarity", "criteria", "." ]
4d16efb475e1802b2531a2f1c373e8819d8e533b
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/dataproviders/uta.py#L431-L461
232,908
biocommons/hgvs
hgvs/decorators/lru_cache.py
_make_key
def _make_key(func, args, kwds, typed,
              kwd_mark=(object(), ),
              fasttypes={int, str, frozenset, type(None)},
              sorted=sorted, tuple=tuple, type=type, len=len):
    """Make a cache key from optionally typed positional and keyword arguments.

    kwd_mark is a unique sentinel separating positional from keyword parts of
    the key; fasttypes/sorted/tuple/type/len are bound as defaults so lookups
    are local (the stdlib lru_cache micro-optimization).
    """
    key = args
    key += kwd_mark
    # Include the function itself so different functions sharing one cache
    # cannot collide on identical argument tuples.
    key += ('__func__', func)
    if kwds:
        sorted_items = sorted(kwds.items())
        for item in sorted_items:
            key += item
    if typed:
        # Distinguish e.g. 1 from 1.0 by also keying on argument types.
        key += tuple(type(v) for v in args)
        if kwds:
            key += tuple(type(v) for k, v in sorted_items)
    elif len(key) == 1 and type(key[0]) in fasttypes:
        # Fast path: a lone hashable builtin is its own key.
        # NOTE(review): this branch appears unreachable here -- key always
        # contains at least kwd_mark and the '__func__' pair; it survives
        # from upstream functools.lru_cache, which adds no '__func__'
        # element.  Confirm before removing.
        return key[0]
    return _HashedSeq(key)
python
def _make_key(func, args, kwds, typed, kwd_mark=(object(), ), fasttypes={int, str, frozenset, type(None)}, sorted=sorted, tuple=tuple, type=type, len=len): 'Make a cache key from optionally typed positional and keyword arguments' key = args key += kwd_mark key += ('__func__', func) if kwds: sorted_items = sorted(kwds.items()) for item in sorted_items: key += item if typed: key += tuple(type(v) for v in args) if kwds: key += tuple(type(v) for k, v in sorted_items) elif len(key) == 1 and type(key[0]) in fasttypes: return key[0] return _HashedSeq(key)
[ "def", "_make_key", "(", "func", ",", "args", ",", "kwds", ",", "typed", ",", "kwd_mark", "=", "(", "object", "(", ")", ",", ")", ",", "fasttypes", "=", "{", "int", ",", "str", ",", "frozenset", ",", "type", "(", "None", ")", "}", ",", "sorted", ...
Make a cache key from optionally typed positional and keyword arguments
[ "Make", "a", "cache", "key", "from", "optionally", "typed", "positional", "and", "keyword", "arguments" ]
4d16efb475e1802b2531a2f1c373e8819d8e533b
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/decorators/lru_cache.py#L45-L69
232,909
biocommons/hgvs
hgvs/normalizer.py
Normalizer._get_boundary
def _get_boundary(self, var):
    """Get the position of exon-intron boundary for current variant.

    Returns a (left, right) half-open window within which normalization may
    shuffle the variant without crossing an exon-intron or UTR-CDS boundary.

    :param var: a SequenceVariant-like object with .type, .ac and
        .posedit.pos.{start,end}.base (1-based positions)
    :returns: (left, right) 0-based bounds; (0, inf) for non-transcript types
    :raises HGVSDataNotAvailableError: if no alignment is available for var.ac
    :raises HGVSUnsupportedOperationError: if the variant spans an
        exon-intron or UTR-CDS boundary
    """
    if var.type == "r" or var.type == "n":
        if self.cross_boundaries:
            # boundary crossing explicitly allowed: unbounded window
            return 0, float("inf")
        else:
            # Get genomic sequence access number for this transcript
            map_info = self.hdp.get_tx_mapping_options(var.ac)
            if not map_info:
                raise HGVSDataNotAvailableError(
                    "No mapping info available for {ac}".format(ac=var.ac))
            map_info = [item for item in map_info
                        if item["alt_aln_method"] == self.alt_aln_method]
            alt_ac = map_info[0]["alt_ac"]

            # Get tx info
            tx_info = self.hdp.get_tx_info(var.ac, alt_ac, self.alt_aln_method)
            cds_start = tx_info["cds_start_i"]
            cds_end = tx_info["cds_end_i"]

            # Get exon info
            exon_info = self.hdp.get_tx_exons(var.ac, alt_ac, self.alt_aln_method)
            exon_starts = [exon["tx_start_i"] for exon in exon_info]
            exon_ends = [exon["tx_end_i"] for exon in exon_info]
            exon_starts.sort()
            exon_ends.sort()
            # Append a sentinel "past-the-last-exon" region so positions
            # beyond the final exon fall into a well-defined bucket.
            exon_starts.append(exon_ends[-1])
            exon_ends.append(float("inf"))

            # Find the end pos of the exon where the var locates
            left = 0
            right = float("inf")
            # TODO: #242: implement methods to find tx regions
            for i, _ in enumerate(exon_starts):
                if (var.posedit.pos.start.base - 1 >= exon_starts[i]
                        and var.posedit.pos.start.base - 1 < exon_ends[i]):
                    break
            for j, _ in enumerate(exon_starts):
                if (var.posedit.pos.end.base - 1 >= exon_starts[j]
                        and var.posedit.pos.end.base - 1 < exon_ends[j]):
                    break
            if i != j:
                # start and end fall in different exon regions
                raise HGVSUnsupportedOperationError(
                    "Unsupported normalization of variants spanning the exon-intron boundary ({var})"
                    .format(var=var))
            left = exon_starts[i]
            right = exon_ends[i]

            # Clip the window at the CDS start (5' UTR boundary).
            if cds_start is None:
                pass
            elif var.posedit.pos.end.base - 1 < cds_start:
                right = min(right, cds_start)
            elif var.posedit.pos.start.base - 1 >= cds_start:
                left = max(left, cds_start)
            else:
                raise HGVSUnsupportedOperationError(
                    "Unsupported normalization of variants spanning the UTR-exon boundary ({var})"
                    .format(var=var))

            # Clip the window at the CDS end (3' UTR boundary).
            if cds_end is None:
                pass
            elif var.posedit.pos.start.base - 1 >= cds_end:
                left = max(left, cds_end)
            elif var.posedit.pos.end.base - 1 < cds_end:
                right = min(right, cds_end)
            else:
                raise HGVSUnsupportedOperationError(
                    "Unsupported normalization of variants spanning the exon-UTR boundary ({var})"
                    .format(var=var))
            return left, right
    else:
        # For variant type of g and m etc.
        return 0, float("inf")
python
def _get_boundary(self, var): if var.type == "r" or var.type == "n": if self.cross_boundaries: return 0, float("inf") else: # Get genomic sequence access number for this transcript map_info = self.hdp.get_tx_mapping_options(var.ac) if not map_info: raise HGVSDataNotAvailableError( "No mapping info available for {ac}".format(ac=var.ac)) map_info = [ item for item in map_info if item["alt_aln_method"] == self.alt_aln_method ] alt_ac = map_info[0]["alt_ac"] # Get tx info tx_info = self.hdp.get_tx_info(var.ac, alt_ac, self.alt_aln_method) cds_start = tx_info["cds_start_i"] cds_end = tx_info["cds_end_i"] # Get exon info exon_info = self.hdp.get_tx_exons(var.ac, alt_ac, self.alt_aln_method) exon_starts = [exon["tx_start_i"] for exon in exon_info] exon_ends = [exon["tx_end_i"] for exon in exon_info] exon_starts.sort() exon_ends.sort() exon_starts.append(exon_ends[-1]) exon_ends.append(float("inf")) # Find the end pos of the exon where the var locates left = 0 right = float("inf") # TODO: #242: implement methods to find tx regions for i, _ in enumerate(exon_starts): if (var.posedit.pos.start.base - 1 >= exon_starts[i] and var.posedit.pos.start.base - 1 < exon_ends[i]): break for j, _ in enumerate(exon_starts): if (var.posedit.pos.end.base - 1 >= exon_starts[j] and var.posedit.pos.end.base - 1 < exon_ends[j]): break if i != j: raise HGVSUnsupportedOperationError( "Unsupported normalization of variants spanning the exon-intron boundary ({var})" .format(var=var)) left = exon_starts[i] right = exon_ends[i] if cds_start is None: pass elif var.posedit.pos.end.base - 1 < cds_start: right = min(right, cds_start) elif var.posedit.pos.start.base - 1 >= cds_start: left = max(left, cds_start) else: raise HGVSUnsupportedOperationError( "Unsupported normalization of variants spanning the UTR-exon boundary ({var})" .format(var=var)) if cds_end is None: pass elif var.posedit.pos.start.base - 1 >= cds_end: left = max(left, cds_end) elif var.posedit.pos.end.base - 1 < cds_end: right = 
min(right, cds_end) else: raise HGVSUnsupportedOperationError( "Unsupported normalization of variants spanning the exon-UTR boundary ({var})" .format(var=var)) return left, right else: # For variant type of g and m etc. return 0, float("inf")
[ "def", "_get_boundary", "(", "self", ",", "var", ")", ":", "if", "var", ".", "type", "==", "\"r\"", "or", "var", ".", "type", "==", "\"n\"", ":", "if", "self", ".", "cross_boundaries", ":", "return", "0", ",", "float", "(", "\"inf\"", ")", "else", ...
Get the position of exon-intron boundary for current variant
[ "Get", "the", "position", "of", "exon", "-", "intron", "boundary", "for", "current", "variant" ]
4d16efb475e1802b2531a2f1c373e8819d8e533b
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/normalizer.py#L179-L258
232,910
biocommons/hgvs
hgvs/normalizer.py
Normalizer._get_tgt_length
def _get_tgt_length(self, var):
    """Return the total length of the reference sequence underlying *var*.

    Genomic (g) and mitochondrial (m) sequences are treated as unbounded.

    :raises HGVSDataNotAvailableError: if no identity info exists for var.ac
    """
    if var.type in ("g", "m"):
        # genomic-style coordinates: no practical upper bound
        return float("inf")
    # Get genomic sequence access number for this transcript
    identity_info = self.hdp.get_tx_identity_info(var.ac)
    if not identity_info:
        raise HGVSDataNotAvailableError(
            "No identity info available for {ac}".format(ac=var.ac))
    # transcript length is the sum of its segment lengths
    return sum(identity_info["lengths"])
python
def _get_tgt_length(self, var): if var.type == "g" or var.type == "m": return float("inf") else: # Get genomic sequence access number for this transcript identity_info = self.hdp.get_tx_identity_info(var.ac) if not identity_info: raise HGVSDataNotAvailableError( "No identity info available for {ac}".format(ac=var.ac)) tgt_len = sum(identity_info["lengths"]) return tgt_len
[ "def", "_get_tgt_length", "(", "self", ",", "var", ")", ":", "if", "var", ".", "type", "==", "\"g\"", "or", "var", ".", "type", "==", "\"m\"", ":", "return", "float", "(", "\"inf\"", ")", "else", ":", "# Get genomic sequence access number for this transcript",...
Get the total length of the whole reference sequence
[ "Get", "the", "total", "length", "of", "the", "whole", "reference", "sequence" ]
4d16efb475e1802b2531a2f1c373e8819d8e533b
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/normalizer.py#L260-L272
232,911
biocommons/hgvs
hgvs/normalizer.py
Normalizer._fetch_bounded_seq
def _fetch_bounded_seq(self, var, start, end, window_size, boundary):
    """Fetch reference sequence from hgvs data provider.

    The start position is 0 and the interval is half open.  The requested
    interval is clipped to ``boundary`` before fetching.

    :param var: variant; supplies the accession and appears in error messages
    :param start: 0-based start of the requested interval
    :param end: end of the requested half-open interval
    :param window_size: size of the shuffle window included in [start, end);
        subtracted so the length check below reflects the variant span alone
        -- NOTE(review): var_len is computed before clipping; confirm intent
    :param boundary: (left, right) clip bounds, e.g. from _get_boundary()
    :raises HGVSInvalidVariantError: if the fetched sequence is too short to
        cover both the clipped request and the variant span
    """
    var_len = end - start - window_size
    # clip the request to the allowed boundary window
    start = start if start >= boundary[0] else boundary[0]
    end = end if end <= boundary[1] else boundary[1]
    if start >= end:
        # interval fully clipped away: nothing to fetch
        return ""
    seq = self.hdp.get_seq(var.ac, start, end)
    if len(seq) < end - start and len(seq) < var_len:
        raise HGVSInvalidVariantError(
            "Variant span is outside sequence bounds ({var})".format(var=var))
    return seq
python
def _fetch_bounded_seq(self, var, start, end, window_size, boundary): var_len = end - start - window_size start = start if start >= boundary[0] else boundary[0] end = end if end <= boundary[1] else boundary[1] if start >= end: return "" seq = self.hdp.get_seq(var.ac, start, end) if len(seq) < end - start and len(seq) < var_len: raise HGVSInvalidVariantError( "Variant span is outside sequence bounds ({var})".format(var=var)) return seq
[ "def", "_fetch_bounded_seq", "(", "self", ",", "var", ",", "start", ",", "end", ",", "window_size", ",", "boundary", ")", ":", "var_len", "=", "end", "-", "start", "-", "window_size", "start", "=", "start", "if", "start", ">=", "boundary", "[", "0", "]...
Fetch reference sequence from hgvs data provider. The start position is 0 and the interval is half open
[ "Fetch", "reference", "sequence", "from", "hgvs", "data", "provider", "." ]
4d16efb475e1802b2531a2f1c373e8819d8e533b
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/normalizer.py#L274-L292
232,912
biocommons/hgvs
hgvs/normalizer.py
Normalizer._get_ref_alt
def _get_ref_alt(self, var, boundary): """Get reference allele and alternative allele of the variant """ # Get reference allele if var.posedit.edit.type == "ins" or var.posedit.edit.type == "dup": ref = "" else: # For NARefAlt and Inv if var.posedit.edit.ref_s is None or var.posedit.edit.ref == "": ref = self._fetch_bounded_seq(var, var.posedit.pos.start.base - 1, var.posedit.pos.end.base, 0, boundary) else: ref = var.posedit.edit.ref # Get alternative allele if var.posedit.edit.type == "sub" or var.posedit.edit.type == "delins" or var.posedit.edit.type == "ins": alt = var.posedit.edit.alt elif var.posedit.edit.type == "del": alt = "" elif var.posedit.edit.type == "dup": alt = var.posedit.edit.ref or self._fetch_bounded_seq( var, var.posedit.pos.start.base - 1, var.posedit.pos.end.base, 0, boundary) elif var.posedit.edit.type == "inv": alt = reverse_complement(ref) elif var.posedit.edit.type == "identity": alt = ref return ref, alt
python
def _get_ref_alt(self, var, boundary): # Get reference allele if var.posedit.edit.type == "ins" or var.posedit.edit.type == "dup": ref = "" else: # For NARefAlt and Inv if var.posedit.edit.ref_s is None or var.posedit.edit.ref == "": ref = self._fetch_bounded_seq(var, var.posedit.pos.start.base - 1, var.posedit.pos.end.base, 0, boundary) else: ref = var.posedit.edit.ref # Get alternative allele if var.posedit.edit.type == "sub" or var.posedit.edit.type == "delins" or var.posedit.edit.type == "ins": alt = var.posedit.edit.alt elif var.posedit.edit.type == "del": alt = "" elif var.posedit.edit.type == "dup": alt = var.posedit.edit.ref or self._fetch_bounded_seq( var, var.posedit.pos.start.base - 1, var.posedit.pos.end.base, 0, boundary) elif var.posedit.edit.type == "inv": alt = reverse_complement(ref) elif var.posedit.edit.type == "identity": alt = ref return ref, alt
[ "def", "_get_ref_alt", "(", "self", ",", "var", ",", "boundary", ")", ":", "# Get reference allele", "if", "var", ".", "posedit", ".", "edit", ".", "type", "==", "\"ins\"", "or", "var", ".", "posedit", ".", "edit", ".", "type", "==", "\"dup\"", ":", "r...
Get reference allele and alternative allele of the variant
[ "Get", "reference", "allele", "and", "alternative", "allele", "of", "the", "variant" ]
4d16efb475e1802b2531a2f1c373e8819d8e533b
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/normalizer.py#L294-L322
232,913
biocommons/hgvs
hgvs/utils/norm.py
trim_common_suffixes
def trim_common_suffixes(strs, min_len=0):
    """Trim the longest common suffix from a collection of strings.

    Returns (number of characters trimmed, trimmed strings).

    >>> trim_common_suffixes('A', 1)
    (0, 'A')
    """
    if len(strs) < 2:
        # nothing to compare against
        return 0, strs
    # Reverse each string so the shared suffix becomes a shared prefix,
    # delegate, then undo the reversal.
    reversed_strs = [s[::-1] for s in strs]
    trimmed, reversed_strs = trim_common_prefixes(reversed_strs, min_len)
    if not trimmed:
        return trimmed, strs
    return trimmed, [s[::-1] for s in reversed_strs]
python
def trim_common_suffixes(strs, min_len=0): if len(strs) < 2: return 0, strs rev_strs = [s[::-1] for s in strs] trimmed, rev_strs = trim_common_prefixes(rev_strs, min_len) if trimmed: strs = [s[::-1] for s in rev_strs] return trimmed, strs
[ "def", "trim_common_suffixes", "(", "strs", ",", "min_len", "=", "0", ")", ":", "if", "len", "(", "strs", ")", "<", "2", ":", "return", "0", ",", "strs", "rev_strs", "=", "[", "s", "[", ":", ":", "-", "1", "]", "for", "s", "in", "strs", "]", ...
trim common suffixes >>> trim_common_suffixes('A', 1) (0, 'A')
[ "trim", "common", "suffixes" ]
4d16efb475e1802b2531a2f1c373e8819d8e533b
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/utils/norm.py#L14-L33
232,914
biocommons/hgvs
hgvs/utils/norm.py
trim_common_prefixes
def trim_common_prefixes(strs, min_len=0): """trim common prefixes""" trimmed = 0 if len(strs) > 1: s1 = min(strs) s2 = max(strs) for i in range(len(s1) - min_len): if s1[i] != s2[i]: break trimmed = i + 1 if trimmed > 0: strs = [s[trimmed:] for s in strs] return trimmed, strs
python
def trim_common_prefixes(strs, min_len=0): trimmed = 0 if len(strs) > 1: s1 = min(strs) s2 = max(strs) for i in range(len(s1) - min_len): if s1[i] != s2[i]: break trimmed = i + 1 if trimmed > 0: strs = [s[trimmed:] for s in strs] return trimmed, strs
[ "def", "trim_common_prefixes", "(", "strs", ",", "min_len", "=", "0", ")", ":", "trimmed", "=", "0", "if", "len", "(", "strs", ")", ">", "1", ":", "s1", "=", "min", "(", "strs", ")", "s2", "=", "max", "(", "strs", ")", "for", "i", "in", "range"...
trim common prefixes
[ "trim", "common", "prefixes" ]
4d16efb475e1802b2531a2f1c373e8819d8e533b
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/utils/norm.py#L36-L53
232,915
biocommons/hgvs
hgvs/utils/norm.py
normalize_alleles_left
def normalize_alleles_left(ref, start, stop, alleles, bound, ref_step, shuffle=True): """ Normalize loci by removing extraneous reference padding >>> normalize_alleles_left('A', 1, 2, 'A', 1, 2) shuffled_alleles(start=1, stop=2, alleles='A') """ normalized_alleles = namedtuple('shuffled_alleles', 'start stop alleles') if len(alleles) < 2: return normalized_alleles(start, stop, alleles) # STEP 1: Trim common suffix trimmed, alleles = trim_common_suffixes(alleles) stop -= trimmed # STEP 2: Trim common prefix trimmed, alleles = trim_common_prefixes(alleles) start += trimmed # assert bound <= start,'start={:d}, left bound={:d}'.format(start, bound) # STEP 3: While a null allele exists, left shuffle by prepending alleles # with reference and trimming common suffixes while shuffle and '' in alleles and start > bound: step = min(ref_step, start - bound) r = ref[start - step:start].upper() new_alleles = [r + a for a in alleles] trimmed, new_alleles = trim_common_suffixes(new_alleles) if not trimmed: break start -= trimmed stop -= trimmed if trimmed == step: alleles = new_alleles else: left = step - trimmed alleles = [a[left:] for a in new_alleles] break return normalized_alleles(start, stop, tuple(alleles))
python
def normalize_alleles_left(ref, start, stop, alleles, bound, ref_step, shuffle=True): normalized_alleles = namedtuple('shuffled_alleles', 'start stop alleles') if len(alleles) < 2: return normalized_alleles(start, stop, alleles) # STEP 1: Trim common suffix trimmed, alleles = trim_common_suffixes(alleles) stop -= trimmed # STEP 2: Trim common prefix trimmed, alleles = trim_common_prefixes(alleles) start += trimmed # assert bound <= start,'start={:d}, left bound={:d}'.format(start, bound) # STEP 3: While a null allele exists, left shuffle by prepending alleles # with reference and trimming common suffixes while shuffle and '' in alleles and start > bound: step = min(ref_step, start - bound) r = ref[start - step:start].upper() new_alleles = [r + a for a in alleles] trimmed, new_alleles = trim_common_suffixes(new_alleles) if not trimmed: break start -= trimmed stop -= trimmed if trimmed == step: alleles = new_alleles else: left = step - trimmed alleles = [a[left:] for a in new_alleles] break return normalized_alleles(start, stop, tuple(alleles))
[ "def", "normalize_alleles_left", "(", "ref", ",", "start", ",", "stop", ",", "alleles", ",", "bound", ",", "ref_step", ",", "shuffle", "=", "True", ")", ":", "normalized_alleles", "=", "namedtuple", "(", "'shuffled_alleles'", ",", "'start stop alleles'", ")", ...
Normalize loci by removing extraneous reference padding >>> normalize_alleles_left('A', 1, 2, 'A', 1, 2) shuffled_alleles(start=1, stop=2, alleles='A')
[ "Normalize", "loci", "by", "removing", "extraneous", "reference", "padding" ]
4d16efb475e1802b2531a2f1c373e8819d8e533b
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/utils/norm.py#L56-L103
232,916
biocommons/hgvs
hgvs/utils/validation.py
validate_type_ac_pair
def validate_type_ac_pair(type, ac): """validate that accession is correct for variant type AND that accession is fully specified. """ assert type in valid_pairs, "Unknown variant type " + type if valid_pairs[type].match(ac): return (ValidationLevel.VALID, "Accession ({ac}) is compatible with variant type {type}".format(ac=ac, type=type)) elif invalid_pairs[type].match(ac): return (ValidationLevel.ERROR, "Accession ({ac}) is not compatible with variant type {type}".format( ac=ac, type=type)) else: return (ValidationLevel.WARNING, "Accession ({ac}) is not known to be compatible with variant type {type}".format( ac=ac, type=type))
python
def validate_type_ac_pair(type, ac): assert type in valid_pairs, "Unknown variant type " + type if valid_pairs[type].match(ac): return (ValidationLevel.VALID, "Accession ({ac}) is compatible with variant type {type}".format(ac=ac, type=type)) elif invalid_pairs[type].match(ac): return (ValidationLevel.ERROR, "Accession ({ac}) is not compatible with variant type {type}".format( ac=ac, type=type)) else: return (ValidationLevel.WARNING, "Accession ({ac}) is not known to be compatible with variant type {type}".format( ac=ac, type=type))
[ "def", "validate_type_ac_pair", "(", "type", ",", "ac", ")", ":", "assert", "type", "in", "valid_pairs", ",", "\"Unknown variant type \"", "+", "type", "if", "valid_pairs", "[", "type", "]", ".", "match", "(", "ac", ")", ":", "return", "(", "ValidationLevel"...
validate that accession is correct for variant type AND that accession is fully specified.
[ "validate", "that", "accession", "is", "correct", "for", "variant", "type", "AND", "that", "accession", "is", "fully", "specified", "." ]
4d16efb475e1802b2531a2f1c373e8819d8e533b
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/utils/validation.py#L30-L47
232,917
biocommons/hgvs
hgvs/utils/altseqbuilder.py
AltSeqBuilder.build_altseq
def build_altseq(self): """given a variant and a sequence, incorporate the variant and return the new sequence Data structure returned is analogous to the data structure used to return the variant sequence, but with an additional parameter denoting the start of a frameshift that should affect all bases downstream. :returns variant sequence data :rtype list of dictionaries """ NOT_CDS = "not_cds_variant" WHOLE_GENE_DELETED = "whole_gene_deleted" type_map = { NARefAlt: self._incorporate_delins, Dup: self._incorporate_dup, Inv: self._incorporate_inv, Repeat: self._incorporate_repeat, NOT_CDS: self._create_alt_equals_ref_noncds, WHOLE_GENE_DELETED: self._create_no_protein } # should loop over each allele rather than assume only 1 variant; return a list for now alt_data = [] variant_location = self._get_variant_region() if variant_location == self.EXON: edit_type = type(self._var_c.posedit.edit) elif variant_location == self.INTRON: edit_type = NOT_CDS elif variant_location == self.T_UTR: edit_type = NOT_CDS elif variant_location == self.F_UTR: # TODO: handle case where variant introduces a Met (new start) edit_type = NOT_CDS elif variant_location == self.WHOLE_GENE: if self._var_c.posedit.edit.type == "del": edit_type = WHOLE_GENE_DELETED elif self._var_c.posedit.edit.type == "dup": _logger.warning( "Whole-gene duplication; consequence assumed to not affect protein product") edit_type = NOT_CDS elif self._var_c.posedit.edit.type == "inv": _logger.warning( "Whole-gene inversion; consequence assumed to not affect protein product") edit_type = NOT_CDS else: edit_type = NOT_CDS else: # should never get here raise ValueError("value_location = {}".format(variant_location)) try: this_alt_data = type_map[edit_type]() except KeyError: raise NotImplementedError("c to p translation unsupported for {} type {}".format( self._var_c, edit_type)) # get the start of the "terminal" frameshift (i.e. 
one never "cancelled out") this_alt_data = self._get_frameshift_start(this_alt_data) alt_data.append(this_alt_data) if DBG: print(this_alt_data.transcript_sequence) return alt_data
python
def build_altseq(self): NOT_CDS = "not_cds_variant" WHOLE_GENE_DELETED = "whole_gene_deleted" type_map = { NARefAlt: self._incorporate_delins, Dup: self._incorporate_dup, Inv: self._incorporate_inv, Repeat: self._incorporate_repeat, NOT_CDS: self._create_alt_equals_ref_noncds, WHOLE_GENE_DELETED: self._create_no_protein } # should loop over each allele rather than assume only 1 variant; return a list for now alt_data = [] variant_location = self._get_variant_region() if variant_location == self.EXON: edit_type = type(self._var_c.posedit.edit) elif variant_location == self.INTRON: edit_type = NOT_CDS elif variant_location == self.T_UTR: edit_type = NOT_CDS elif variant_location == self.F_UTR: # TODO: handle case where variant introduces a Met (new start) edit_type = NOT_CDS elif variant_location == self.WHOLE_GENE: if self._var_c.posedit.edit.type == "del": edit_type = WHOLE_GENE_DELETED elif self._var_c.posedit.edit.type == "dup": _logger.warning( "Whole-gene duplication; consequence assumed to not affect protein product") edit_type = NOT_CDS elif self._var_c.posedit.edit.type == "inv": _logger.warning( "Whole-gene inversion; consequence assumed to not affect protein product") edit_type = NOT_CDS else: edit_type = NOT_CDS else: # should never get here raise ValueError("value_location = {}".format(variant_location)) try: this_alt_data = type_map[edit_type]() except KeyError: raise NotImplementedError("c to p translation unsupported for {} type {}".format( self._var_c, edit_type)) # get the start of the "terminal" frameshift (i.e. one never "cancelled out") this_alt_data = self._get_frameshift_start(this_alt_data) alt_data.append(this_alt_data) if DBG: print(this_alt_data.transcript_sequence) return alt_data
[ "def", "build_altseq", "(", "self", ")", ":", "NOT_CDS", "=", "\"not_cds_variant\"", "WHOLE_GENE_DELETED", "=", "\"whole_gene_deleted\"", "type_map", "=", "{", "NARefAlt", ":", "self", ".", "_incorporate_delins", ",", "Dup", ":", "self", ".", "_incorporate_dup", "...
given a variant and a sequence, incorporate the variant and return the new sequence Data structure returned is analogous to the data structure used to return the variant sequence, but with an additional parameter denoting the start of a frameshift that should affect all bases downstream. :returns variant sequence data :rtype list of dictionaries
[ "given", "a", "variant", "and", "a", "sequence", "incorporate", "the", "variant", "and", "return", "the", "new", "sequence" ]
4d16efb475e1802b2531a2f1c373e8819d8e533b
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/utils/altseqbuilder.py#L110-L174
232,918
biocommons/hgvs
hgvs/utils/altseqbuilder.py
AltSeqBuilder._incorporate_dup
def _incorporate_dup(self): """Incorporate dup into sequence""" seq, cds_start, cds_stop, start, end = self._setup_incorporate() dup_seq = seq[start:end] seq[end:end] = dup_seq is_frameshift = len(dup_seq) % 3 != 0 variant_start_aa = int(math.ceil((self._var_c.posedit.pos.end.base + 1) / 3.0)) alt_data = AltTranscriptData( seq, cds_start, cds_stop, is_frameshift, variant_start_aa, self._transcript_data.protein_accession, is_ambiguous=self._ref_has_multiple_stops) return alt_data
python
def _incorporate_dup(self): seq, cds_start, cds_stop, start, end = self._setup_incorporate() dup_seq = seq[start:end] seq[end:end] = dup_seq is_frameshift = len(dup_seq) % 3 != 0 variant_start_aa = int(math.ceil((self._var_c.posedit.pos.end.base + 1) / 3.0)) alt_data = AltTranscriptData( seq, cds_start, cds_stop, is_frameshift, variant_start_aa, self._transcript_data.protein_accession, is_ambiguous=self._ref_has_multiple_stops) return alt_data
[ "def", "_incorporate_dup", "(", "self", ")", ":", "seq", ",", "cds_start", ",", "cds_stop", ",", "start", ",", "end", "=", "self", ".", "_setup_incorporate", "(", ")", "dup_seq", "=", "seq", "[", "start", ":", "end", "]", "seq", "[", "end", ":", "end...
Incorporate dup into sequence
[ "Incorporate", "dup", "into", "sequence" ]
4d16efb475e1802b2531a2f1c373e8819d8e533b
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/utils/altseqbuilder.py#L251-L269
232,919
biocommons/hgvs
hgvs/utils/altseqbuilder.py
AltSeqBuilder._incorporate_inv
def _incorporate_inv(self): """Incorporate inv into sequence""" seq, cds_start, cds_stop, start, end = self._setup_incorporate() seq[start:end] = list(reverse_complement(''.join(seq[start:end]))) is_frameshift = False variant_start_aa = max(int(math.ceil((self._var_c.posedit.pos.start.base) / 3.0)), 1) alt_data = AltTranscriptData( seq, cds_start, cds_stop, is_frameshift, variant_start_aa, self._transcript_data.protein_accession, is_ambiguous=self._ref_has_multiple_stops) return alt_data
python
def _incorporate_inv(self): seq, cds_start, cds_stop, start, end = self._setup_incorporate() seq[start:end] = list(reverse_complement(''.join(seq[start:end]))) is_frameshift = False variant_start_aa = max(int(math.ceil((self._var_c.posedit.pos.start.base) / 3.0)), 1) alt_data = AltTranscriptData( seq, cds_start, cds_stop, is_frameshift, variant_start_aa, self._transcript_data.protein_accession, is_ambiguous=self._ref_has_multiple_stops) return alt_data
[ "def", "_incorporate_inv", "(", "self", ")", ":", "seq", ",", "cds_start", ",", "cds_stop", ",", "start", ",", "end", "=", "self", ".", "_setup_incorporate", "(", ")", "seq", "[", "start", ":", "end", "]", "=", "list", "(", "reverse_complement", "(", "...
Incorporate inv into sequence
[ "Incorporate", "inv", "into", "sequence" ]
4d16efb475e1802b2531a2f1c373e8819d8e533b
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/utils/altseqbuilder.py#L271-L288
232,920
biocommons/hgvs
hgvs/utils/altseqbuilder.py
AltSeqBuilder._create_no_protein
def _create_no_protein(self): """Create a no-protein result""" alt_data = AltTranscriptData([], None, None, False, None, self._transcript_data.protein_accession, is_ambiguous=False) return alt_data
python
def _create_no_protein(self): alt_data = AltTranscriptData([], None, None, False, None, self._transcript_data.protein_accession, is_ambiguous=False) return alt_data
[ "def", "_create_no_protein", "(", "self", ")", ":", "alt_data", "=", "AltTranscriptData", "(", "[", "]", ",", "None", ",", "None", ",", "False", ",", "None", ",", "self", ".", "_transcript_data", ".", "protein_accession", ",", "is_ambiguous", "=", "False", ...
Create a no-protein result
[ "Create", "a", "no", "-", "protein", "result" ]
4d16efb475e1802b2531a2f1c373e8819d8e533b
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/utils/altseqbuilder.py#L345-L354
232,921
jjjake/internetarchive
internetarchive/iarequest.py
S3PreparedRequest.prepare_headers
def prepare_headers(self, headers, metadata, queue_derive=True): """Convert a dictionary of metadata into S3 compatible HTTP headers, and append headers to ``headers``. :type metadata: dict :param metadata: Metadata to be converted into S3 HTTP Headers and appended to ``headers``. :type headers: dict :param headers: (optional) S3 compatible HTTP headers. """ if not metadata.get('scanner'): scanner = 'Internet Archive Python library {0}'.format(__version__) metadata['scanner'] = scanner prepared_metadata = prepare_metadata(metadata) headers['x-archive-auto-make-bucket'] = '1' if queue_derive is False: headers['x-archive-queue-derive'] = '0' else: headers['x-archive-queue-derive'] = '1' for meta_key, meta_value in prepared_metadata.items(): # Encode arrays into JSON strings because Archive.org does not # yet support complex metadata structures in # <identifier>_meta.xml. if isinstance(meta_value, dict): meta_value = json.dumps(meta_value) # Convert the metadata value into a list if it is not already # iterable. if (isinstance(meta_value, six.string_types) or not hasattr(meta_value, '__iter__')): meta_value = [meta_value] # Convert metadata items into HTTP headers and add to # ``headers`` dict. for i, value in enumerate(meta_value): if not value: continue header_key = 'x-archive-meta{0:02d}-{1}'.format(i, meta_key) if (isinstance(value, six.string_types) and needs_quote(value)): if six.PY2 and isinstance(value, six.text_type): value = value.encode('utf-8') value = 'uri({0})'.format(urllib.parse.quote(value)) # because rfc822 http headers disallow _ in names, IA-S3 will # translate two hyphens in a row (--) into an underscore (_). header_key = header_key.replace('_', '--') headers[header_key] = value super(S3PreparedRequest, self).prepare_headers(headers)
python
def prepare_headers(self, headers, metadata, queue_derive=True): if not metadata.get('scanner'): scanner = 'Internet Archive Python library {0}'.format(__version__) metadata['scanner'] = scanner prepared_metadata = prepare_metadata(metadata) headers['x-archive-auto-make-bucket'] = '1' if queue_derive is False: headers['x-archive-queue-derive'] = '0' else: headers['x-archive-queue-derive'] = '1' for meta_key, meta_value in prepared_metadata.items(): # Encode arrays into JSON strings because Archive.org does not # yet support complex metadata structures in # <identifier>_meta.xml. if isinstance(meta_value, dict): meta_value = json.dumps(meta_value) # Convert the metadata value into a list if it is not already # iterable. if (isinstance(meta_value, six.string_types) or not hasattr(meta_value, '__iter__')): meta_value = [meta_value] # Convert metadata items into HTTP headers and add to # ``headers`` dict. for i, value in enumerate(meta_value): if not value: continue header_key = 'x-archive-meta{0:02d}-{1}'.format(i, meta_key) if (isinstance(value, six.string_types) and needs_quote(value)): if six.PY2 and isinstance(value, six.text_type): value = value.encode('utf-8') value = 'uri({0})'.format(urllib.parse.quote(value)) # because rfc822 http headers disallow _ in names, IA-S3 will # translate two hyphens in a row (--) into an underscore (_). header_key = header_key.replace('_', '--') headers[header_key] = value super(S3PreparedRequest, self).prepare_headers(headers)
[ "def", "prepare_headers", "(", "self", ",", "headers", ",", "metadata", ",", "queue_derive", "=", "True", ")", ":", "if", "not", "metadata", ".", "get", "(", "'scanner'", ")", ":", "scanner", "=", "'Internet Archive Python library {0}'", ".", "format", "(", ...
Convert a dictionary of metadata into S3 compatible HTTP headers, and append headers to ``headers``. :type metadata: dict :param metadata: Metadata to be converted into S3 HTTP Headers and appended to ``headers``. :type headers: dict :param headers: (optional) S3 compatible HTTP headers.
[ "Convert", "a", "dictionary", "of", "metadata", "into", "S3", "compatible", "HTTP", "headers", "and", "append", "headers", "to", "headers", "." ]
7c0c71bfe52490927a37ade15bd09b2733fea660
https://github.com/jjjake/internetarchive/blob/7c0c71bfe52490927a37ade15bd09b2733fea660/internetarchive/iarequest.py#L110-L158
232,922
jjjake/internetarchive
internetarchive/cli/ia.py
load_ia_module
def load_ia_module(cmd): """Dynamically import ia module.""" try: if cmd in list(cmd_aliases.keys()) + list(cmd_aliases.values()): _module = 'internetarchive.cli.ia_{0}'.format(cmd) return __import__(_module, fromlist=['internetarchive.cli']) else: _module = 'ia_{0}'.format(cmd) for ep in iter_entry_points('internetarchive.cli.plugins'): if ep.name == _module: return ep.load() raise ImportError except (ImportError, DistributionNotFound): print("error: '{0}' is not an ia command! See 'ia help'".format(cmd), file=sys.stderr) matches = '\t'.join(difflib.get_close_matches(cmd, cmd_aliases.values())) if matches: print('\nDid you mean one of these?\n\t{0}'.format(matches)) sys.exit(127)
python
def load_ia_module(cmd): try: if cmd in list(cmd_aliases.keys()) + list(cmd_aliases.values()): _module = 'internetarchive.cli.ia_{0}'.format(cmd) return __import__(_module, fromlist=['internetarchive.cli']) else: _module = 'ia_{0}'.format(cmd) for ep in iter_entry_points('internetarchive.cli.plugins'): if ep.name == _module: return ep.load() raise ImportError except (ImportError, DistributionNotFound): print("error: '{0}' is not an ia command! See 'ia help'".format(cmd), file=sys.stderr) matches = '\t'.join(difflib.get_close_matches(cmd, cmd_aliases.values())) if matches: print('\nDid you mean one of these?\n\t{0}'.format(matches)) sys.exit(127)
[ "def", "load_ia_module", "(", "cmd", ")", ":", "try", ":", "if", "cmd", "in", "list", "(", "cmd_aliases", ".", "keys", "(", ")", ")", "+", "list", "(", "cmd_aliases", ".", "values", "(", ")", ")", ":", "_module", "=", "'internetarchive.cli.ia_{0}'", "....
Dynamically import ia module.
[ "Dynamically", "import", "ia", "module", "." ]
7c0c71bfe52490927a37ade15bd09b2733fea660
https://github.com/jjjake/internetarchive/blob/7c0c71bfe52490927a37ade15bd09b2733fea660/internetarchive/cli/ia.py#L86-L104
232,923
jjjake/internetarchive
internetarchive/cli/ia.py
main
def main(): """This is the CLI driver for ia-wrapper.""" args = docopt(__doc__, version=__version__, options_first=True) # Validate args. s = Schema({ six.text_type: bool, '--config-file': Or(None, str), '<args>': list, '<command>': Or(str, lambda _: 'help'), }) try: args = s.validate(args) except SchemaError as exc: print('{0}\n{1}'.format(str(exc), printable_usage(__doc__)), file=sys.stderr) sys.exit(1) # Get subcommand. cmd = args['<command>'] if cmd in cmd_aliases: cmd = cmd_aliases[cmd] if (cmd == 'help') or (not cmd): if not args['<args>']: sys.exit(print(__doc__.strip(), file=sys.stderr)) else: ia_module = load_ia_module(args['<args>'][0]) sys.exit(print(ia_module.__doc__.strip(), file=sys.stderr)) if cmd != 'configure' and args['--config-file']: if not os.path.isfile(args['--config-file']): print('--config-file should be a readable file.\n{0}'.format( printable_usage(__doc__)), file=sys.stderr) sys.exit(1) argv = [cmd] + args['<args>'] config = dict() if args['--log']: config['logging'] = {'level': 'INFO'} elif args['--debug']: config['logging'] = {'level': 'DEBUG'} if args['--insecure']: config['general'] = dict(secure=False) session = get_session(config_file=args['--config-file'], config=config, debug=args['--debug']) ia_module = load_ia_module(cmd) try: sys.exit(ia_module.main(argv, session)) except IOError as e: # Handle Broken Pipe errors. if e.errno == errno.EPIPE: sys.stderr.close() sys.stdout.close() sys.exit(0) else: raise
python
def main(): args = docopt(__doc__, version=__version__, options_first=True) # Validate args. s = Schema({ six.text_type: bool, '--config-file': Or(None, str), '<args>': list, '<command>': Or(str, lambda _: 'help'), }) try: args = s.validate(args) except SchemaError as exc: print('{0}\n{1}'.format(str(exc), printable_usage(__doc__)), file=sys.stderr) sys.exit(1) # Get subcommand. cmd = args['<command>'] if cmd in cmd_aliases: cmd = cmd_aliases[cmd] if (cmd == 'help') or (not cmd): if not args['<args>']: sys.exit(print(__doc__.strip(), file=sys.stderr)) else: ia_module = load_ia_module(args['<args>'][0]) sys.exit(print(ia_module.__doc__.strip(), file=sys.stderr)) if cmd != 'configure' and args['--config-file']: if not os.path.isfile(args['--config-file']): print('--config-file should be a readable file.\n{0}'.format( printable_usage(__doc__)), file=sys.stderr) sys.exit(1) argv = [cmd] + args['<args>'] config = dict() if args['--log']: config['logging'] = {'level': 'INFO'} elif args['--debug']: config['logging'] = {'level': 'DEBUG'} if args['--insecure']: config['general'] = dict(secure=False) session = get_session(config_file=args['--config-file'], config=config, debug=args['--debug']) ia_module = load_ia_module(cmd) try: sys.exit(ia_module.main(argv, session)) except IOError as e: # Handle Broken Pipe errors. if e.errno == errno.EPIPE: sys.stderr.close() sys.stdout.close() sys.exit(0) else: raise
[ "def", "main", "(", ")", ":", "args", "=", "docopt", "(", "__doc__", ",", "version", "=", "__version__", ",", "options_first", "=", "True", ")", "# Validate args.", "s", "=", "Schema", "(", "{", "six", ".", "text_type", ":", "bool", ",", "'--config-file'...
This is the CLI driver for ia-wrapper.
[ "This", "is", "the", "CLI", "driver", "for", "ia", "-", "wrapper", "." ]
7c0c71bfe52490927a37ade15bd09b2733fea660
https://github.com/jjjake/internetarchive/blob/7c0c71bfe52490927a37ade15bd09b2733fea660/internetarchive/cli/ia.py#L107-L167
232,924
jjjake/internetarchive
internetarchive/utils.py
suppress_keyboard_interrupt_message
def suppress_keyboard_interrupt_message(): """Register a new excepthook to suppress KeyboardInterrupt exception messages, and exit with status code 130. """ old_excepthook = sys.excepthook def new_hook(type, value, traceback): if type != KeyboardInterrupt: old_excepthook(type, value, traceback) else: sys.exit(130) sys.excepthook = new_hook
python
def suppress_keyboard_interrupt_message(): old_excepthook = sys.excepthook def new_hook(type, value, traceback): if type != KeyboardInterrupt: old_excepthook(type, value, traceback) else: sys.exit(130) sys.excepthook = new_hook
[ "def", "suppress_keyboard_interrupt_message", "(", ")", ":", "old_excepthook", "=", "sys", ".", "excepthook", "def", "new_hook", "(", "type", ",", "value", ",", "traceback", ")", ":", "if", "type", "!=", "KeyboardInterrupt", ":", "old_excepthook", "(", "type", ...
Register a new excepthook to suppress KeyboardInterrupt exception messages, and exit with status code 130.
[ "Register", "a", "new", "excepthook", "to", "suppress", "KeyboardInterrupt", "exception", "messages", "and", "exit", "with", "status", "code", "130", "." ]
7c0c71bfe52490927a37ade15bd09b2733fea660
https://github.com/jjjake/internetarchive/blob/7c0c71bfe52490927a37ade15bd09b2733fea660/internetarchive/utils.py#L105-L118
232,925
jjjake/internetarchive
internetarchive/utils.py
recursive_file_count
def recursive_file_count(files, item=None, checksum=False): """Given a filepath or list of filepaths, return the total number of files.""" if not isinstance(files, (list, set)): files = [files] total_files = 0 if checksum is True: md5s = [f.get('md5') for f in item.files] else: md5s = list() if isinstance(files, dict): # make sure to use local filenames. _files = files.values() else: if isinstance(files[0], tuple): _files = dict(files).values() else: _files = files for f in _files: try: is_dir = os.path.isdir(f) except TypeError: try: f = f[0] is_dir = os.path.isdir(f) except (AttributeError, TypeError): is_dir = False if is_dir: for x, _ in iter_directory(f): lmd5 = get_md5(open(x, 'rb')) if lmd5 in md5s: continue else: total_files += 1 else: try: lmd5 = get_md5(open(f, 'rb')) except TypeError: # Support file-like objects. lmd5 = get_md5(f) if lmd5 in md5s: continue else: total_files += 1 return total_files
python
def recursive_file_count(files, item=None, checksum=False): if not isinstance(files, (list, set)): files = [files] total_files = 0 if checksum is True: md5s = [f.get('md5') for f in item.files] else: md5s = list() if isinstance(files, dict): # make sure to use local filenames. _files = files.values() else: if isinstance(files[0], tuple): _files = dict(files).values() else: _files = files for f in _files: try: is_dir = os.path.isdir(f) except TypeError: try: f = f[0] is_dir = os.path.isdir(f) except (AttributeError, TypeError): is_dir = False if is_dir: for x, _ in iter_directory(f): lmd5 = get_md5(open(x, 'rb')) if lmd5 in md5s: continue else: total_files += 1 else: try: lmd5 = get_md5(open(f, 'rb')) except TypeError: # Support file-like objects. lmd5 = get_md5(f) if lmd5 in md5s: continue else: total_files += 1 return total_files
[ "def", "recursive_file_count", "(", "files", ",", "item", "=", "None", ",", "checksum", "=", "False", ")", ":", "if", "not", "isinstance", "(", "files", ",", "(", "list", ",", "set", ")", ")", ":", "files", "=", "[", "files", "]", "total_files", "=",...
Given a filepath or list of filepaths, return the total number of files.
[ "Given", "a", "filepath", "or", "list", "of", "filepaths", "return", "the", "total", "number", "of", "files", "." ]
7c0c71bfe52490927a37ade15bd09b2733fea660
https://github.com/jjjake/internetarchive/blob/7c0c71bfe52490927a37ade15bd09b2733fea660/internetarchive/utils.py#L210-L253
232,926
jjjake/internetarchive
internetarchive/utils.py
reraise_modify
def reraise_modify(caught_exc, append_msg, prepend=False): """Append message to exception while preserving attributes. Preserves exception class, and exception traceback. Note: This function needs to be called inside an except because `sys.exc_info()` requires the exception context. Args: caught_exc(Exception): The caught exception object append_msg(str): The message to append to the caught exception prepend(bool): If True prepend the message to args instead of appending Returns: None Side Effects: Re-raises the exception with the preserved data / trace but modified message """ ExceptClass = type(caught_exc) # Keep old traceback traceback = sys.exc_info()[2] if not caught_exc.args: # If no args, create our own tuple arg_list = [append_msg] else: # Take the last arg # If it is a string # append your message. # Otherwise append it to the # arg list(Not as pretty) arg_list = list(caught_exc.args[:-1]) last_arg = caught_exc.args[-1] if isinstance(last_arg, str): if prepend: arg_list.append(append_msg + last_arg) else: arg_list.append(last_arg + append_msg) else: arg_list += [last_arg, append_msg] caught_exc.args = tuple(arg_list) six.reraise(ExceptClass, caught_exc, traceback)
python
def reraise_modify(caught_exc, append_msg, prepend=False): ExceptClass = type(caught_exc) # Keep old traceback traceback = sys.exc_info()[2] if not caught_exc.args: # If no args, create our own tuple arg_list = [append_msg] else: # Take the last arg # If it is a string # append your message. # Otherwise append it to the # arg list(Not as pretty) arg_list = list(caught_exc.args[:-1]) last_arg = caught_exc.args[-1] if isinstance(last_arg, str): if prepend: arg_list.append(append_msg + last_arg) else: arg_list.append(last_arg + append_msg) else: arg_list += [last_arg, append_msg] caught_exc.args = tuple(arg_list) six.reraise(ExceptClass, caught_exc, traceback)
[ "def", "reraise_modify", "(", "caught_exc", ",", "append_msg", ",", "prepend", "=", "False", ")", ":", "ExceptClass", "=", "type", "(", "caught_exc", ")", "# Keep old traceback", "traceback", "=", "sys", ".", "exc_info", "(", ")", "[", "2", "]", "if", "not...
Append message to exception while preserving attributes. Preserves exception class, and exception traceback. Note: This function needs to be called inside an except because `sys.exc_info()` requires the exception context. Args: caught_exc(Exception): The caught exception object append_msg(str): The message to append to the caught exception prepend(bool): If True prepend the message to args instead of appending Returns: None Side Effects: Re-raises the exception with the preserved data / trace but modified message
[ "Append", "message", "to", "exception", "while", "preserving", "attributes", "." ]
7c0c71bfe52490927a37ade15bd09b2733fea660
https://github.com/jjjake/internetarchive/blob/7c0c71bfe52490927a37ade15bd09b2733fea660/internetarchive/utils.py#L265-L310
232,927
jjjake/internetarchive
internetarchive/api.py
configure
def configure(username=None, password=None, config_file=None): """Configure internetarchive with your Archive.org credentials. :type username: str :param username: The email address associated with your Archive.org account. :type password: str :param password: Your Archive.org password. Usage: >>> from internetarchive import configure >>> configure('user@example.com', 'password') """ username = input('Email address: ') if not username else username password = getpass('Password: ') if not password else password config_file_path = config_module.write_config_file(username, password, config_file) return config_file_path
python
def configure(username=None, password=None, config_file=None): username = input('Email address: ') if not username else username password = getpass('Password: ') if not password else password config_file_path = config_module.write_config_file(username, password, config_file) return config_file_path
[ "def", "configure", "(", "username", "=", "None", ",", "password", "=", "None", ",", "config_file", "=", "None", ")", ":", "username", "=", "input", "(", "'Email address: '", ")", "if", "not", "username", "else", "username", "password", "=", "getpass", "("...
Configure internetarchive with your Archive.org credentials. :type username: str :param username: The email address associated with your Archive.org account. :type password: str :param password: Your Archive.org password. Usage: >>> from internetarchive import configure >>> configure('user@example.com', 'password')
[ "Configure", "internetarchive", "with", "your", "Archive", ".", "org", "credentials", "." ]
7c0c71bfe52490927a37ade15bd09b2733fea660
https://github.com/jjjake/internetarchive/blob/7c0c71bfe52490927a37ade15bd09b2733fea660/internetarchive/api.py#L563-L579
232,928
jjjake/internetarchive
internetarchive/api.py
get_user_info
def get_user_info(access_key, secret_key): """Returns details about an Archive.org user given an IA-S3 key pair. :type access_key: str :param access_key: IA-S3 access_key to use when making the given request. :type secret_key: str :param secret_key: IA-S3 secret_key to use when making the given request. """ u = 'https://s3.us.archive.org' p = dict(check_auth=1) r = requests.get(u, params=p, auth=auth.S3Auth(access_key, secret_key)) r.raise_for_status() j = r.json() if j.get('error'): raise AuthenticationError(j.get('error')) else: return j
python
def get_user_info(access_key, secret_key): u = 'https://s3.us.archive.org' p = dict(check_auth=1) r = requests.get(u, params=p, auth=auth.S3Auth(access_key, secret_key)) r.raise_for_status() j = r.json() if j.get('error'): raise AuthenticationError(j.get('error')) else: return j
[ "def", "get_user_info", "(", "access_key", ",", "secret_key", ")", ":", "u", "=", "'https://s3.us.archive.org'", "p", "=", "dict", "(", "check_auth", "=", "1", ")", "r", "=", "requests", ".", "get", "(", "u", ",", "params", "=", "p", ",", "auth", "=", ...
Returns details about an Archive.org user given an IA-S3 key pair. :type access_key: str :param access_key: IA-S3 access_key to use when making the given request. :type secret_key: str :param secret_key: IA-S3 secret_key to use when making the given request.
[ "Returns", "details", "about", "an", "Archive", ".", "org", "user", "given", "an", "IA", "-", "S3", "key", "pair", "." ]
7c0c71bfe52490927a37ade15bd09b2733fea660
https://github.com/jjjake/internetarchive/blob/7c0c71bfe52490927a37ade15bd09b2733fea660/internetarchive/api.py#L595-L612
232,929
jjjake/internetarchive
internetarchive/catalog.py
CatalogTask.task_log
def task_log(self): """Get task log. :rtype: str :returns: The task log as a string. """ if self.task_id is None: raise ValueError('task_id is None') return self.get_task_log(self.task_id, self.session, self.request_kwargs)
python
def task_log(self): if self.task_id is None: raise ValueError('task_id is None') return self.get_task_log(self.task_id, self.session, self.request_kwargs)
[ "def", "task_log", "(", "self", ")", ":", "if", "self", ".", "task_id", "is", "None", ":", "raise", "ValueError", "(", "'task_id is None'", ")", "return", "self", ".", "get_task_log", "(", "self", ".", "task_id", ",", "self", ".", "session", ",", "self",...
Get task log. :rtype: str :returns: The task log as a string.
[ "Get", "task", "log", "." ]
7c0c71bfe52490927a37ade15bd09b2733fea660
https://github.com/jjjake/internetarchive/blob/7c0c71bfe52490927a37ade15bd09b2733fea660/internetarchive/catalog.py#L213-L222
232,930
jjjake/internetarchive
internetarchive/catalog.py
CatalogTask.get_task_log
def get_task_log(task_id, session, request_kwargs=None): """Static method for getting a task log, given a task_id. This method exists so a task log can be retrieved without retrieving the items task history first. :type task_id: str or int :param task_id: The task id for the task log you'd like to fetch. :type archive_session: :class:`ArchiveSession <ArchiveSession>` :type request_kwargs: dict :param request_kwargs: (optional) Keyword arguments that :py:class:`requests.Request` takes. :rtype: str :returns: The task log as a string. """ request_kwargs = request_kwargs if request_kwargs else dict() url = '{0}//catalogd.archive.org/log/{1}'.format(session.protocol, task_id) p = dict(full=1) r = session.get(url, params=p, **request_kwargs) r.raise_for_status() return r.content.decode('utf-8')
python
def get_task_log(task_id, session, request_kwargs=None): request_kwargs = request_kwargs if request_kwargs else dict() url = '{0}//catalogd.archive.org/log/{1}'.format(session.protocol, task_id) p = dict(full=1) r = session.get(url, params=p, **request_kwargs) r.raise_for_status() return r.content.decode('utf-8')
[ "def", "get_task_log", "(", "task_id", ",", "session", ",", "request_kwargs", "=", "None", ")", ":", "request_kwargs", "=", "request_kwargs", "if", "request_kwargs", "else", "dict", "(", ")", "url", "=", "'{0}//catalogd.archive.org/log/{1}'", ".", "format", "(", ...
Static method for getting a task log, given a task_id. This method exists so a task log can be retrieved without retrieving the items task history first. :type task_id: str or int :param task_id: The task id for the task log you'd like to fetch. :type archive_session: :class:`ArchiveSession <ArchiveSession>` :type request_kwargs: dict :param request_kwargs: (optional) Keyword arguments that :py:class:`requests.Request` takes. :rtype: str :returns: The task log as a string.
[ "Static", "method", "for", "getting", "a", "task", "log", "given", "a", "task_id", "." ]
7c0c71bfe52490927a37ade15bd09b2733fea660
https://github.com/jjjake/internetarchive/blob/7c0c71bfe52490927a37ade15bd09b2733fea660/internetarchive/catalog.py#L225-L249
232,931
jjjake/internetarchive
internetarchive/session.py
ArchiveSession._get_user_agent_string
def _get_user_agent_string(self): """Generate a User-Agent string to be sent with every request.""" uname = platform.uname() try: lang = locale.getlocale()[0][:2] except: lang = '' py_version = '{0}.{1}.{2}'.format(*sys.version_info) return 'internetarchive/{0} ({1} {2}; N; {3}; {4}) Python/{5}'.format( __version__, uname[0], uname[-1], lang, self.access_key, py_version)
python
def _get_user_agent_string(self): uname = platform.uname() try: lang = locale.getlocale()[0][:2] except: lang = '' py_version = '{0}.{1}.{2}'.format(*sys.version_info) return 'internetarchive/{0} ({1} {2}; N; {3}; {4}) Python/{5}'.format( __version__, uname[0], uname[-1], lang, self.access_key, py_version)
[ "def", "_get_user_agent_string", "(", "self", ")", ":", "uname", "=", "platform", ".", "uname", "(", ")", "try", ":", "lang", "=", "locale", ".", "getlocale", "(", ")", "[", "0", "]", "[", ":", "2", "]", "except", ":", "lang", "=", "''", "py_versio...
Generate a User-Agent string to be sent with every request.
[ "Generate", "a", "User", "-", "Agent", "string", "to", "be", "sent", "with", "every", "request", "." ]
7c0c71bfe52490927a37ade15bd09b2733fea660
https://github.com/jjjake/internetarchive/blob/7c0c71bfe52490927a37ade15bd09b2733fea660/internetarchive/session.py#L125-L134
232,932
jjjake/internetarchive
internetarchive/session.py
ArchiveSession.rebuild_auth
def rebuild_auth(self, prepared_request, response): """Never rebuild auth for archive.org URLs. """ u = urlparse(prepared_request.url) if u.netloc.endswith('archive.org'): return super(ArchiveSession, self).rebuild_auth(prepared_request, response)
python
def rebuild_auth(self, prepared_request, response): u = urlparse(prepared_request.url) if u.netloc.endswith('archive.org'): return super(ArchiveSession, self).rebuild_auth(prepared_request, response)
[ "def", "rebuild_auth", "(", "self", ",", "prepared_request", ",", "response", ")", ":", "u", "=", "urlparse", "(", "prepared_request", ".", "url", ")", "if", "u", ".", "netloc", ".", "endswith", "(", "'archive.org'", ")", ":", "return", "super", "(", "Ar...
Never rebuild auth for archive.org URLs.
[ "Never", "rebuild", "auth", "for", "archive", ".", "org", "URLs", "." ]
7c0c71bfe52490927a37ade15bd09b2733fea660
https://github.com/jjjake/internetarchive/blob/7c0c71bfe52490927a37ade15bd09b2733fea660/internetarchive/session.py#L136-L142
232,933
stephenmcd/django-forms-builder
forms_builder/forms/forms.py
FormForForm.email_to
def email_to(self): """ Return the value entered for the first field of type EmailField. """ for field in self.form_fields: if field.is_a(fields.EMAIL): return self.cleaned_data[field.slug] return None
python
def email_to(self): for field in self.form_fields: if field.is_a(fields.EMAIL): return self.cleaned_data[field.slug] return None
[ "def", "email_to", "(", "self", ")", ":", "for", "field", "in", "self", ".", "form_fields", ":", "if", "field", ".", "is_a", "(", "fields", ".", "EMAIL", ")", ":", "return", "self", ".", "cleaned_data", "[", "field", ".", "slug", "]", "return", "None...
Return the value entered for the first field of type EmailField.
[ "Return", "the", "value", "entered", "for", "the", "first", "field", "of", "type", "EmailField", "." ]
89fe03100ec09a6166cc0bf0022399bbbdca6298
https://github.com/stephenmcd/django-forms-builder/blob/89fe03100ec09a6166cc0bf0022399bbbdca6298/forms_builder/forms/forms.py#L255-L262
232,934
stephenmcd/django-forms-builder
forms_builder/forms/utils.py
unique_slug
def unique_slug(manager, slug_field, slug): """ Ensure slug is unique for the given manager, appending a digit if it isn't. """ max_length = manager.model._meta.get_field(slug_field).max_length slug = slug[:max_length] i = 0 while True: if i > 0: if i > 1: slug = slug.rsplit("-", 1)[0] # We need to keep the slug length under the slug fields max length. We need to # account for the length that is added by adding a random integer and `-`. slug = "%s-%s" % (slug[:max_length - len(str(i)) - 1], i) if not manager.filter(**{slug_field: slug}): break i += 1 return slug
python
def unique_slug(manager, slug_field, slug): max_length = manager.model._meta.get_field(slug_field).max_length slug = slug[:max_length] i = 0 while True: if i > 0: if i > 1: slug = slug.rsplit("-", 1)[0] # We need to keep the slug length under the slug fields max length. We need to # account for the length that is added by adding a random integer and `-`. slug = "%s-%s" % (slug[:max_length - len(str(i)) - 1], i) if not manager.filter(**{slug_field: slug}): break i += 1 return slug
[ "def", "unique_slug", "(", "manager", ",", "slug_field", ",", "slug", ")", ":", "max_length", "=", "manager", ".", "model", ".", "_meta", ".", "get_field", "(", "slug_field", ")", ".", "max_length", "slug", "=", "slug", "[", ":", "max_length", "]", "i", ...
Ensure slug is unique for the given manager, appending a digit if it isn't.
[ "Ensure", "slug", "is", "unique", "for", "the", "given", "manager", "appending", "a", "digit", "if", "it", "isn", "t", "." ]
89fe03100ec09a6166cc0bf0022399bbbdca6298
https://github.com/stephenmcd/django-forms-builder/blob/89fe03100ec09a6166cc0bf0022399bbbdca6298/forms_builder/forms/utils.py#L25-L43
232,935
stephenmcd/django-forms-builder
forms_builder/forms/utils.py
import_attr
def import_attr(path): """ Given a a Python dotted path to a variable in a module, imports the module and returns the variable in it. """ module_path, attr_name = path.rsplit(".", 1) return getattr(import_module(module_path), attr_name)
python
def import_attr(path): module_path, attr_name = path.rsplit(".", 1) return getattr(import_module(module_path), attr_name)
[ "def", "import_attr", "(", "path", ")", ":", "module_path", ",", "attr_name", "=", "path", ".", "rsplit", "(", "\".\"", ",", "1", ")", "return", "getattr", "(", "import_module", "(", "module_path", ")", ",", "attr_name", ")" ]
Given a a Python dotted path to a variable in a module, imports the module and returns the variable in it.
[ "Given", "a", "a", "Python", "dotted", "path", "to", "a", "variable", "in", "a", "module", "imports", "the", "module", "and", "returns", "the", "variable", "in", "it", "." ]
89fe03100ec09a6166cc0bf0022399bbbdca6298
https://github.com/stephenmcd/django-forms-builder/blob/89fe03100ec09a6166cc0bf0022399bbbdca6298/forms_builder/forms/utils.py#L61-L67
232,936
stephenmcd/django-forms-builder
forms_builder/forms/views.py
form_sent
def form_sent(request, slug, template="forms/form_sent.html"): """ Show the response message. """ published = Form.objects.published(for_user=request.user) context = {"form": get_object_or_404(published, slug=slug)} return render_to_response(template, context, RequestContext(request))
python
def form_sent(request, slug, template="forms/form_sent.html"): published = Form.objects.published(for_user=request.user) context = {"form": get_object_or_404(published, slug=slug)} return render_to_response(template, context, RequestContext(request))
[ "def", "form_sent", "(", "request", ",", "slug", ",", "template", "=", "\"forms/form_sent.html\"", ")", ":", "published", "=", "Form", ".", "objects", ".", "published", "(", "for_user", "=", "request", ".", "user", ")", "context", "=", "{", "\"form\"", ":"...
Show the response message.
[ "Show", "the", "response", "message", "." ]
89fe03100ec09a6166cc0bf0022399bbbdca6298
https://github.com/stephenmcd/django-forms-builder/blob/89fe03100ec09a6166cc0bf0022399bbbdca6298/forms_builder/forms/views.py#L117-L123
232,937
stephenmcd/django-forms-builder
forms_builder/forms/admin.py
FormAdmin.get_queryset
def get_queryset(self, request): """ Annotate the queryset with the entries count for use in the admin list view. """ qs = super(FormAdmin, self).get_queryset(request) return qs.annotate(total_entries=Count("entries"))
python
def get_queryset(self, request): qs = super(FormAdmin, self).get_queryset(request) return qs.annotate(total_entries=Count("entries"))
[ "def", "get_queryset", "(", "self", ",", "request", ")", ":", "qs", "=", "super", "(", "FormAdmin", ",", "self", ")", ".", "get_queryset", "(", "request", ")", "return", "qs", ".", "annotate", "(", "total_entries", "=", "Count", "(", "\"entries\"", ")", ...
Annotate the queryset with the entries count for use in the admin list view.
[ "Annotate", "the", "queryset", "with", "the", "entries", "count", "for", "use", "in", "the", "admin", "list", "view", "." ]
89fe03100ec09a6166cc0bf0022399bbbdca6298
https://github.com/stephenmcd/django-forms-builder/blob/89fe03100ec09a6166cc0bf0022399bbbdca6298/forms_builder/forms/admin.py#L77-L83
232,938
stephenmcd/django-forms-builder
forms_builder/forms/admin.py
FormAdmin.file_view
def file_view(self, request, field_entry_id): """ Output the file for the requested field entry. """ model = self.fieldentry_model field_entry = get_object_or_404(model, id=field_entry_id) path = join(fs.location, field_entry.value) response = HttpResponse(content_type=guess_type(path)[0]) f = open(path, "r+b") response["Content-Disposition"] = "attachment; filename=%s" % f.name response.write(f.read()) f.close() return response
python
def file_view(self, request, field_entry_id): model = self.fieldentry_model field_entry = get_object_or_404(model, id=field_entry_id) path = join(fs.location, field_entry.value) response = HttpResponse(content_type=guess_type(path)[0]) f = open(path, "r+b") response["Content-Disposition"] = "attachment; filename=%s" % f.name response.write(f.read()) f.close() return response
[ "def", "file_view", "(", "self", ",", "request", ",", "field_entry_id", ")", ":", "model", "=", "self", ".", "fieldentry_model", "field_entry", "=", "get_object_or_404", "(", "model", ",", "id", "=", "field_entry_id", ")", "path", "=", "join", "(", "fs", "...
Output the file for the requested field entry.
[ "Output", "the", "file", "for", "the", "requested", "field", "entry", "." ]
89fe03100ec09a6166cc0bf0022399bbbdca6298
https://github.com/stephenmcd/django-forms-builder/blob/89fe03100ec09a6166cc0bf0022399bbbdca6298/forms_builder/forms/admin.py#L191-L203
232,939
architv/soccer-cli
soccer/request_handler.py
RequestHandler.get_live_scores
def get_live_scores(self, use_12_hour_format): """Gets the live scores""" req = requests.get(RequestHandler.LIVE_URL) if req.status_code == requests.codes.ok: scores_data = [] scores = req.json() if len(scores["games"]) == 0: click.secho("No live action currently", fg="red", bold=True) return for score in scores['games']: # match football-data api structure d = {} d['homeTeam'] = {'name': score['homeTeamName']} d['awayTeam'] = {'name': score['awayTeamName']} d['score'] = {'fullTime': {'homeTeam': score['goalsHomeTeam'], 'awayTeam': score['goalsAwayTeam']}} d['league'] = score['league'] d['time'] = score['time'] scores_data.append(d) self.writer.live_scores(scores_data) else: click.secho("There was problem getting live scores", fg="red", bold=True)
python
def get_live_scores(self, use_12_hour_format): req = requests.get(RequestHandler.LIVE_URL) if req.status_code == requests.codes.ok: scores_data = [] scores = req.json() if len(scores["games"]) == 0: click.secho("No live action currently", fg="red", bold=True) return for score in scores['games']: # match football-data api structure d = {} d['homeTeam'] = {'name': score['homeTeamName']} d['awayTeam'] = {'name': score['awayTeamName']} d['score'] = {'fullTime': {'homeTeam': score['goalsHomeTeam'], 'awayTeam': score['goalsAwayTeam']}} d['league'] = score['league'] d['time'] = score['time'] scores_data.append(d) self.writer.live_scores(scores_data) else: click.secho("There was problem getting live scores", fg="red", bold=True)
[ "def", "get_live_scores", "(", "self", ",", "use_12_hour_format", ")", ":", "req", "=", "requests", ".", "get", "(", "RequestHandler", ".", "LIVE_URL", ")", "if", "req", ".", "status_code", "==", "requests", ".", "codes", ".", "ok", ":", "scores_data", "="...
Gets the live scores
[ "Gets", "the", "live", "scores" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/request_handler.py#L32-L54
232,940
architv/soccer-cli
soccer/request_handler.py
RequestHandler.get_team_scores
def get_team_scores(self, team, time, show_upcoming, use_12_hour_format): """Queries the API and gets the particular team scores""" team_id = self.team_names.get(team, None) time_frame = 'n' if show_upcoming else 'p' if team_id: try: req = self._get('teams/{team_id}/matches?timeFrame={time_frame}{time}'.format( team_id=team_id, time_frame=time_frame, time=time)) team_scores = req.json() if len(team_scores["matches"]) == 0: click.secho("No action during past week. Change the time " "parameter to get more fixtures.", fg="red", bold=True) else: self.writer.team_scores(team_scores, time, show_upcoming, use_12_hour_format) except APIErrorException as e: click.secho(e.args[0], fg="red", bold=True) else: click.secho("Team code is not correct.", fg="red", bold=True)
python
def get_team_scores(self, team, time, show_upcoming, use_12_hour_format): team_id = self.team_names.get(team, None) time_frame = 'n' if show_upcoming else 'p' if team_id: try: req = self._get('teams/{team_id}/matches?timeFrame={time_frame}{time}'.format( team_id=team_id, time_frame=time_frame, time=time)) team_scores = req.json() if len(team_scores["matches"]) == 0: click.secho("No action during past week. Change the time " "parameter to get more fixtures.", fg="red", bold=True) else: self.writer.team_scores(team_scores, time, show_upcoming, use_12_hour_format) except APIErrorException as e: click.secho(e.args[0], fg="red", bold=True) else: click.secho("Team code is not correct.", fg="red", bold=True)
[ "def", "get_team_scores", "(", "self", ",", "team", ",", "time", ",", "show_upcoming", ",", "use_12_hour_format", ")", ":", "team_id", "=", "self", ".", "team_names", ".", "get", "(", "team", ",", "None", ")", "time_frame", "=", "'n'", "if", "show_upcoming...
Queries the API and gets the particular team scores
[ "Queries", "the", "API", "and", "gets", "the", "particular", "team", "scores" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/request_handler.py#L56-L75
232,941
architv/soccer-cli
soccer/request_handler.py
RequestHandler.get_standings
def get_standings(self, league): """Queries the API and gets the standings for a particular league""" league_id = self.league_ids[league] try: req = self._get('competitions/{id}/standings'.format( id=league_id)) self.writer.standings(req.json(), league) except APIErrorException: # Click handles incorrect League codes so this will only come up # if that league does not have standings available. ie. Champions League click.secho("No standings availble for {league}.".format(league=league), fg="red", bold=True)
python
def get_standings(self, league): league_id = self.league_ids[league] try: req = self._get('competitions/{id}/standings'.format( id=league_id)) self.writer.standings(req.json(), league) except APIErrorException: # Click handles incorrect League codes so this will only come up # if that league does not have standings available. ie. Champions League click.secho("No standings availble for {league}.".format(league=league), fg="red", bold=True)
[ "def", "get_standings", "(", "self", ",", "league", ")", ":", "league_id", "=", "self", ".", "league_ids", "[", "league", "]", "try", ":", "req", "=", "self", ".", "_get", "(", "'competitions/{id}/standings'", ".", "format", "(", "id", "=", "league_id", ...
Queries the API and gets the standings for a particular league
[ "Queries", "the", "API", "and", "gets", "the", "standings", "for", "a", "particular", "league" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/request_handler.py#L77-L88
232,942
architv/soccer-cli
soccer/request_handler.py
RequestHandler.get_league_scores
def get_league_scores(self, league, time, show_upcoming, use_12_hour_format): """ Queries the API and fetches the scores for fixtures based upon the league and time parameter """ time_frame = 'n' if show_upcoming else 'p' if league: try: league_id = self.league_ids[league] req = self._get('competitions/{id}/matches?timeFrame={time_frame}{time}'.format( id=league_id, time_frame=time_frame, time=str(time))) fixtures_results = req.json() # no fixtures in the past week. display a help message and return if len(fixtures_results["matches"]) == 0: click.secho("No {league} matches in the past week.".format(league=league), fg="red", bold=True) return self.writer.league_scores(fixtures_results, time, show_upcoming, use_12_hour_format) except APIErrorException: click.secho("No data for the given league.", fg="red", bold=True) else: # When no league specified. Print all available in time frame. try: req = self._get('matches?timeFrame={time_frame}{time}'.format( time_frame=time_frame, time=str(time))) fixtures_results = req.json() self.writer.league_scores(fixtures_results, time, show_upcoming, use_12_hour_format) except APIErrorException: click.secho("No data available.", fg="red", bold=True)
python
def get_league_scores(self, league, time, show_upcoming, use_12_hour_format): time_frame = 'n' if show_upcoming else 'p' if league: try: league_id = self.league_ids[league] req = self._get('competitions/{id}/matches?timeFrame={time_frame}{time}'.format( id=league_id, time_frame=time_frame, time=str(time))) fixtures_results = req.json() # no fixtures in the past week. display a help message and return if len(fixtures_results["matches"]) == 0: click.secho("No {league} matches in the past week.".format(league=league), fg="red", bold=True) return self.writer.league_scores(fixtures_results, time, show_upcoming, use_12_hour_format) except APIErrorException: click.secho("No data for the given league.", fg="red", bold=True) else: # When no league specified. Print all available in time frame. try: req = self._get('matches?timeFrame={time_frame}{time}'.format( time_frame=time_frame, time=str(time))) fixtures_results = req.json() self.writer.league_scores(fixtures_results, time, show_upcoming, use_12_hour_format) except APIErrorException: click.secho("No data available.", fg="red", bold=True)
[ "def", "get_league_scores", "(", "self", ",", "league", ",", "time", ",", "show_upcoming", ",", "use_12_hour_format", ")", ":", "time_frame", "=", "'n'", "if", "show_upcoming", "else", "'p'", "if", "league", ":", "try", ":", "league_id", "=", "self", ".", ...
Queries the API and fetches the scores for fixtures based upon the league and time parameter
[ "Queries", "the", "API", "and", "fetches", "the", "scores", "for", "fixtures", "based", "upon", "the", "league", "and", "time", "parameter" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/request_handler.py#L90-L124
232,943
architv/soccer-cli
soccer/request_handler.py
RequestHandler.get_team_players
def get_team_players(self, team): """ Queries the API and fetches the players for a particular team """ team_id = self.team_names.get(team, None) try: req = self._get('teams/{}/'.format(team_id)) team_players = req.json()['squad'] if not team_players: click.secho("No players found for this team", fg="red", bold=True) else: self.writer.team_players(team_players) except APIErrorException: click.secho("No data for the team. Please check the team code.", fg="red", bold=True)
python
def get_team_players(self, team): team_id = self.team_names.get(team, None) try: req = self._get('teams/{}/'.format(team_id)) team_players = req.json()['squad'] if not team_players: click.secho("No players found for this team", fg="red", bold=True) else: self.writer.team_players(team_players) except APIErrorException: click.secho("No data for the team. Please check the team code.", fg="red", bold=True)
[ "def", "get_team_players", "(", "self", ",", "team", ")", ":", "team_id", "=", "self", ".", "team_names", ".", "get", "(", "team", ",", "None", ")", "try", ":", "req", "=", "self", ".", "_get", "(", "'teams/{}/'", ".", "format", "(", "team_id", ")", ...
Queries the API and fetches the players for a particular team
[ "Queries", "the", "API", "and", "fetches", "the", "players", "for", "a", "particular", "team" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/request_handler.py#L126-L141
232,944
architv/soccer-cli
soccer/main.py
load_json
def load_json(file): """Load JSON file at app start""" here = os.path.dirname(os.path.abspath(__file__)) with open(os.path.join(here, file)) as jfile: data = json.load(jfile) return data
python
def load_json(file): here = os.path.dirname(os.path.abspath(__file__)) with open(os.path.join(here, file)) as jfile: data = json.load(jfile) return data
[ "def", "load_json", "(", "file", ")", ":", "here", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "here", ",", "file", ")", ")...
Load JSON file at app start
[ "Load", "JSON", "file", "at", "app", "start" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/main.py#L13-L18
232,945
architv/soccer-cli
soccer/main.py
get_input_key
def get_input_key(): """Input API key and validate""" click.secho("No API key found!", fg="yellow", bold=True) click.secho("Please visit {} and get an API token.".format(RequestHandler.BASE_URL), fg="yellow", bold=True) while True: confkey = click.prompt(click.style("Enter API key", fg="yellow", bold=True)) if len(confkey) == 32: # 32 chars try: int(confkey, 16) # hexadecimal except ValueError: click.secho("Invalid API key", fg="red", bold=True) else: break else: click.secho("Invalid API key", fg="red", bold=True) return confkey
python
def get_input_key(): click.secho("No API key found!", fg="yellow", bold=True) click.secho("Please visit {} and get an API token.".format(RequestHandler.BASE_URL), fg="yellow", bold=True) while True: confkey = click.prompt(click.style("Enter API key", fg="yellow", bold=True)) if len(confkey) == 32: # 32 chars try: int(confkey, 16) # hexadecimal except ValueError: click.secho("Invalid API key", fg="red", bold=True) else: break else: click.secho("Invalid API key", fg="red", bold=True) return confkey
[ "def", "get_input_key", "(", ")", ":", "click", ".", "secho", "(", "\"No API key found!\"", ",", "fg", "=", "\"yellow\"", ",", "bold", "=", "True", ")", "click", ".", "secho", "(", "\"Please visit {} and get an API token.\"", ".", "format", "(", "RequestHandler"...
Input API key and validate
[ "Input", "API", "key", "and", "validate" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/main.py#L26-L44
232,946
architv/soccer-cli
soccer/main.py
load_config_key
def load_config_key(): """Load API key from config file, write if needed""" global api_token try: api_token = os.environ['SOCCER_CLI_API_TOKEN'] except KeyError: home = os.path.expanduser("~") config = os.path.join(home, ".soccer-cli.ini") if not os.path.exists(config): with open(config, "w") as cfile: key = get_input_key() cfile.write(key) else: with open(config, "r") as cfile: key = cfile.read() if key: api_token = key else: os.remove(config) # remove 0-byte file click.secho('No API Token detected. ' 'Please visit {0} and get an API Token, ' 'which will be used by Soccer CLI ' 'to get access to the data.' .format(RequestHandler.BASE_URL), fg="red", bold=True) sys.exit(1) return api_token
python
def load_config_key(): global api_token try: api_token = os.environ['SOCCER_CLI_API_TOKEN'] except KeyError: home = os.path.expanduser("~") config = os.path.join(home, ".soccer-cli.ini") if not os.path.exists(config): with open(config, "w") as cfile: key = get_input_key() cfile.write(key) else: with open(config, "r") as cfile: key = cfile.read() if key: api_token = key else: os.remove(config) # remove 0-byte file click.secho('No API Token detected. ' 'Please visit {0} and get an API Token, ' 'which will be used by Soccer CLI ' 'to get access to the data.' .format(RequestHandler.BASE_URL), fg="red", bold=True) sys.exit(1) return api_token
[ "def", "load_config_key", "(", ")", ":", "global", "api_token", "try", ":", "api_token", "=", "os", ".", "environ", "[", "'SOCCER_CLI_API_TOKEN'", "]", "except", "KeyError", ":", "home", "=", "os", ".", "path", ".", "expanduser", "(", "\"~\"", ")", "config...
Load API key from config file, write if needed
[ "Load", "API", "key", "from", "config", "file", "write", "if", "needed" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/main.py#L47-L72
232,947
architv/soccer-cli
soccer/main.py
map_team_id
def map_team_id(code): """Take in team ID, read JSON file to map ID to name""" for team in TEAM_DATA: if team["code"] == code: click.secho(team["name"], fg="green") break else: click.secho("No team found for this code", fg="red", bold=True)
python
def map_team_id(code): for team in TEAM_DATA: if team["code"] == code: click.secho(team["name"], fg="green") break else: click.secho("No team found for this code", fg="red", bold=True)
[ "def", "map_team_id", "(", "code", ")", ":", "for", "team", "in", "TEAM_DATA", ":", "if", "team", "[", "\"code\"", "]", "==", "code", ":", "click", ".", "secho", "(", "team", "[", "\"name\"", "]", ",", "fg", "=", "\"green\"", ")", "break", "else", ...
Take in team ID, read JSON file to map ID to name
[ "Take", "in", "team", "ID", "read", "JSON", "file", "to", "map", "ID", "to", "name" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/main.py#L75-L82
232,948
architv/soccer-cli
soccer/main.py
list_team_codes
def list_team_codes(): """List team names in alphabetical order of team ID, per league.""" # Sort teams by league, then alphabetical by code cleanlist = sorted(TEAM_DATA, key=lambda k: (k["league"]["name"], k["code"])) # Get league names leaguenames = sorted(list(set([team["league"]["name"] for team in cleanlist]))) for league in leaguenames: teams = [team for team in cleanlist if team["league"]["name"] == league] click.secho(league, fg="green", bold=True) for team in teams: if team["code"] != "null": click.secho(u"{0}: {1}".format(team["code"], team["name"]), fg="yellow") click.secho("")
python
def list_team_codes(): # Sort teams by league, then alphabetical by code cleanlist = sorted(TEAM_DATA, key=lambda k: (k["league"]["name"], k["code"])) # Get league names leaguenames = sorted(list(set([team["league"]["name"] for team in cleanlist]))) for league in leaguenames: teams = [team for team in cleanlist if team["league"]["name"] == league] click.secho(league, fg="green", bold=True) for team in teams: if team["code"] != "null": click.secho(u"{0}: {1}".format(team["code"], team["name"]), fg="yellow") click.secho("")
[ "def", "list_team_codes", "(", ")", ":", "# Sort teams by league, then alphabetical by code", "cleanlist", "=", "sorted", "(", "TEAM_DATA", ",", "key", "=", "lambda", "k", ":", "(", "k", "[", "\"league\"", "]", "[", "\"name\"", "]", ",", "k", "[", "\"code\"", ...
List team names in alphabetical order of team ID, per league.
[ "List", "team", "names", "in", "alphabetical", "order", "of", "team", "ID", "per", "league", "." ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/main.py#L85-L97
232,949
architv/soccer-cli
soccer/main.py
main
def main(league, time, standings, team, live, use12hour, players, output_format, output_file, upcoming, lookup, listcodes, apikey): """ A CLI for live and past football scores from various football leagues. League codes: \b - WC: World Cup - EC: European Championship - CL: Champions League - PL: English Premier League - ELC: English Championship - FL1: French Ligue 1 - BL: German Bundesliga - SA: Serie A - DED: Eredivisie - PPL: Primeira Liga - PD: Primera Division - BSA: Brazil Serie A """ headers = {'X-Auth-Token': apikey} try: if output_format == 'stdout' and output_file: raise IncorrectParametersException('Printing output to stdout and ' 'saving to a file are mutually exclusive') writer = get_writer(output_format, output_file) rh = RequestHandler(headers, LEAGUE_IDS, TEAM_NAMES, writer) if listcodes: list_team_codes() return if live: rh.get_live_scores(use12hour) return if standings: if not league: raise IncorrectParametersException('Please specify a league. ' 'Example --standings --league=PL') if league == 'CL': raise IncorrectParametersException('Standings for CL - ' 'Champions League not supported') rh.get_standings(league) return if team: if lookup: map_team_id(team) return if players: rh.get_team_players(team) return else: rh.get_team_scores(team, time, upcoming, use12hour) return rh.get_league_scores(league, time, upcoming, use12hour) except IncorrectParametersException as e: click.secho(str(e), fg="red", bold=True)
python
def main(league, time, standings, team, live, use12hour, players, output_format, output_file, upcoming, lookup, listcodes, apikey): headers = {'X-Auth-Token': apikey} try: if output_format == 'stdout' and output_file: raise IncorrectParametersException('Printing output to stdout and ' 'saving to a file are mutually exclusive') writer = get_writer(output_format, output_file) rh = RequestHandler(headers, LEAGUE_IDS, TEAM_NAMES, writer) if listcodes: list_team_codes() return if live: rh.get_live_scores(use12hour) return if standings: if not league: raise IncorrectParametersException('Please specify a league. ' 'Example --standings --league=PL') if league == 'CL': raise IncorrectParametersException('Standings for CL - ' 'Champions League not supported') rh.get_standings(league) return if team: if lookup: map_team_id(team) return if players: rh.get_team_players(team) return else: rh.get_team_scores(team, time, upcoming, use12hour) return rh.get_league_scores(league, time, upcoming, use12hour) except IncorrectParametersException as e: click.secho(str(e), fg="red", bold=True)
[ "def", "main", "(", "league", ",", "time", ",", "standings", ",", "team", ",", "live", ",", "use12hour", ",", "players", ",", "output_format", ",", "output_file", ",", "upcoming", ",", "lookup", ",", "listcodes", ",", "apikey", ")", ":", "headers", "=", ...
A CLI for live and past football scores from various football leagues. League codes: \b - WC: World Cup - EC: European Championship - CL: Champions League - PL: English Premier League - ELC: English Championship - FL1: French Ligue 1 - BL: German Bundesliga - SA: Serie A - DED: Eredivisie - PPL: Primeira Liga - PD: Primera Division - BSA: Brazil Serie A
[ "A", "CLI", "for", "live", "and", "past", "football", "scores", "from", "various", "football", "leagues", "." ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/main.py#L133-L194
232,950
architv/soccer-cli
soccer/writers.py
Stdout.live_scores
def live_scores(self, live_scores): """Prints the live scores in a pretty format""" scores = sorted(live_scores, key=lambda x: x["league"]) for league, games in groupby(scores, key=lambda x: x["league"]): self.league_header(league) for game in games: self.scores(self.parse_result(game), add_new_line=False) click.secho(' %s' % Stdout.utc_to_local(game["time"], use_12_hour_format=False), fg=self.colors.TIME) click.echo()
python
def live_scores(self, live_scores): scores = sorted(live_scores, key=lambda x: x["league"]) for league, games in groupby(scores, key=lambda x: x["league"]): self.league_header(league) for game in games: self.scores(self.parse_result(game), add_new_line=False) click.secho(' %s' % Stdout.utc_to_local(game["time"], use_12_hour_format=False), fg=self.colors.TIME) click.echo()
[ "def", "live_scores", "(", "self", ",", "live_scores", ")", ":", "scores", "=", "sorted", "(", "live_scores", ",", "key", "=", "lambda", "x", ":", "x", "[", "\"league\"", "]", ")", "for", "league", ",", "games", "in", "groupby", "(", "scores", ",", "...
Prints the live scores in a pretty format
[ "Prints", "the", "live", "scores", "in", "a", "pretty", "format" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/writers.py#L67-L77
232,951
architv/soccer-cli
soccer/writers.py
Stdout.team_scores
def team_scores(self, team_scores, time, show_datetime, use_12_hour_format): """Prints the teams scores in a pretty format""" for score in team_scores["matches"]: if score["status"] == "FINISHED": click.secho("%s\t" % score["utcDate"].split('T')[0], fg=self.colors.TIME, nl=False) self.scores(self.parse_result(score)) elif show_datetime: self.scores(self.parse_result(score), add_new_line=False) click.secho(' %s' % Stdout.utc_to_local(score["utcDate"], use_12_hour_format, show_datetime), fg=self.colors.TIME)
python
def team_scores(self, team_scores, time, show_datetime, use_12_hour_format): for score in team_scores["matches"]: if score["status"] == "FINISHED": click.secho("%s\t" % score["utcDate"].split('T')[0], fg=self.colors.TIME, nl=False) self.scores(self.parse_result(score)) elif show_datetime: self.scores(self.parse_result(score), add_new_line=False) click.secho(' %s' % Stdout.utc_to_local(score["utcDate"], use_12_hour_format, show_datetime), fg=self.colors.TIME)
[ "def", "team_scores", "(", "self", ",", "team_scores", ",", "time", ",", "show_datetime", ",", "use_12_hour_format", ")", ":", "for", "score", "in", "team_scores", "[", "\"matches\"", "]", ":", "if", "score", "[", "\"status\"", "]", "==", "\"FINISHED\"", ":"...
Prints the teams scores in a pretty format
[ "Prints", "the", "teams", "scores", "in", "a", "pretty", "format" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/writers.py#L79-L91
232,952
architv/soccer-cli
soccer/writers.py
Stdout.team_players
def team_players(self, team): """Prints the team players in a pretty format""" players = sorted(team, key=lambda d: d['shirtNumber']) click.secho("%-4s %-25s %-20s %-20s %-15s" % ("N.", "NAME", "POSITION", "NATIONALITY", "BIRTHDAY"), bold=True, fg=self.colors.MISC) fmt = (u"{shirtNumber:<4} {name:<28} {position:<23} {nationality:<23}" u" {dateOfBirth:<18}") for player in players: click.secho(fmt.format(**player), bold=True)
python
def team_players(self, team): players = sorted(team, key=lambda d: d['shirtNumber']) click.secho("%-4s %-25s %-20s %-20s %-15s" % ("N.", "NAME", "POSITION", "NATIONALITY", "BIRTHDAY"), bold=True, fg=self.colors.MISC) fmt = (u"{shirtNumber:<4} {name:<28} {position:<23} {nationality:<23}" u" {dateOfBirth:<18}") for player in players: click.secho(fmt.format(**player), bold=True)
[ "def", "team_players", "(", "self", ",", "team", ")", ":", "players", "=", "sorted", "(", "team", ",", "key", "=", "lambda", "d", ":", "d", "[", "'shirtNumber'", "]", ")", "click", ".", "secho", "(", "\"%-4s %-25s %-20s %-20s %-15s\"", "%", "(", ...
Prints the team players in a pretty format
[ "Prints", "the", "team", "players", "in", "a", "pretty", "format" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/writers.py#L93-L103
232,953
architv/soccer-cli
soccer/writers.py
Stdout.standings
def standings(self, league_table, league): """ Prints the league standings in a pretty way """ click.secho("%-6s %-30s %-10s %-10s %-10s" % ("POS", "CLUB", "PLAYED", "GOAL DIFF", "POINTS")) for team in league_table["standings"][0]["table"]: if team["goalDifference"] >= 0: team["goalDifference"] = ' ' + str(team["goalDifference"]) # Define the upper and lower bounds for Champions League, # Europa League and Relegation places. # This is so we can highlight them appropriately. cl_upper, cl_lower = LEAGUE_PROPERTIES[league]['cl'] el_upper, el_lower = LEAGUE_PROPERTIES[league]['el'] rl_upper, rl_lower = LEAGUE_PROPERTIES[league]['rl'] team['teamName'] = team['team']['name'] team_str = (u"{position:<7} {teamName:<33} {playedGames:<12}" u" {goalDifference:<14} {points}").format(**team) if cl_upper <= team["position"] <= cl_lower: click.secho(team_str, bold=True, fg=self.colors.CL_POSITION) elif el_upper <= team["position"] <= el_lower: click.secho(team_str, fg=self.colors.EL_POSITION) elif rl_upper <= team["position"] <= rl_lower: click.secho(team_str, fg=self.colors.RL_POSITION) else: click.secho(team_str, fg=self.colors.POSITION)
python
def standings(self, league_table, league): click.secho("%-6s %-30s %-10s %-10s %-10s" % ("POS", "CLUB", "PLAYED", "GOAL DIFF", "POINTS")) for team in league_table["standings"][0]["table"]: if team["goalDifference"] >= 0: team["goalDifference"] = ' ' + str(team["goalDifference"]) # Define the upper and lower bounds for Champions League, # Europa League and Relegation places. # This is so we can highlight them appropriately. cl_upper, cl_lower = LEAGUE_PROPERTIES[league]['cl'] el_upper, el_lower = LEAGUE_PROPERTIES[league]['el'] rl_upper, rl_lower = LEAGUE_PROPERTIES[league]['rl'] team['teamName'] = team['team']['name'] team_str = (u"{position:<7} {teamName:<33} {playedGames:<12}" u" {goalDifference:<14} {points}").format(**team) if cl_upper <= team["position"] <= cl_lower: click.secho(team_str, bold=True, fg=self.colors.CL_POSITION) elif el_upper <= team["position"] <= el_lower: click.secho(team_str, fg=self.colors.EL_POSITION) elif rl_upper <= team["position"] <= rl_lower: click.secho(team_str, fg=self.colors.RL_POSITION) else: click.secho(team_str, fg=self.colors.POSITION)
[ "def", "standings", "(", "self", ",", "league_table", ",", "league", ")", ":", "click", ".", "secho", "(", "\"%-6s %-30s %-10s %-10s %-10s\"", "%", "(", "\"POS\"", ",", "\"CLUB\"", ",", "\"PLAYED\"", ",", "\"GOAL DIFF\"", ",", "\"POINTS\"", ")", ")", ...
Prints the league standings in a pretty way
[ "Prints", "the", "league", "standings", "in", "a", "pretty", "way" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/writers.py#L105-L129
232,954
architv/soccer-cli
soccer/writers.py
Stdout.league_scores
def league_scores(self, total_data, time, show_datetime, use_12_hour_format): """Prints the data in a pretty format""" for match in total_data['matches']: self.scores(self.parse_result(match), add_new_line=not show_datetime) if show_datetime: click.secho(' %s' % Stdout.utc_to_local(match["utcDate"], use_12_hour_format, show_datetime), fg=self.colors.TIME) click.echo()
python
def league_scores(self, total_data, time, show_datetime, use_12_hour_format): for match in total_data['matches']: self.scores(self.parse_result(match), add_new_line=not show_datetime) if show_datetime: click.secho(' %s' % Stdout.utc_to_local(match["utcDate"], use_12_hour_format, show_datetime), fg=self.colors.TIME) click.echo()
[ "def", "league_scores", "(", "self", ",", "total_data", ",", "time", ",", "show_datetime", ",", "use_12_hour_format", ")", ":", "for", "match", "in", "total_data", "[", "'matches'", "]", ":", "self", ".", "scores", "(", "self", ".", "parse_result", "(", "m...
Prints the data in a pretty format
[ "Prints", "the", "data", "in", "a", "pretty", "format" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/writers.py#L131-L141
232,955
architv/soccer-cli
soccer/writers.py
Stdout.league_header
def league_header(self, league): """Prints the league header""" league_name = " {0} ".format(league) click.secho("{:=^62}".format(league_name), fg=self.colors.MISC) click.echo()
python
def league_header(self, league): league_name = " {0} ".format(league) click.secho("{:=^62}".format(league_name), fg=self.colors.MISC) click.echo()
[ "def", "league_header", "(", "self", ",", "league", ")", ":", "league_name", "=", "\" {0} \"", ".", "format", "(", "league", ")", "click", ".", "secho", "(", "\"{:=^62}\"", ".", "format", "(", "league_name", ")", ",", "fg", "=", "self", ".", "colors", ...
Prints the league header
[ "Prints", "the", "league", "header" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/writers.py#L143-L147
232,956
architv/soccer-cli
soccer/writers.py
Stdout.scores
def scores(self, result, add_new_line=True): """Prints out the scores in a pretty format""" if result.goalsHomeTeam > result.goalsAwayTeam: homeColor, awayColor = (self.colors.WIN, self.colors.LOSE) elif result.goalsHomeTeam < result.goalsAwayTeam: homeColor, awayColor = (self.colors.LOSE, self.colors.WIN) else: homeColor = awayColor = self.colors.TIE click.secho('%-25s %2s' % (result.homeTeam, result.goalsHomeTeam), fg=homeColor, nl=False) click.secho(" vs ", nl=False) click.secho('%2s %s' % (result.goalsAwayTeam, result.awayTeam.rjust(25)), fg=awayColor, nl=add_new_line)
python
def scores(self, result, add_new_line=True): if result.goalsHomeTeam > result.goalsAwayTeam: homeColor, awayColor = (self.colors.WIN, self.colors.LOSE) elif result.goalsHomeTeam < result.goalsAwayTeam: homeColor, awayColor = (self.colors.LOSE, self.colors.WIN) else: homeColor = awayColor = self.colors.TIE click.secho('%-25s %2s' % (result.homeTeam, result.goalsHomeTeam), fg=homeColor, nl=False) click.secho(" vs ", nl=False) click.secho('%2s %s' % (result.goalsAwayTeam, result.awayTeam.rjust(25)), fg=awayColor, nl=add_new_line)
[ "def", "scores", "(", "self", ",", "result", ",", "add_new_line", "=", "True", ")", ":", "if", "result", ".", "goalsHomeTeam", ">", "result", ".", "goalsAwayTeam", ":", "homeColor", ",", "awayColor", "=", "(", "self", ".", "colors", ".", "WIN", ",", "s...
Prints out the scores in a pretty format
[ "Prints", "out", "the", "scores", "in", "a", "pretty", "format" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/writers.py#L149-L163
232,957
architv/soccer-cli
soccer/writers.py
Stdout.parse_result
def parse_result(self, data): """Parses the results and returns a Result namedtuple""" def valid_score(score): return "" if score is None else score return self.Result( data["homeTeam"]["name"], valid_score(data["score"]["fullTime"]["homeTeam"]), data["awayTeam"]["name"], valid_score(data["score"]["fullTime"]["awayTeam"]))
python
def parse_result(self, data): def valid_score(score): return "" if score is None else score return self.Result( data["homeTeam"]["name"], valid_score(data["score"]["fullTime"]["homeTeam"]), data["awayTeam"]["name"], valid_score(data["score"]["fullTime"]["awayTeam"]))
[ "def", "parse_result", "(", "self", ",", "data", ")", ":", "def", "valid_score", "(", "score", ")", ":", "return", "\"\"", "if", "score", "is", "None", "else", "score", "return", "self", ".", "Result", "(", "data", "[", "\"homeTeam\"", "]", "[", "\"nam...
Parses the results and returns a Result namedtuple
[ "Parses", "the", "results", "and", "returns", "a", "Result", "namedtuple" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/writers.py#L165-L174
232,958
architv/soccer-cli
soccer/writers.py
Stdout.utc_to_local
def utc_to_local(time_str, use_12_hour_format, show_datetime=False): """Converts the API UTC time string to the local user time.""" if not (time_str.endswith(" UTC") or time_str.endswith("Z")): return time_str today_utc = datetime.datetime.utcnow() utc_local_diff = today_utc - datetime.datetime.now() if time_str.endswith(" UTC"): time_str, _ = time_str.split(" UTC") utc_time = datetime.datetime.strptime(time_str, '%I:%M %p') utc_datetime = datetime.datetime(today_utc.year, today_utc.month, today_utc.day, utc_time.hour, utc_time.minute) else: utc_datetime = datetime.datetime.strptime(time_str, '%Y-%m-%dT%H:%M:%SZ') local_time = utc_datetime - utc_local_diff if use_12_hour_format: date_format = '%I:%M %p' if not show_datetime else '%a %d, %I:%M %p' else: date_format = '%H:%M' if not show_datetime else '%a %d, %H:%M' return datetime.datetime.strftime(local_time, date_format)
python
def utc_to_local(time_str, use_12_hour_format, show_datetime=False): if not (time_str.endswith(" UTC") or time_str.endswith("Z")): return time_str today_utc = datetime.datetime.utcnow() utc_local_diff = today_utc - datetime.datetime.now() if time_str.endswith(" UTC"): time_str, _ = time_str.split(" UTC") utc_time = datetime.datetime.strptime(time_str, '%I:%M %p') utc_datetime = datetime.datetime(today_utc.year, today_utc.month, today_utc.day, utc_time.hour, utc_time.minute) else: utc_datetime = datetime.datetime.strptime(time_str, '%Y-%m-%dT%H:%M:%SZ') local_time = utc_datetime - utc_local_diff if use_12_hour_format: date_format = '%I:%M %p' if not show_datetime else '%a %d, %I:%M %p' else: date_format = '%H:%M' if not show_datetime else '%a %d, %H:%M' return datetime.datetime.strftime(local_time, date_format)
[ "def", "utc_to_local", "(", "time_str", ",", "use_12_hour_format", ",", "show_datetime", "=", "False", ")", ":", "if", "not", "(", "time_str", ".", "endswith", "(", "\" UTC\"", ")", "or", "time_str", ".", "endswith", "(", "\"Z\"", ")", ")", ":", "return", ...
Converts the API UTC time string to the local user time.
[ "Converts", "the", "API", "UTC", "time", "string", "to", "the", "local", "user", "time", "." ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/writers.py#L177-L204
232,959
architv/soccer-cli
soccer/writers.py
Csv.live_scores
def live_scores(self, live_scores): """Store output of live scores to a CSV file""" headers = ['League', 'Home Team Name', 'Home Team Goals', 'Away Team Goals', 'Away Team Name'] result = [headers] result.extend([game['league'], game['homeTeamName'], game['goalsHomeTeam'], game['goalsAwayTeam'], game['awayTeamName']] for game in live_scores['games']) self.generate_output(result)
python
def live_scores(self, live_scores): headers = ['League', 'Home Team Name', 'Home Team Goals', 'Away Team Goals', 'Away Team Name'] result = [headers] result.extend([game['league'], game['homeTeamName'], game['goalsHomeTeam'], game['goalsAwayTeam'], game['awayTeamName']] for game in live_scores['games']) self.generate_output(result)
[ "def", "live_scores", "(", "self", ",", "live_scores", ")", ":", "headers", "=", "[", "'League'", ",", "'Home Team Name'", ",", "'Home Team Goals'", ",", "'Away Team Goals'", ",", "'Away Team Name'", "]", "result", "=", "[", "headers", "]", "result", ".", "ext...
Store output of live scores to a CSV file
[ "Store", "output", "of", "live", "scores", "to", "a", "CSV", "file" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/writers.py#L220-L228
232,960
architv/soccer-cli
soccer/writers.py
Csv.team_scores
def team_scores(self, team_scores, time): """Store output of team scores to a CSV file""" headers = ['Date', 'Home Team Name', 'Home Team Goals', 'Away Team Goals', 'Away Team Name'] result = [headers] result.extend([score["utcDate"].split('T')[0], score['homeTeam']['name'], score['score']['fullTime']['homeTeam'], score['score']['fullTime']['awayTeam'], score['awayTeam']['name']] for score in team_scores['matches'] if score['status'] == 'FINISHED') self.generate_output(result)
python
def team_scores(self, team_scores, time): headers = ['Date', 'Home Team Name', 'Home Team Goals', 'Away Team Goals', 'Away Team Name'] result = [headers] result.extend([score["utcDate"].split('T')[0], score['homeTeam']['name'], score['score']['fullTime']['homeTeam'], score['score']['fullTime']['awayTeam'], score['awayTeam']['name']] for score in team_scores['matches'] if score['status'] == 'FINISHED') self.generate_output(result)
[ "def", "team_scores", "(", "self", ",", "team_scores", ",", "time", ")", ":", "headers", "=", "[", "'Date'", ",", "'Home Team Name'", ",", "'Home Team Goals'", ",", "'Away Team Goals'", ",", "'Away Team Name'", "]", "result", "=", "[", "headers", "]", "result"...
Store output of team scores to a CSV file
[ "Store", "output", "of", "team", "scores", "to", "a", "CSV", "file" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/writers.py#L230-L242
232,961
architv/soccer-cli
soccer/writers.py
Csv.team_players
def team_players(self, team): """Store output of team players to a CSV file""" headers = ['Jersey Number', 'Name', 'Position', 'Nationality', 'Date of Birth'] result = [headers] result.extend([player['shirtNumber'], player['name'], player['position'], player['nationality'], player['dateOfBirth']] for player in team) self.generate_output(result)
python
def team_players(self, team): headers = ['Jersey Number', 'Name', 'Position', 'Nationality', 'Date of Birth'] result = [headers] result.extend([player['shirtNumber'], player['name'], player['position'], player['nationality'], player['dateOfBirth']] for player in team) self.generate_output(result)
[ "def", "team_players", "(", "self", ",", "team", ")", ":", "headers", "=", "[", "'Jersey Number'", ",", "'Name'", ",", "'Position'", ",", "'Nationality'", ",", "'Date of Birth'", "]", "result", "=", "[", "headers", "]", "result", ".", "extend", "(", "[", ...
Store output of team players to a CSV file
[ "Store", "output", "of", "team", "players", "to", "a", "CSV", "file" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/writers.py#L244-L256
232,962
architv/soccer-cli
soccer/writers.py
Csv.standings
def standings(self, league_table, league): """Store output of league standings to a CSV file""" headers = ['Position', 'Team Name', 'Games Played', 'Goal For', 'Goals Against', 'Goal Difference', 'Points'] result = [headers] result.extend([team['position'], team['team']['name'], team['playedGames'], team['goalsFor'], team['goalsAgainst'], team['goalDifference'], team['points']] for team in league_table['standings'][0]['table']) self.generate_output(result)
python
def standings(self, league_table, league): headers = ['Position', 'Team Name', 'Games Played', 'Goal For', 'Goals Against', 'Goal Difference', 'Points'] result = [headers] result.extend([team['position'], team['team']['name'], team['playedGames'], team['goalsFor'], team['goalsAgainst'], team['goalDifference'], team['points']] for team in league_table['standings'][0]['table']) self.generate_output(result)
[ "def", "standings", "(", "self", ",", "league_table", ",", "league", ")", ":", "headers", "=", "[", "'Position'", ",", "'Team Name'", ",", "'Games Played'", ",", "'Goal For'", ",", "'Goals Against'", ",", "'Goal Difference'", ",", "'Points'", "]", "result", "=...
Store output of league standings to a CSV file
[ "Store", "output", "of", "league", "standings", "to", "a", "CSV", "file" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/writers.py#L258-L271
232,963
architv/soccer-cli
soccer/writers.py
Csv.league_scores
def league_scores(self, total_data, time, show_upcoming, use_12_hour_format): """Store output of fixtures based on league and time to a CSV file""" headers = ['League', 'Home Team Name', 'Home Team Goals', 'Away Team Goals', 'Away Team Name'] result = [headers] league = total_data['competition']['name'] result.extend([league, score['homeTeam']['name'], score['score']['fullTime']['homeTeam'], score['score']['fullTime']['awayTeam'], score['awayTeam']['name']] for score in total_data['matches']) self.generate_output(result)
python
def league_scores(self, total_data, time, show_upcoming, use_12_hour_format): headers = ['League', 'Home Team Name', 'Home Team Goals', 'Away Team Goals', 'Away Team Name'] result = [headers] league = total_data['competition']['name'] result.extend([league, score['homeTeam']['name'], score['score']['fullTime']['homeTeam'], score['score']['fullTime']['awayTeam'], score['awayTeam']['name']] for score in total_data['matches']) self.generate_output(result)
[ "def", "league_scores", "(", "self", ",", "total_data", ",", "time", ",", "show_upcoming", ",", "use_12_hour_format", ")", ":", "headers", "=", "[", "'League'", ",", "'Home Team Name'", ",", "'Home Team Goals'", ",", "'Away Team Goals'", ",", "'Away Team Name'", "...
Store output of fixtures based on league and time to a CSV file
[ "Store", "output", "of", "fixtures", "based", "on", "league", "and", "time", "to", "a", "CSV", "file" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/writers.py#L273-L285
232,964
architv/soccer-cli
soccer/writers.py
Json.team_scores
def team_scores(self, team_scores, time): """Store output of team scores to a JSON file""" data = [] for score in team_scores['matches']: if score['status'] == 'FINISHED': item = {'date': score["utcDate"].split('T')[0], 'homeTeamName': score['homeTeam']['name'], 'goalsHomeTeam': score['score']['fullTime']['homeTeam'], 'goalsAwayTeam': score['score']['fullTime']['awayTeam'], 'awayTeamName': score['awayTeam']['name']} data.append(item) self.generate_output({'team_scores': data})
python
def team_scores(self, team_scores, time): data = [] for score in team_scores['matches']: if score['status'] == 'FINISHED': item = {'date': score["utcDate"].split('T')[0], 'homeTeamName': score['homeTeam']['name'], 'goalsHomeTeam': score['score']['fullTime']['homeTeam'], 'goalsAwayTeam': score['score']['fullTime']['awayTeam'], 'awayTeamName': score['awayTeam']['name']} data.append(item) self.generate_output({'team_scores': data})
[ "def", "team_scores", "(", "self", ",", "team_scores", ",", "time", ")", ":", "data", "=", "[", "]", "for", "score", "in", "team_scores", "[", "'matches'", "]", ":", "if", "score", "[", "'status'", "]", "==", "'FINISHED'", ":", "item", "=", "{", "'da...
Store output of team scores to a JSON file
[ "Store", "output", "of", "team", "scores", "to", "a", "JSON", "file" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/writers.py#L306-L317
232,965
architv/soccer-cli
soccer/writers.py
Json.standings
def standings(self, league_table, league): """Store output of league standings to a JSON file""" data = [] for team in league_table['standings'][0]['table']: item = {'position': team['position'], 'teamName': team['team'], 'playedGames': team['playedGames'], 'goalsFor': team['goalsFor'], 'goalsAgainst': team['goalsAgainst'], 'goalDifference': team['goalDifference'], 'points': team['points']} data.append(item) self.generate_output({'standings': data})
python
def standings(self, league_table, league): data = [] for team in league_table['standings'][0]['table']: item = {'position': team['position'], 'teamName': team['team'], 'playedGames': team['playedGames'], 'goalsFor': team['goalsFor'], 'goalsAgainst': team['goalsAgainst'], 'goalDifference': team['goalDifference'], 'points': team['points']} data.append(item) self.generate_output({'standings': data})
[ "def", "standings", "(", "self", ",", "league_table", ",", "league", ")", ":", "data", "=", "[", "]", "for", "team", "in", "league_table", "[", "'standings'", "]", "[", "0", "]", "[", "'table'", "]", ":", "item", "=", "{", "'position'", ":", "team", ...
Store output of league standings to a JSON file
[ "Store", "output", "of", "league", "standings", "to", "a", "JSON", "file" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/writers.py#L319-L331
232,966
architv/soccer-cli
soccer/writers.py
Json.team_players
def team_players(self, team): """Store output of team players to a JSON file""" keys = 'shirtNumber name position nationality dateOfBirth'.split() data = [{key: player[key] for key in keys} for player in team] self.generate_output({'players': data})
python
def team_players(self, team): keys = 'shirtNumber name position nationality dateOfBirth'.split() data = [{key: player[key] for key in keys} for player in team] self.generate_output({'players': data})
[ "def", "team_players", "(", "self", ",", "team", ")", ":", "keys", "=", "'shirtNumber name position nationality dateOfBirth'", ".", "split", "(", ")", "data", "=", "[", "{", "key", ":", "player", "[", "key", "]", "for", "key", "in", "keys", "}", "for", "...
Store output of team players to a JSON file
[ "Store", "output", "of", "team", "players", "to", "a", "JSON", "file" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/writers.py#L333-L337
232,967
architv/soccer-cli
soccer/writers.py
Json.league_scores
def league_scores(self, total_data, time): """Store output of fixtures based on league and time to a JSON file""" data = [] for league, score in self.supported_leagues(total_data): item = {'league': league, 'homeTeamName': score['homeTeamName'], 'goalsHomeTeam': score['result']['goalsHomeTeam'], 'goalsAwayTeam': score['result']['goalsAwayTeam'], 'awayTeamName': score['awayTeamName']} data.append(item) self.generate_output({'league_scores': data, 'time': time})
python
def league_scores(self, total_data, time): data = [] for league, score in self.supported_leagues(total_data): item = {'league': league, 'homeTeamName': score['homeTeamName'], 'goalsHomeTeam': score['result']['goalsHomeTeam'], 'goalsAwayTeam': score['result']['goalsAwayTeam'], 'awayTeamName': score['awayTeamName']} data.append(item) self.generate_output({'league_scores': data, 'time': time})
[ "def", "league_scores", "(", "self", ",", "total_data", ",", "time", ")", ":", "data", "=", "[", "]", "for", "league", ",", "score", "in", "self", ".", "supported_leagues", "(", "total_data", ")", ":", "item", "=", "{", "'league'", ":", "league", ",", ...
Store output of fixtures based on league and time to a JSON file
[ "Store", "output", "of", "fixtures", "based", "on", "league", "and", "time", "to", "a", "JSON", "file" ]
472e9f492f7633a8e9739e228a6c31de454da88b
https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/writers.py#L339-L348
232,968
pmneila/morphsnakes
examples.py
example_camera
def example_camera(): """ Example with `morphological_chan_vese` with using the default initialization of the level-set. """ logging.info('Running: example_camera (MorphACWE)...') # Load the image. img = imread(PATH_IMG_CAMERA)/255.0 # Callback for visual plotting callback = visual_callback_2d(img) # Morphological Chan-Vese (or ACWE) ms.morphological_chan_vese(img, 35, smoothing=3, lambda1=1, lambda2=1, iter_callback=callback)
python
def example_camera(): logging.info('Running: example_camera (MorphACWE)...') # Load the image. img = imread(PATH_IMG_CAMERA)/255.0 # Callback for visual plotting callback = visual_callback_2d(img) # Morphological Chan-Vese (or ACWE) ms.morphological_chan_vese(img, 35, smoothing=3, lambda1=1, lambda2=1, iter_callback=callback)
[ "def", "example_camera", "(", ")", ":", "logging", ".", "info", "(", "'Running: example_camera (MorphACWE)...'", ")", "# Load the image.", "img", "=", "imread", "(", "PATH_IMG_CAMERA", ")", "/", "255.0", "# Callback for visual plotting", "callback", "=", "visual_callbac...
Example with `morphological_chan_vese` with using the default initialization of the level-set.
[ "Example", "with", "morphological_chan_vese", "with", "using", "the", "default", "initialization", "of", "the", "level", "-", "set", "." ]
aab66e70f86308d7b1927d76869a1a562120f849
https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/examples.py#L226-L243
232,969
pmneila/morphsnakes
morphsnakes_v1.py
operator_si
def operator_si(u): """operator_si operator.""" global _aux if np.ndim(u) == 2: P = _P2 elif np.ndim(u) == 3: P = _P3 else: raise ValueError("u has an invalid number of dimensions " "(should be 2 or 3)") if u.shape != _aux.shape[1:]: _aux = np.zeros((len(P),) + u.shape) for _aux_i, P_i in zip(_aux, P): _aux_i[:] = binary_erosion(u, P_i) return _aux.max(0)
python
def operator_si(u): global _aux if np.ndim(u) == 2: P = _P2 elif np.ndim(u) == 3: P = _P3 else: raise ValueError("u has an invalid number of dimensions " "(should be 2 or 3)") if u.shape != _aux.shape[1:]: _aux = np.zeros((len(P),) + u.shape) for _aux_i, P_i in zip(_aux, P): _aux_i[:] = binary_erosion(u, P_i) return _aux.max(0)
[ "def", "operator_si", "(", "u", ")", ":", "global", "_aux", "if", "np", ".", "ndim", "(", "u", ")", "==", "2", ":", "P", "=", "_P2", "elif", "np", ".", "ndim", "(", "u", ")", "==", "3", ":", "P", "=", "_P3", "else", ":", "raise", "ValueError"...
operator_si operator.
[ "operator_si", "operator", "." ]
aab66e70f86308d7b1927d76869a1a562120f849
https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes_v1.py#L74-L91
232,970
pmneila/morphsnakes
morphsnakes_v1.py
operator_is
def operator_is(u): """operator_is operator.""" global _aux if np.ndim(u) == 2: P = _P2 elif np.ndim(u) == 3: P = _P3 else: raise ValueError("u has an invalid number of dimensions " "(should be 2 or 3)") if u.shape != _aux.shape[1:]: _aux = np.zeros((len(P),) + u.shape) for _aux_i, P_i in zip(_aux, P): _aux_i[:] = binary_dilation(u, P_i) return _aux.min(0)
python
def operator_is(u): global _aux if np.ndim(u) == 2: P = _P2 elif np.ndim(u) == 3: P = _P3 else: raise ValueError("u has an invalid number of dimensions " "(should be 2 or 3)") if u.shape != _aux.shape[1:]: _aux = np.zeros((len(P),) + u.shape) for _aux_i, P_i in zip(_aux, P): _aux_i[:] = binary_dilation(u, P_i) return _aux.min(0)
[ "def", "operator_is", "(", "u", ")", ":", "global", "_aux", "if", "np", ".", "ndim", "(", "u", ")", "==", "2", ":", "P", "=", "_P2", "elif", "np", ".", "ndim", "(", "u", ")", "==", "3", ":", "P", "=", "_P3", "else", ":", "raise", "ValueError"...
operator_is operator.
[ "operator_is", "operator", "." ]
aab66e70f86308d7b1927d76869a1a562120f849
https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes_v1.py#L94-L111
232,971
pmneila/morphsnakes
morphsnakes_v1.py
gborders
def gborders(img, alpha=1.0, sigma=1.0): """Stopping criterion for image borders.""" # The norm of the gradient. gradnorm = gaussian_gradient_magnitude(img, sigma, mode='constant') return 1.0/np.sqrt(1.0 + alpha*gradnorm)
python
def gborders(img, alpha=1.0, sigma=1.0): # The norm of the gradient. gradnorm = gaussian_gradient_magnitude(img, sigma, mode='constant') return 1.0/np.sqrt(1.0 + alpha*gradnorm)
[ "def", "gborders", "(", "img", ",", "alpha", "=", "1.0", ",", "sigma", "=", "1.0", ")", ":", "# The norm of the gradient.", "gradnorm", "=", "gaussian_gradient_magnitude", "(", "img", ",", "sigma", ",", "mode", "=", "'constant'", ")", "return", "1.0", "/", ...
Stopping criterion for image borders.
[ "Stopping", "criterion", "for", "image", "borders", "." ]
aab66e70f86308d7b1927d76869a1a562120f849
https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes_v1.py#L121-L125
232,972
pmneila/morphsnakes
morphsnakes_v1.py
MorphACWE.step
def step(self): """Perform a single step of the morphological Chan-Vese evolution.""" # Assign attributes to local variables for convenience. u = self._u if u is None: raise ValueError("the levelset function is not set " "(use set_levelset)") data = self.data # Determine c0 and c1. inside = (u > 0) outside = (u <= 0) c0 = data[outside].sum() / float(outside.sum()) c1 = data[inside].sum() / float(inside.sum()) # Image attachment. dres = np.array(np.gradient(u)) abs_dres = np.abs(dres).sum(0) #aux = abs_dres * (c0 - c1) * (c0 + c1 - 2*data) aux = abs_dres * (self.lambda1*(data - c1) ** 2 - self.lambda2*(data - c0) ** 2) res = np.copy(u) res[aux < 0] = 1 res[aux > 0] = 0 # Smoothing. for i in range(self.smoothing): res = curvop(res) self._u = res
python
def step(self): # Assign attributes to local variables for convenience. u = self._u if u is None: raise ValueError("the levelset function is not set " "(use set_levelset)") data = self.data # Determine c0 and c1. inside = (u > 0) outside = (u <= 0) c0 = data[outside].sum() / float(outside.sum()) c1 = data[inside].sum() / float(inside.sum()) # Image attachment. dres = np.array(np.gradient(u)) abs_dres = np.abs(dres).sum(0) #aux = abs_dres * (c0 - c1) * (c0 + c1 - 2*data) aux = abs_dres * (self.lambda1*(data - c1) ** 2 - self.lambda2*(data - c0) ** 2) res = np.copy(u) res[aux < 0] = 1 res[aux > 0] = 0 # Smoothing. for i in range(self.smoothing): res = curvop(res) self._u = res
[ "def", "step", "(", "self", ")", ":", "# Assign attributes to local variables for convenience.", "u", "=", "self", ".", "_u", "if", "u", "is", "None", ":", "raise", "ValueError", "(", "\"the levelset function is not set \"", "\"(use set_levelset)\"", ")", "data", "=",...
Perform a single step of the morphological Chan-Vese evolution.
[ "Perform", "a", "single", "step", "of", "the", "morphological", "Chan", "-", "Vese", "evolution", "." ]
aab66e70f86308d7b1927d76869a1a562120f849
https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes_v1.py#L168-L200
232,973
pmneila/morphsnakes
morphsnakes_v1.py
MorphGAC._update_mask
def _update_mask(self): """Pre-compute masks for speed.""" self._threshold_mask = self._data > self._theta self._threshold_mask_v = self._data > self._theta/np.abs(self._v)
python
def _update_mask(self): self._threshold_mask = self._data > self._theta self._threshold_mask_v = self._data > self._theta/np.abs(self._v)
[ "def", "_update_mask", "(", "self", ")", ":", "self", ".", "_threshold_mask", "=", "self", ".", "_data", ">", "self", ".", "_theta", "self", ".", "_threshold_mask_v", "=", "self", ".", "_data", ">", "self", ".", "_theta", "/", "np", ".", "abs", "(", ...
Pre-compute masks for speed.
[ "Pre", "-", "compute", "masks", "for", "speed", "." ]
aab66e70f86308d7b1927d76869a1a562120f849
https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes_v1.py#L254-L257
232,974
pmneila/morphsnakes
morphsnakes_v1.py
MorphGAC.step
def step(self): """Perform a single step of the morphological snake evolution.""" # Assign attributes to local variables for convenience. u = self._u gI = self._data dgI = self._ddata theta = self._theta v = self._v if u is None: raise ValueError("the levelset is not set (use set_levelset)") res = np.copy(u) # Balloon. if v > 0: aux = binary_dilation(u, self.structure) elif v < 0: aux = binary_erosion(u, self.structure) if v!= 0: res[self._threshold_mask_v] = aux[self._threshold_mask_v] # Image attachment. aux = np.zeros_like(res) dres = np.gradient(res) for el1, el2 in zip(dgI, dres): aux += el1*el2 res[aux > 0] = 1 res[aux < 0] = 0 # Smoothing. for i in range(self.smoothing): res = curvop(res) self._u = res
python
def step(self): # Assign attributes to local variables for convenience. u = self._u gI = self._data dgI = self._ddata theta = self._theta v = self._v if u is None: raise ValueError("the levelset is not set (use set_levelset)") res = np.copy(u) # Balloon. if v > 0: aux = binary_dilation(u, self.structure) elif v < 0: aux = binary_erosion(u, self.structure) if v!= 0: res[self._threshold_mask_v] = aux[self._threshold_mask_v] # Image attachment. aux = np.zeros_like(res) dres = np.gradient(res) for el1, el2 in zip(dgI, dres): aux += el1*el2 res[aux > 0] = 1 res[aux < 0] = 0 # Smoothing. for i in range(self.smoothing): res = curvop(res) self._u = res
[ "def", "step", "(", "self", ")", ":", "# Assign attributes to local variables for convenience.", "u", "=", "self", ".", "_u", "gI", "=", "self", ".", "_data", "dgI", "=", "self", ".", "_ddata", "theta", "=", "self", ".", "_theta", "v", "=", "self", ".", ...
Perform a single step of the morphological snake evolution.
[ "Perform", "a", "single", "step", "of", "the", "morphological", "snake", "evolution", "." ]
aab66e70f86308d7b1927d76869a1a562120f849
https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes_v1.py#L274-L308
232,975
pmneila/morphsnakes
morphsnakes.py
sup_inf
def sup_inf(u): """SI operator.""" if np.ndim(u) == 2: P = _P2 elif np.ndim(u) == 3: P = _P3 else: raise ValueError("u has an invalid number of dimensions " "(should be 2 or 3)") erosions = [] for P_i in P: erosions.append(ndi.binary_erosion(u, P_i)) return np.array(erosions, dtype=np.int8).max(0)
python
def sup_inf(u): if np.ndim(u) == 2: P = _P2 elif np.ndim(u) == 3: P = _P3 else: raise ValueError("u has an invalid number of dimensions " "(should be 2 or 3)") erosions = [] for P_i in P: erosions.append(ndi.binary_erosion(u, P_i)) return np.array(erosions, dtype=np.int8).max(0)
[ "def", "sup_inf", "(", "u", ")", ":", "if", "np", ".", "ndim", "(", "u", ")", "==", "2", ":", "P", "=", "_P2", "elif", "np", ".", "ndim", "(", "u", ")", "==", "3", ":", "P", "=", "_P3", "else", ":", "raise", "ValueError", "(", "\"u has an inv...
SI operator.
[ "SI", "operator", "." ]
aab66e70f86308d7b1927d76869a1a562120f849
https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes.py#L100-L115
232,976
pmneila/morphsnakes
morphsnakes.py
inf_sup
def inf_sup(u): """IS operator.""" if np.ndim(u) == 2: P = _P2 elif np.ndim(u) == 3: P = _P3 else: raise ValueError("u has an invalid number of dimensions " "(should be 2 or 3)") dilations = [] for P_i in P: dilations.append(ndi.binary_dilation(u, P_i)) return np.array(dilations, dtype=np.int8).min(0)
python
def inf_sup(u): if np.ndim(u) == 2: P = _P2 elif np.ndim(u) == 3: P = _P3 else: raise ValueError("u has an invalid number of dimensions " "(should be 2 or 3)") dilations = [] for P_i in P: dilations.append(ndi.binary_dilation(u, P_i)) return np.array(dilations, dtype=np.int8).min(0)
[ "def", "inf_sup", "(", "u", ")", ":", "if", "np", ".", "ndim", "(", "u", ")", "==", "2", ":", "P", "=", "_P2", "elif", "np", ".", "ndim", "(", "u", ")", "==", "3", ":", "P", "=", "_P3", "else", ":", "raise", "ValueError", "(", "\"u has an inv...
IS operator.
[ "IS", "operator", "." ]
aab66e70f86308d7b1927d76869a1a562120f849
https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes.py#L118-L133
232,977
pmneila/morphsnakes
morphsnakes.py
_check_input
def _check_input(image, init_level_set): """Check that shapes of `image` and `init_level_set` match.""" if not image.ndim in [2, 3]: raise ValueError("`image` must be a 2 or 3-dimensional array.") if len(image.shape) != len(init_level_set.shape): raise ValueError("The dimensions of the initial level set do not " "match the dimensions of the image.")
python
def _check_input(image, init_level_set): if not image.ndim in [2, 3]: raise ValueError("`image` must be a 2 or 3-dimensional array.") if len(image.shape) != len(init_level_set.shape): raise ValueError("The dimensions of the initial level set do not " "match the dimensions of the image.")
[ "def", "_check_input", "(", "image", ",", "init_level_set", ")", ":", "if", "not", "image", ".", "ndim", "in", "[", "2", ",", "3", "]", ":", "raise", "ValueError", "(", "\"`image` must be a 2 or 3-dimensional array.\"", ")", "if", "len", "(", "image", ".", ...
Check that shapes of `image` and `init_level_set` match.
[ "Check", "that", "shapes", "of", "image", "and", "init_level_set", "match", "." ]
aab66e70f86308d7b1927d76869a1a562120f849
https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes.py#L140-L147
232,978
pmneila/morphsnakes
morphsnakes.py
_init_level_set
def _init_level_set(init_level_set, image_shape): """Auxiliary function for initializing level sets with a string. If `init_level_set` is not a string, it is returned as is. """ if isinstance(init_level_set, str): if init_level_set == 'checkerboard': res = checkerboard_level_set(image_shape) elif init_level_set == 'circle': res = circle_level_set(image_shape) else: raise ValueError("`init_level_set` not in " "['checkerboard', 'circle']") else: res = init_level_set return res
python
def _init_level_set(init_level_set, image_shape): if isinstance(init_level_set, str): if init_level_set == 'checkerboard': res = checkerboard_level_set(image_shape) elif init_level_set == 'circle': res = circle_level_set(image_shape) else: raise ValueError("`init_level_set` not in " "['checkerboard', 'circle']") else: res = init_level_set return res
[ "def", "_init_level_set", "(", "init_level_set", ",", "image_shape", ")", ":", "if", "isinstance", "(", "init_level_set", ",", "str", ")", ":", "if", "init_level_set", "==", "'checkerboard'", ":", "res", "=", "checkerboard_level_set", "(", "image_shape", ")", "e...
Auxiliary function for initializing level sets with a string. If `init_level_set` is not a string, it is returned as is.
[ "Auxiliary", "function", "for", "initializing", "level", "sets", "with", "a", "string", "." ]
aab66e70f86308d7b1927d76869a1a562120f849
https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes.py#L150-L165
232,979
pmneila/morphsnakes
morphsnakes.py
circle_level_set
def circle_level_set(image_shape, center=None, radius=None): """Create a circle level set with binary values. Parameters ---------- image_shape : tuple of positive integers Shape of the image center : tuple of positive integers, optional Coordinates of the center of the circle given in (row, column). If not given, it defaults to the center of the image. radius : float, optional Radius of the circle. If not given, it is set to the 75% of the smallest image dimension. Returns ------- out : array with shape `image_shape` Binary level set of the circle with the given `radius` and `center`. See also -------- checkerboard_level_set """ if center is None: center = tuple(i // 2 for i in image_shape) if radius is None: radius = min(image_shape) * 3.0 / 8.0 grid = np.mgrid[[slice(i) for i in image_shape]] grid = (grid.T - center).T phi = radius - np.sqrt(np.sum((grid)**2, 0)) res = np.int8(phi > 0) return res
python
def circle_level_set(image_shape, center=None, radius=None): if center is None: center = tuple(i // 2 for i in image_shape) if radius is None: radius = min(image_shape) * 3.0 / 8.0 grid = np.mgrid[[slice(i) for i in image_shape]] grid = (grid.T - center).T phi = radius - np.sqrt(np.sum((grid)**2, 0)) res = np.int8(phi > 0) return res
[ "def", "circle_level_set", "(", "image_shape", ",", "center", "=", "None", ",", "radius", "=", "None", ")", ":", "if", "center", "is", "None", ":", "center", "=", "tuple", "(", "i", "//", "2", "for", "i", "in", "image_shape", ")", "if", "radius", "is...
Create a circle level set with binary values. Parameters ---------- image_shape : tuple of positive integers Shape of the image center : tuple of positive integers, optional Coordinates of the center of the circle given in (row, column). If not given, it defaults to the center of the image. radius : float, optional Radius of the circle. If not given, it is set to the 75% of the smallest image dimension. Returns ------- out : array with shape `image_shape` Binary level set of the circle with the given `radius` and `center`. See also -------- checkerboard_level_set
[ "Create", "a", "circle", "level", "set", "with", "binary", "values", "." ]
aab66e70f86308d7b1927d76869a1a562120f849
https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes.py#L168-L202
232,980
pmneila/morphsnakes
morphsnakes.py
checkerboard_level_set
def checkerboard_level_set(image_shape, square_size=5): """Create a checkerboard level set with binary values. Parameters ---------- image_shape : tuple of positive integers Shape of the image. square_size : int, optional Size of the squares of the checkerboard. It defaults to 5. Returns ------- out : array with shape `image_shape` Binary level set of the checkerboard. See also -------- circle_level_set """ grid = np.ogrid[[slice(i) for i in image_shape]] grid = [(grid_i // square_size) & 1 for grid_i in grid] checkerboard = np.bitwise_xor.reduce(grid, axis=0) res = np.int8(checkerboard) return res
python
def checkerboard_level_set(image_shape, square_size=5): grid = np.ogrid[[slice(i) for i in image_shape]] grid = [(grid_i // square_size) & 1 for grid_i in grid] checkerboard = np.bitwise_xor.reduce(grid, axis=0) res = np.int8(checkerboard) return res
[ "def", "checkerboard_level_set", "(", "image_shape", ",", "square_size", "=", "5", ")", ":", "grid", "=", "np", ".", "ogrid", "[", "[", "slice", "(", "i", ")", "for", "i", "in", "image_shape", "]", "]", "grid", "=", "[", "(", "grid_i", "//", "square_...
Create a checkerboard level set with binary values. Parameters ---------- image_shape : tuple of positive integers Shape of the image. square_size : int, optional Size of the squares of the checkerboard. It defaults to 5. Returns ------- out : array with shape `image_shape` Binary level set of the checkerboard. See also -------- circle_level_set
[ "Create", "a", "checkerboard", "level", "set", "with", "binary", "values", "." ]
aab66e70f86308d7b1927d76869a1a562120f849
https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes.py#L205-L230
232,981
pmneila/morphsnakes
morphsnakes.py
inverse_gaussian_gradient
def inverse_gaussian_gradient(image, alpha=100.0, sigma=5.0): """Inverse of gradient magnitude. Compute the magnitude of the gradients in the image and then inverts the result in the range [0, 1]. Flat areas are assigned values close to 1, while areas close to borders are assigned values close to 0. This function or a similar one defined by the user should be applied over the image as a preprocessing step before calling `morphological_geodesic_active_contour`. Parameters ---------- image : (M, N) or (L, M, N) array Grayscale image or volume. alpha : float, optional Controls the steepness of the inversion. A larger value will make the transition between the flat areas and border areas steeper in the resulting array. sigma : float, optional Standard deviation of the Gaussian filter applied over the image. Returns ------- gimage : (M, N) or (L, M, N) array Preprocessed image (or volume) suitable for `morphological_geodesic_active_contour`. """ gradnorm = ndi.gaussian_gradient_magnitude(image, sigma, mode='nearest') return 1.0 / np.sqrt(1.0 + alpha * gradnorm)
python
def inverse_gaussian_gradient(image, alpha=100.0, sigma=5.0): gradnorm = ndi.gaussian_gradient_magnitude(image, sigma, mode='nearest') return 1.0 / np.sqrt(1.0 + alpha * gradnorm)
[ "def", "inverse_gaussian_gradient", "(", "image", ",", "alpha", "=", "100.0", ",", "sigma", "=", "5.0", ")", ":", "gradnorm", "=", "ndi", ".", "gaussian_gradient_magnitude", "(", "image", ",", "sigma", ",", "mode", "=", "'nearest'", ")", "return", "1.0", "...
Inverse of gradient magnitude. Compute the magnitude of the gradients in the image and then inverts the result in the range [0, 1]. Flat areas are assigned values close to 1, while areas close to borders are assigned values close to 0. This function or a similar one defined by the user should be applied over the image as a preprocessing step before calling `morphological_geodesic_active_contour`. Parameters ---------- image : (M, N) or (L, M, N) array Grayscale image or volume. alpha : float, optional Controls the steepness of the inversion. A larger value will make the transition between the flat areas and border areas steeper in the resulting array. sigma : float, optional Standard deviation of the Gaussian filter applied over the image. Returns ------- gimage : (M, N) or (L, M, N) array Preprocessed image (or volume) suitable for `morphological_geodesic_active_contour`.
[ "Inverse", "of", "gradient", "magnitude", "." ]
aab66e70f86308d7b1927d76869a1a562120f849
https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes.py#L233-L262
232,982
teddziuba/django-sslserver
sslserver/management/commands/runsslserver.py
Command.get_handler
def get_handler(self, *args, **options): """ Returns the static files serving handler wrapping the default handler, if static files should be served. Otherwise just returns the default handler. """ handler = super(Command, self).get_handler(*args, **options) insecure_serving = options.get('insecure_serving', False) if self.should_use_static_handler(options): return StaticFilesHandler(handler) return handler
python
def get_handler(self, *args, **options): handler = super(Command, self).get_handler(*args, **options) insecure_serving = options.get('insecure_serving', False) if self.should_use_static_handler(options): return StaticFilesHandler(handler) return handler
[ "def", "get_handler", "(", "self", ",", "*", "args", ",", "*", "*", "options", ")", ":", "handler", "=", "super", "(", "Command", ",", "self", ")", ".", "get_handler", "(", "*", "args", ",", "*", "*", "options", ")", "insecure_serving", "=", "options...
Returns the static files serving handler wrapping the default handler, if static files should be served. Otherwise just returns the default handler.
[ "Returns", "the", "static", "files", "serving", "handler", "wrapping", "the", "default", "handler", "if", "static", "files", "should", "be", "served", ".", "Otherwise", "just", "returns", "the", "default", "handler", "." ]
87515e48f184175175a7c571a220659124363a44
https://github.com/teddziuba/django-sslserver/blob/87515e48f184175175a7c571a220659124363a44/sslserver/management/commands/runsslserver.py#L68-L79
232,983
ethereum/eth-account
eth_account/account.py
Account.privateKeyToAccount
def privateKeyToAccount(self, private_key): ''' Returns a convenient object for working with the given private key. :param private_key: The raw private key :type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey` :return: object with methods for signing and encrypting :rtype: LocalAccount .. code-block:: python >>> acct = Account.privateKeyToAccount( 0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364) >>> acct.address '0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E' >>> acct.privateKey b"\\xb2\\}\\xb3\\x1f\\xee\\xd9\\x12''\\xbf\\t9\\xdcv\\x9a\\x96VK-\\xe4\\xc4rm\\x03[6\\xec\\xf1\\xe5\\xb3d" # These methods are also available: signHash(), signTransaction(), encrypt() # They correspond to the same-named methods in Account.* # but without the private key argument ''' key = self._parsePrivateKey(private_key) return LocalAccount(key, self)
python
def privateKeyToAccount(self, private_key): ''' Returns a convenient object for working with the given private key. :param private_key: The raw private key :type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey` :return: object with methods for signing and encrypting :rtype: LocalAccount .. code-block:: python >>> acct = Account.privateKeyToAccount( 0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364) >>> acct.address '0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E' >>> acct.privateKey b"\\xb2\\}\\xb3\\x1f\\xee\\xd9\\x12''\\xbf\\t9\\xdcv\\x9a\\x96VK-\\xe4\\xc4rm\\x03[6\\xec\\xf1\\xe5\\xb3d" # These methods are also available: signHash(), signTransaction(), encrypt() # They correspond to the same-named methods in Account.* # but without the private key argument ''' key = self._parsePrivateKey(private_key) return LocalAccount(key, self)
[ "def", "privateKeyToAccount", "(", "self", ",", "private_key", ")", ":", "key", "=", "self", ".", "_parsePrivateKey", "(", "private_key", ")", "return", "LocalAccount", "(", "key", ",", "self", ")" ]
Returns a convenient object for working with the given private key. :param private_key: The raw private key :type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey` :return: object with methods for signing and encrypting :rtype: LocalAccount .. code-block:: python >>> acct = Account.privateKeyToAccount( 0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364) >>> acct.address '0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E' >>> acct.privateKey b"\\xb2\\}\\xb3\\x1f\\xee\\xd9\\x12''\\xbf\\t9\\xdcv\\x9a\\x96VK-\\xe4\\xc4rm\\x03[6\\xec\\xf1\\xe5\\xb3d" # These methods are also available: signHash(), signTransaction(), encrypt() # They correspond to the same-named methods in Account.* # but without the private key argument
[ "Returns", "a", "convenient", "object", "for", "working", "with", "the", "given", "private", "key", "." ]
335199b815ae34fea87f1523e2f29777fd52946e
https://github.com/ethereum/eth-account/blob/335199b815ae34fea87f1523e2f29777fd52946e/eth_account/account.py#L199-L222
232,984
ethereum/eth-account
eth_account/account.py
Account.recoverTransaction
def recoverTransaction(self, serialized_transaction): ''' Get the address of the account that signed this transaction. :param serialized_transaction: the complete signed transaction :type serialized_transaction: hex str, bytes or int :returns: address of signer, hex-encoded & checksummed :rtype: str .. code-block:: python >>> raw_transaction = '0xf86a8086d55698372431831e848094f0109fc8df283027b6285cc889f5aa624eac1f55843b9aca008025a009ebb6ca057a0535d6186462bc0b465b561c94a295bdb0621fc19208ab149a9ca0440ffd775ce91a833ab410777204d5341a6f9fa91216a6f3ee2c051fea6a0428', # noqa: E501 >>> Account.recoverTransaction(raw_transaction) '0x2c7536E3605D9C16a7a3D7b1898e529396a65c23' ''' txn_bytes = HexBytes(serialized_transaction) txn = Transaction.from_bytes(txn_bytes) msg_hash = hash_of_signed_transaction(txn) return self.recoverHash(msg_hash, vrs=vrs_from(txn))
python
def recoverTransaction(self, serialized_transaction): ''' Get the address of the account that signed this transaction. :param serialized_transaction: the complete signed transaction :type serialized_transaction: hex str, bytes or int :returns: address of signer, hex-encoded & checksummed :rtype: str .. code-block:: python >>> raw_transaction = '0xf86a8086d55698372431831e848094f0109fc8df283027b6285cc889f5aa624eac1f55843b9aca008025a009ebb6ca057a0535d6186462bc0b465b561c94a295bdb0621fc19208ab149a9ca0440ffd775ce91a833ab410777204d5341a6f9fa91216a6f3ee2c051fea6a0428', # noqa: E501 >>> Account.recoverTransaction(raw_transaction) '0x2c7536E3605D9C16a7a3D7b1898e529396a65c23' ''' txn_bytes = HexBytes(serialized_transaction) txn = Transaction.from_bytes(txn_bytes) msg_hash = hash_of_signed_transaction(txn) return self.recoverHash(msg_hash, vrs=vrs_from(txn))
[ "def", "recoverTransaction", "(", "self", ",", "serialized_transaction", ")", ":", "txn_bytes", "=", "HexBytes", "(", "serialized_transaction", ")", "txn", "=", "Transaction", ".", "from_bytes", "(", "txn_bytes", ")", "msg_hash", "=", "hash_of_signed_transaction", "...
Get the address of the account that signed this transaction. :param serialized_transaction: the complete signed transaction :type serialized_transaction: hex str, bytes or int :returns: address of signer, hex-encoded & checksummed :rtype: str .. code-block:: python >>> raw_transaction = '0xf86a8086d55698372431831e848094f0109fc8df283027b6285cc889f5aa624eac1f55843b9aca008025a009ebb6ca057a0535d6186462bc0b465b561c94a295bdb0621fc19208ab149a9ca0440ffd775ce91a833ab410777204d5341a6f9fa91216a6f3ee2c051fea6a0428', # noqa: E501 >>> Account.recoverTransaction(raw_transaction) '0x2c7536E3605D9C16a7a3D7b1898e529396a65c23'
[ "Get", "the", "address", "of", "the", "account", "that", "signed", "this", "transaction", "." ]
335199b815ae34fea87f1523e2f29777fd52946e
https://github.com/ethereum/eth-account/blob/335199b815ae34fea87f1523e2f29777fd52946e/eth_account/account.py#L300-L318
232,985
ethereum/eth-account
eth_account/account.py
Account.signHash
def signHash(self, message_hash, private_key): ''' Sign the hash provided. .. WARNING:: *Never* sign a hash that you didn't generate, it can be an arbitrary transaction. For example, it might send all of your account's ether to an attacker. If you would like compatibility with :meth:`w3.eth.sign() <web3.eth.Eth.sign>` you can use :meth:`~eth_account.messages.defunct_hash_message`. Several other message standards are proposed, but none have a clear consensus. You'll need to manually comply with any of those message standards manually. :param message_hash: the 32-byte message hash to be signed :type message_hash: hex str, bytes or int :param private_key: the key to sign the message with :type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey` :returns: Various details about the signature - most importantly the fields: v, r, and s :rtype: ~eth_account.datastructures.AttributeDict .. code-block:: python >>> msg = "I♥SF" >>> from eth_account.messages import defunct_hash_message >>> msghash = defunct_hash_message(text=msg) HexBytes('0x1476abb745d423bf09273f1afd887d951181d25adc66c4834a70491911b7f750') >>> key = "0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364" >>> Account.signHash(msghash, key) {'messageHash': HexBytes('0x1476abb745d423bf09273f1afd887d951181d25adc66c4834a70491911b7f750'), # noqa: E501 'r': 104389933075820307925104709181714897380569894203213074526835978196648170704563, 's': 28205917190874851400050446352651915501321657673772411533993420917949420456142, 'signature': HexBytes('0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb33e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce1c'), # noqa: E501 'v': 28} # these are equivalent: >>> Account.signHash( 0x1476abb745d423bf09273f1afd887d951181d25adc66c4834a70491911b7f750, key ) >>> Account.signHash( "0x1476abb745d423bf09273f1afd887d951181d25adc66c4834a70491911b7f750", key ) ''' msg_hash_bytes = HexBytes(message_hash) if len(msg_hash_bytes) != 
32: raise ValueError("The message hash must be exactly 32-bytes") key = self._parsePrivateKey(private_key) (v, r, s, eth_signature_bytes) = sign_message_hash(key, msg_hash_bytes) return AttributeDict({ 'messageHash': msg_hash_bytes, 'r': r, 's': s, 'v': v, 'signature': HexBytes(eth_signature_bytes), })
python
def signHash(self, message_hash, private_key): ''' Sign the hash provided. .. WARNING:: *Never* sign a hash that you didn't generate, it can be an arbitrary transaction. For example, it might send all of your account's ether to an attacker. If you would like compatibility with :meth:`w3.eth.sign() <web3.eth.Eth.sign>` you can use :meth:`~eth_account.messages.defunct_hash_message`. Several other message standards are proposed, but none have a clear consensus. You'll need to manually comply with any of those message standards manually. :param message_hash: the 32-byte message hash to be signed :type message_hash: hex str, bytes or int :param private_key: the key to sign the message with :type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey` :returns: Various details about the signature - most importantly the fields: v, r, and s :rtype: ~eth_account.datastructures.AttributeDict .. code-block:: python >>> msg = "I♥SF" >>> from eth_account.messages import defunct_hash_message >>> msghash = defunct_hash_message(text=msg) HexBytes('0x1476abb745d423bf09273f1afd887d951181d25adc66c4834a70491911b7f750') >>> key = "0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364" >>> Account.signHash(msghash, key) {'messageHash': HexBytes('0x1476abb745d423bf09273f1afd887d951181d25adc66c4834a70491911b7f750'), # noqa: E501 'r': 104389933075820307925104709181714897380569894203213074526835978196648170704563, 's': 28205917190874851400050446352651915501321657673772411533993420917949420456142, 'signature': HexBytes('0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb33e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce1c'), # noqa: E501 'v': 28} # these are equivalent: >>> Account.signHash( 0x1476abb745d423bf09273f1afd887d951181d25adc66c4834a70491911b7f750, key ) >>> Account.signHash( "0x1476abb745d423bf09273f1afd887d951181d25adc66c4834a70491911b7f750", key ) ''' msg_hash_bytes = HexBytes(message_hash) if len(msg_hash_bytes) != 
32: raise ValueError("The message hash must be exactly 32-bytes") key = self._parsePrivateKey(private_key) (v, r, s, eth_signature_bytes) = sign_message_hash(key, msg_hash_bytes) return AttributeDict({ 'messageHash': msg_hash_bytes, 'r': r, 's': s, 'v': v, 'signature': HexBytes(eth_signature_bytes), })
[ "def", "signHash", "(", "self", ",", "message_hash", ",", "private_key", ")", ":", "msg_hash_bytes", "=", "HexBytes", "(", "message_hash", ")", "if", "len", "(", "msg_hash_bytes", ")", "!=", "32", ":", "raise", "ValueError", "(", "\"The message hash must be exac...
Sign the hash provided. .. WARNING:: *Never* sign a hash that you didn't generate, it can be an arbitrary transaction. For example, it might send all of your account's ether to an attacker. If you would like compatibility with :meth:`w3.eth.sign() <web3.eth.Eth.sign>` you can use :meth:`~eth_account.messages.defunct_hash_message`. Several other message standards are proposed, but none have a clear consensus. You'll need to manually comply with any of those message standards manually. :param message_hash: the 32-byte message hash to be signed :type message_hash: hex str, bytes or int :param private_key: the key to sign the message with :type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey` :returns: Various details about the signature - most importantly the fields: v, r, and s :rtype: ~eth_account.datastructures.AttributeDict .. code-block:: python >>> msg = "I♥SF" >>> from eth_account.messages import defunct_hash_message >>> msghash = defunct_hash_message(text=msg) HexBytes('0x1476abb745d423bf09273f1afd887d951181d25adc66c4834a70491911b7f750') >>> key = "0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364" >>> Account.signHash(msghash, key) {'messageHash': HexBytes('0x1476abb745d423bf09273f1afd887d951181d25adc66c4834a70491911b7f750'), # noqa: E501 'r': 104389933075820307925104709181714897380569894203213074526835978196648170704563, 's': 28205917190874851400050446352651915501321657673772411533993420917949420456142, 'signature': HexBytes('0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb33e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce1c'), # noqa: E501 'v': 28} # these are equivalent: >>> Account.signHash( 0x1476abb745d423bf09273f1afd887d951181d25adc66c4834a70491911b7f750, key ) >>> Account.signHash( "0x1476abb745d423bf09273f1afd887d951181d25adc66c4834a70491911b7f750", key )
[ "Sign", "the", "hash", "provided", "." ]
335199b815ae34fea87f1523e2f29777fd52946e
https://github.com/ethereum/eth-account/blob/335199b815ae34fea87f1523e2f29777fd52946e/eth_account/account.py#L333-L393
232,986
ethereum/eth-account
eth_account/_utils/structured_data/hashing.py
get_dependencies
def get_dependencies(primary_type, types): """ Perform DFS to get all the dependencies of the primary_type """ deps = set() struct_names_yet_to_be_expanded = [primary_type] while len(struct_names_yet_to_be_expanded) > 0: struct_name = struct_names_yet_to_be_expanded.pop() deps.add(struct_name) fields = types[struct_name] for field in fields: if field["type"] not in types: # We don't need to expand types that are not user defined (customized) continue elif field["type"] in deps: # skip types that we have already encountered continue else: # Custom Struct Type struct_names_yet_to_be_expanded.append(field["type"]) # Don't need to make a struct as dependency of itself deps.remove(primary_type) return tuple(deps)
python
def get_dependencies(primary_type, types): deps = set() struct_names_yet_to_be_expanded = [primary_type] while len(struct_names_yet_to_be_expanded) > 0: struct_name = struct_names_yet_to_be_expanded.pop() deps.add(struct_name) fields = types[struct_name] for field in fields: if field["type"] not in types: # We don't need to expand types that are not user defined (customized) continue elif field["type"] in deps: # skip types that we have already encountered continue else: # Custom Struct Type struct_names_yet_to_be_expanded.append(field["type"]) # Don't need to make a struct as dependency of itself deps.remove(primary_type) return tuple(deps)
[ "def", "get_dependencies", "(", "primary_type", ",", "types", ")", ":", "deps", "=", "set", "(", ")", "struct_names_yet_to_be_expanded", "=", "[", "primary_type", "]", "while", "len", "(", "struct_names_yet_to_be_expanded", ")", ">", "0", ":", "struct_name", "="...
Perform DFS to get all the dependencies of the primary_type
[ "Perform", "DFS", "to", "get", "all", "the", "dependencies", "of", "the", "primary_type" ]
335199b815ae34fea87f1523e2f29777fd52946e
https://github.com/ethereum/eth-account/blob/335199b815ae34fea87f1523e2f29777fd52946e/eth_account/_utils/structured_data/hashing.py#L28-L54
232,987
ethereum/eth-account
eth_account/_utils/structured_data/hashing.py
is_valid_abi_type
def is_valid_abi_type(type_name): """ This function is used to make sure that the ``type_name`` is a valid ABI Type. Please note that this is a temporary function and should be replaced by the corresponding ABI function, once the following issue has been resolved. https://github.com/ethereum/eth-abi/issues/125 """ valid_abi_types = {"address", "bool", "bytes", "int", "string", "uint"} is_bytesN = type_name.startswith("bytes") and 1 <= int(type_name[5:]) <= 32 is_intN = ( type_name.startswith("int") and 8 <= int(type_name[3:]) <= 256 and int(type_name[3:]) % 8 == 0 ) is_uintN = ( type_name.startswith("uint") and 8 <= int(type_name[4:]) <= 256 and int(type_name[4:]) % 8 == 0 ) if type_name in valid_abi_types: return True elif is_bytesN: # bytes1 to bytes32 return True elif is_intN: # int8 to int256 return True elif is_uintN: # uint8 to uint256 return True return False
python
def is_valid_abi_type(type_name): valid_abi_types = {"address", "bool", "bytes", "int", "string", "uint"} is_bytesN = type_name.startswith("bytes") and 1 <= int(type_name[5:]) <= 32 is_intN = ( type_name.startswith("int") and 8 <= int(type_name[3:]) <= 256 and int(type_name[3:]) % 8 == 0 ) is_uintN = ( type_name.startswith("uint") and 8 <= int(type_name[4:]) <= 256 and int(type_name[4:]) % 8 == 0 ) if type_name in valid_abi_types: return True elif is_bytesN: # bytes1 to bytes32 return True elif is_intN: # int8 to int256 return True elif is_uintN: # uint8 to uint256 return True return False
[ "def", "is_valid_abi_type", "(", "type_name", ")", ":", "valid_abi_types", "=", "{", "\"address\"", ",", "\"bool\"", ",", "\"bytes\"", ",", "\"int\"", ",", "\"string\"", ",", "\"uint\"", "}", "is_bytesN", "=", "type_name", ".", "startswith", "(", "\"bytes\"", ...
This function is used to make sure that the ``type_name`` is a valid ABI Type. Please note that this is a temporary function and should be replaced by the corresponding ABI function, once the following issue has been resolved. https://github.com/ethereum/eth-abi/issues/125
[ "This", "function", "is", "used", "to", "make", "sure", "that", "the", "type_name", "is", "a", "valid", "ABI", "Type", "." ]
335199b815ae34fea87f1523e2f29777fd52946e
https://github.com/ethereum/eth-account/blob/335199b815ae34fea87f1523e2f29777fd52946e/eth_account/_utils/structured_data/hashing.py#L94-L127
232,988
ethereum/eth-account
eth_account/_utils/structured_data/hashing.py
get_depths_and_dimensions
def get_depths_and_dimensions(data, depth): """ Yields 2-length tuples of depth and dimension of each element at that depth """ if not isinstance(data, (list, tuple)): # Not checking for Iterable instance, because even Dictionaries and strings # are considered as iterables, but that's not what we want the condition to be. return () yield depth, len(data) for item in data: # iterating over all 1 dimension less sub-data items yield from get_depths_and_dimensions(item, depth + 1)
python
def get_depths_and_dimensions(data, depth): if not isinstance(data, (list, tuple)): # Not checking for Iterable instance, because even Dictionaries and strings # are considered as iterables, but that's not what we want the condition to be. return () yield depth, len(data) for item in data: # iterating over all 1 dimension less sub-data items yield from get_depths_and_dimensions(item, depth + 1)
[ "def", "get_depths_and_dimensions", "(", "data", ",", "depth", ")", ":", "if", "not", "isinstance", "(", "data", ",", "(", "list", ",", "tuple", ")", ")", ":", "# Not checking for Iterable instance, because even Dictionaries and strings", "# are considered as iterables, b...
Yields 2-length tuples of depth and dimension of each element at that depth
[ "Yields", "2", "-", "length", "tuples", "of", "depth", "and", "dimension", "of", "each", "element", "at", "that", "depth" ]
335199b815ae34fea87f1523e2f29777fd52946e
https://github.com/ethereum/eth-account/blob/335199b815ae34fea87f1523e2f29777fd52946e/eth_account/_utils/structured_data/hashing.py#L137-L150
232,989
ethereum/eth-account
eth_account/_utils/signing.py
hash_of_signed_transaction
def hash_of_signed_transaction(txn_obj): ''' Regenerate the hash of the signed transaction object. 1. Infer the chain ID from the signature 2. Strip out signature from transaction 3. Annotate the transaction with that ID, if available 4. Take the hash of the serialized, unsigned, chain-aware transaction Chain ID inference and annotation is according to EIP-155 See details at https://github.com/ethereum/EIPs/blob/master/EIPS/eip-155.md :return: the hash of the provided transaction, to be signed ''' (chain_id, _v) = extract_chain_id(txn_obj.v) unsigned_parts = strip_signature(txn_obj) if chain_id is None: signable_transaction = UnsignedTransaction(*unsigned_parts) else: extended_transaction = unsigned_parts + [chain_id, 0, 0] signable_transaction = ChainAwareUnsignedTransaction(*extended_transaction) return signable_transaction.hash()
python
def hash_of_signed_transaction(txn_obj): ''' Regenerate the hash of the signed transaction object. 1. Infer the chain ID from the signature 2. Strip out signature from transaction 3. Annotate the transaction with that ID, if available 4. Take the hash of the serialized, unsigned, chain-aware transaction Chain ID inference and annotation is according to EIP-155 See details at https://github.com/ethereum/EIPs/blob/master/EIPS/eip-155.md :return: the hash of the provided transaction, to be signed ''' (chain_id, _v) = extract_chain_id(txn_obj.v) unsigned_parts = strip_signature(txn_obj) if chain_id is None: signable_transaction = UnsignedTransaction(*unsigned_parts) else: extended_transaction = unsigned_parts + [chain_id, 0, 0] signable_transaction = ChainAwareUnsignedTransaction(*extended_transaction) return signable_transaction.hash()
[ "def", "hash_of_signed_transaction", "(", "txn_obj", ")", ":", "(", "chain_id", ",", "_v", ")", "=", "extract_chain_id", "(", "txn_obj", ".", "v", ")", "unsigned_parts", "=", "strip_signature", "(", "txn_obj", ")", "if", "chain_id", "is", "None", ":", "signa...
Regenerate the hash of the signed transaction object. 1. Infer the chain ID from the signature 2. Strip out signature from transaction 3. Annotate the transaction with that ID, if available 4. Take the hash of the serialized, unsigned, chain-aware transaction Chain ID inference and annotation is according to EIP-155 See details at https://github.com/ethereum/EIPs/blob/master/EIPS/eip-155.md :return: the hash of the provided transaction, to be signed
[ "Regenerate", "the", "hash", "of", "the", "signed", "transaction", "object", "." ]
335199b815ae34fea87f1523e2f29777fd52946e
https://github.com/ethereum/eth-account/blob/335199b815ae34fea87f1523e2f29777fd52946e/eth_account/_utils/signing.py#L96-L117
232,990
ethereum/eth-account
eth_account/_utils/signing.py
extract_chain_id
def extract_chain_id(raw_v): ''' Extracts chain ID, according to EIP-155 @return (chain_id, v) ''' above_id_offset = raw_v - CHAIN_ID_OFFSET if above_id_offset < 0: if raw_v in {0, 1}: return (None, raw_v + V_OFFSET) elif raw_v in {27, 28}: return (None, raw_v) else: raise ValueError("v %r is invalid, must be one of: 0, 1, 27, 28, 35+") else: (chain_id, v_bit) = divmod(above_id_offset, 2) return (chain_id, v_bit + V_OFFSET)
python
def extract_chain_id(raw_v): ''' Extracts chain ID, according to EIP-155 @return (chain_id, v) ''' above_id_offset = raw_v - CHAIN_ID_OFFSET if above_id_offset < 0: if raw_v in {0, 1}: return (None, raw_v + V_OFFSET) elif raw_v in {27, 28}: return (None, raw_v) else: raise ValueError("v %r is invalid, must be one of: 0, 1, 27, 28, 35+") else: (chain_id, v_bit) = divmod(above_id_offset, 2) return (chain_id, v_bit + V_OFFSET)
[ "def", "extract_chain_id", "(", "raw_v", ")", ":", "above_id_offset", "=", "raw_v", "-", "CHAIN_ID_OFFSET", "if", "above_id_offset", "<", "0", ":", "if", "raw_v", "in", "{", "0", ",", "1", "}", ":", "return", "(", "None", ",", "raw_v", "+", "V_OFFSET", ...
Extracts chain ID, according to EIP-155 @return (chain_id, v)
[ "Extracts", "chain", "ID", "according", "to", "EIP", "-", "155" ]
335199b815ae34fea87f1523e2f29777fd52946e
https://github.com/ethereum/eth-account/blob/335199b815ae34fea87f1523e2f29777fd52946e/eth_account/_utils/signing.py#L120-L135
232,991
llazzaro/django-scheduler
schedule/views.py
get_occurrence
def get_occurrence(event_id, occurrence_id=None, year=None, month=None, day=None, hour=None, minute=None, second=None, tzinfo=None): """ Because occurrences don't have to be persisted, there must be two ways to retrieve them. both need an event, but if its persisted the occurrence can be retrieved with an id. If it is not persisted it takes a date to retrieve it. This function returns an event and occurrence regardless of which method is used. """ if(occurrence_id): occurrence = get_object_or_404(Occurrence, id=occurrence_id) event = occurrence.event elif None not in (year, month, day, hour, minute, second): event = get_object_or_404(Event, id=event_id) date = timezone.make_aware(datetime.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second)), tzinfo) occurrence = event.get_occurrence(date) if occurrence is None: raise Http404 else: raise Http404 return event, occurrence
python
def get_occurrence(event_id, occurrence_id=None, year=None, month=None, day=None, hour=None, minute=None, second=None, tzinfo=None): if(occurrence_id): occurrence = get_object_or_404(Occurrence, id=occurrence_id) event = occurrence.event elif None not in (year, month, day, hour, minute, second): event = get_object_or_404(Event, id=event_id) date = timezone.make_aware(datetime.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second)), tzinfo) occurrence = event.get_occurrence(date) if occurrence is None: raise Http404 else: raise Http404 return event, occurrence
[ "def", "get_occurrence", "(", "event_id", ",", "occurrence_id", "=", "None", ",", "year", "=", "None", ",", "month", "=", "None", ",", "day", "=", "None", ",", "hour", "=", "None", ",", "minute", "=", "None", ",", "second", "=", "None", ",", "tzinfo"...
Because occurrences don't have to be persisted, there must be two ways to retrieve them. both need an event, but if its persisted the occurrence can be retrieved with an id. If it is not persisted it takes a date to retrieve it. This function returns an event and occurrence regardless of which method is used.
[ "Because", "occurrences", "don", "t", "have", "to", "be", "persisted", "there", "must", "be", "two", "ways", "to", "retrieve", "them", ".", "both", "need", "an", "event", "but", "if", "its", "persisted", "the", "occurrence", "can", "be", "retrieved", "with...
0530b74a5fc0b1125645002deaa4da2337ed0f17
https://github.com/llazzaro/django-scheduler/blob/0530b74a5fc0b1125645002deaa4da2337ed0f17/schedule/views.py#L251-L274
232,992
llazzaro/django-scheduler
schedule/models/calendars.py
CalendarManager.get_calendars_for_object
def get_calendars_for_object(self, obj, distinction=''): """ This function allows you to get calendars for a specific object If distinction is set it will filter out any relation that doesnt have that distinction. """ ct = ContentType.objects.get_for_model(obj) if distinction: dist_q = Q(calendarrelation__distinction=distinction) else: dist_q = Q() return self.filter(dist_q, calendarrelation__content_type=ct, calendarrelation__object_id=obj.id)
python
def get_calendars_for_object(self, obj, distinction=''): ct = ContentType.objects.get_for_model(obj) if distinction: dist_q = Q(calendarrelation__distinction=distinction) else: dist_q = Q() return self.filter(dist_q, calendarrelation__content_type=ct, calendarrelation__object_id=obj.id)
[ "def", "get_calendars_for_object", "(", "self", ",", "obj", ",", "distinction", "=", "''", ")", ":", "ct", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "obj", ")", "if", "distinction", ":", "dist_q", "=", "Q", "(", "calendarrelation__distin...
This function allows you to get calendars for a specific object If distinction is set it will filter out any relation that doesnt have that distinction.
[ "This", "function", "allows", "you", "to", "get", "calendars", "for", "a", "specific", "object" ]
0530b74a5fc0b1125645002deaa4da2337ed0f17
https://github.com/llazzaro/django-scheduler/blob/0530b74a5fc0b1125645002deaa4da2337ed0f17/schedule/models/calendars.py#L91-L103
232,993
llazzaro/django-scheduler
schedule/models/calendars.py
CalendarRelationManager.create_relation
def create_relation(self, calendar, content_object, distinction='', inheritable=True): """ Creates a relation between calendar and content_object. See CalendarRelation for help on distinction and inheritable """ return CalendarRelation.objects.create( calendar=calendar, distinction=distinction, content_object=content_object)
python
def create_relation(self, calendar, content_object, distinction='', inheritable=True): return CalendarRelation.objects.create( calendar=calendar, distinction=distinction, content_object=content_object)
[ "def", "create_relation", "(", "self", ",", "calendar", ",", "content_object", ",", "distinction", "=", "''", ",", "inheritable", "=", "True", ")", ":", "return", "CalendarRelation", ".", "objects", ".", "create", "(", "calendar", "=", "calendar", ",", "dist...
Creates a relation between calendar and content_object. See CalendarRelation for help on distinction and inheritable
[ "Creates", "a", "relation", "between", "calendar", "and", "content_object", ".", "See", "CalendarRelation", "for", "help", "on", "distinction", "and", "inheritable" ]
0530b74a5fc0b1125645002deaa4da2337ed0f17
https://github.com/llazzaro/django-scheduler/blob/0530b74a5fc0b1125645002deaa4da2337ed0f17/schedule/models/calendars.py#L186-L194
232,994
llazzaro/django-scheduler
schedule/utils.py
EventListManager.occurrences_after
def occurrences_after(self, after=None): """ It is often useful to know what the next occurrence is given a list of events. This function produces a generator that yields the the most recent occurrence after the date ``after`` from any of the events in ``self.events`` """ from schedule.models import Occurrence if after is None: after = timezone.now() occ_replacer = OccurrenceReplacer( Occurrence.objects.filter(event__in=self.events)) generators = [event._occurrences_after_generator(after) for event in self.events] occurrences = [] for generator in generators: try: heapq.heappush(occurrences, (next(generator), generator)) except StopIteration: pass while occurrences: generator = occurrences[0][1] try: next_occurrence = heapq.heapreplace(occurrences, (next(generator), generator))[0] except StopIteration: next_occurrence = heapq.heappop(occurrences)[0] yield occ_replacer.get_occurrence(next_occurrence)
python
def occurrences_after(self, after=None): from schedule.models import Occurrence if after is None: after = timezone.now() occ_replacer = OccurrenceReplacer( Occurrence.objects.filter(event__in=self.events)) generators = [event._occurrences_after_generator(after) for event in self.events] occurrences = [] for generator in generators: try: heapq.heappush(occurrences, (next(generator), generator)) except StopIteration: pass while occurrences: generator = occurrences[0][1] try: next_occurrence = heapq.heapreplace(occurrences, (next(generator), generator))[0] except StopIteration: next_occurrence = heapq.heappop(occurrences)[0] yield occ_replacer.get_occurrence(next_occurrence)
[ "def", "occurrences_after", "(", "self", ",", "after", "=", "None", ")", ":", "from", "schedule", ".", "models", "import", "Occurrence", "if", "after", "is", "None", ":", "after", "=", "timezone", ".", "now", "(", ")", "occ_replacer", "=", "OccurrenceRepla...
It is often useful to know what the next occurrence is given a list of events. This function produces a generator that yields the the most recent occurrence after the date ``after`` from any of the events in ``self.events``
[ "It", "is", "often", "useful", "to", "know", "what", "the", "next", "occurrence", "is", "given", "a", "list", "of", "events", ".", "This", "function", "produces", "a", "generator", "that", "yields", "the", "the", "most", "recent", "occurrence", "after", "t...
0530b74a5fc0b1125645002deaa4da2337ed0f17
https://github.com/llazzaro/django-scheduler/blob/0530b74a5fc0b1125645002deaa4da2337ed0f17/schedule/utils.py#L24-L53
232,995
llazzaro/django-scheduler
schedule/models/events.py
EventRelationManager.get_events_for_object
def get_events_for_object(self, content_object, distinction='', inherit=True): ''' returns a queryset full of events, that relate to the object through, the distinction If inherit is false it will not consider the calendars that the events belong to. If inherit is true it will inherit all of the relations and distinctions that any calendar that it belongs to has, as long as the relation has inheritable set to True. (See Calendar) >>> event = Event.objects.get(title='Test1') >>> user = User.objects.get(username = 'alice') >>> EventRelation.objects.get_events_for_object(user, 'owner', inherit=False) [<Event: Test1: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>] If a distinction is not declared it will not vet the relations based on distinction. >>> EventRelation.objects.get_events_for_object(user, inherit=False) [<Event: Test1: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>, <Event: Test2: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>] Now if there is a Calendar >>> calendar = Calendar(name = 'MyProject') >>> calendar.save() And an event that belongs to that calendar >>> event = Event.objects.get(title='Test2') >>> calendar.events.add(event) If we relate this calendar to some object with inheritable set to true, that relation will be inherited >>> user = User.objects.get(username='bob') >>> cr = calendar.create_relation(user, 'viewer', True) >>> EventRelation.objects.get_events_for_object(user, 'viewer') [<Event: Test1: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>, <Event: Test2: Tuesday, Jan. 1, 2008-Friday, Jan. 
11, 2008>] ''' ct = ContentType.objects.get_for_model(type(content_object)) if distinction: dist_q = Q(eventrelation__distinction=distinction) cal_dist_q = Q(calendar__calendarrelation__distinction=distinction) else: dist_q = Q() cal_dist_q = Q() if inherit: inherit_q = Q( cal_dist_q, calendar__calendarrelation__content_type=ct, calendar__calendarrelation__object_id=content_object.id, calendar__calendarrelation__inheritable=True, ) else: inherit_q = Q() event_q = Q(dist_q, eventrelation__content_type=ct, eventrelation__object_id=content_object.id) return Event.objects.filter(inherit_q | event_q)
python
def get_events_for_object(self, content_object, distinction='', inherit=True): ''' returns a queryset full of events, that relate to the object through, the distinction If inherit is false it will not consider the calendars that the events belong to. If inherit is true it will inherit all of the relations and distinctions that any calendar that it belongs to has, as long as the relation has inheritable set to True. (See Calendar) >>> event = Event.objects.get(title='Test1') >>> user = User.objects.get(username = 'alice') >>> EventRelation.objects.get_events_for_object(user, 'owner', inherit=False) [<Event: Test1: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>] If a distinction is not declared it will not vet the relations based on distinction. >>> EventRelation.objects.get_events_for_object(user, inherit=False) [<Event: Test1: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>, <Event: Test2: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>] Now if there is a Calendar >>> calendar = Calendar(name = 'MyProject') >>> calendar.save() And an event that belongs to that calendar >>> event = Event.objects.get(title='Test2') >>> calendar.events.add(event) If we relate this calendar to some object with inheritable set to true, that relation will be inherited >>> user = User.objects.get(username='bob') >>> cr = calendar.create_relation(user, 'viewer', True) >>> EventRelation.objects.get_events_for_object(user, 'viewer') [<Event: Test1: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>, <Event: Test2: Tuesday, Jan. 1, 2008-Friday, Jan. 
11, 2008>] ''' ct = ContentType.objects.get_for_model(type(content_object)) if distinction: dist_q = Q(eventrelation__distinction=distinction) cal_dist_q = Q(calendar__calendarrelation__distinction=distinction) else: dist_q = Q() cal_dist_q = Q() if inherit: inherit_q = Q( cal_dist_q, calendar__calendarrelation__content_type=ct, calendar__calendarrelation__object_id=content_object.id, calendar__calendarrelation__inheritable=True, ) else: inherit_q = Q() event_q = Q(dist_q, eventrelation__content_type=ct, eventrelation__object_id=content_object.id) return Event.objects.filter(inherit_q | event_q)
[ "def", "get_events_for_object", "(", "self", ",", "content_object", ",", "distinction", "=", "''", ",", "inherit", "=", "True", ")", ":", "ct", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "type", "(", "content_object", ")", ")", "if", "d...
returns a queryset full of events, that relate to the object through, the distinction If inherit is false it will not consider the calendars that the events belong to. If inherit is true it will inherit all of the relations and distinctions that any calendar that it belongs to has, as long as the relation has inheritable set to True. (See Calendar) >>> event = Event.objects.get(title='Test1') >>> user = User.objects.get(username = 'alice') >>> EventRelation.objects.get_events_for_object(user, 'owner', inherit=False) [<Event: Test1: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>] If a distinction is not declared it will not vet the relations based on distinction. >>> EventRelation.objects.get_events_for_object(user, inherit=False) [<Event: Test1: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>, <Event: Test2: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>] Now if there is a Calendar >>> calendar = Calendar(name = 'MyProject') >>> calendar.save() And an event that belongs to that calendar >>> event = Event.objects.get(title='Test2') >>> calendar.events.add(event) If we relate this calendar to some object with inheritable set to true, that relation will be inherited >>> user = User.objects.get(username='bob') >>> cr = calendar.create_relation(user, 'viewer', True) >>> EventRelation.objects.get_events_for_object(user, 'viewer') [<Event: Test1: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>, <Event: Test2: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>]
[ "returns", "a", "queryset", "full", "of", "events", "that", "relate", "to", "the", "object", "through", "the", "distinction" ]
0530b74a5fc0b1125645002deaa4da2337ed0f17
https://github.com/llazzaro/django-scheduler/blob/0530b74a5fc0b1125645002deaa4da2337ed0f17/schedule/models/events.py#L459-L511
232,996
llazzaro/django-scheduler
schedule/models/events.py
EventRelationManager.create_relation
def create_relation(self, event, content_object, distinction=''): """ Creates a relation between event and content_object. See EventRelation for help on distinction. """ return EventRelation.objects.create( event=event, distinction=distinction, content_object=content_object)
python
def create_relation(self, event, content_object, distinction=''): return EventRelation.objects.create( event=event, distinction=distinction, content_object=content_object)
[ "def", "create_relation", "(", "self", ",", "event", ",", "content_object", ",", "distinction", "=", "''", ")", ":", "return", "EventRelation", ".", "objects", ".", "create", "(", "event", "=", "event", ",", "distinction", "=", "distinction", ",", "content_o...
Creates a relation between event and content_object. See EventRelation for help on distinction.
[ "Creates", "a", "relation", "between", "event", "and", "content_object", ".", "See", "EventRelation", "for", "help", "on", "distinction", "." ]
0530b74a5fc0b1125645002deaa4da2337ed0f17
https://github.com/llazzaro/django-scheduler/blob/0530b74a5fc0b1125645002deaa4da2337ed0f17/schedule/models/events.py#L513-L521
232,997
greyli/flask-ckeditor
examples/flask-admin/app.py
init_db
def init_db(): """ Populate a small db with some example entries. """ db.drop_all() db.create_all() # Create sample Post title = "de Finibus Bonorum et Malorum - Part I" text = "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor \ incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud \ exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure \ dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. \ Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt \ mollit anim id est laborum." post = Post(title=title, text=text) db.session.add(post) db.session.commit()
python
def init_db(): db.drop_all() db.create_all() # Create sample Post title = "de Finibus Bonorum et Malorum - Part I" text = "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor \ incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud \ exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure \ dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. \ Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt \ mollit anim id est laborum." post = Post(title=title, text=text) db.session.add(post) db.session.commit()
[ "def", "init_db", "(", ")", ":", "db", ".", "drop_all", "(", ")", "db", ".", "create_all", "(", ")", "# Create sample Post", "title", "=", "\"de Finibus Bonorum et Malorum - Part I\"", "text", "=", "\"Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod ...
Populate a small db with some example entries.
[ "Populate", "a", "small", "db", "with", "some", "example", "entries", "." ]
a8a1aa0d5736271762700d06fe9dbc0f8ed43aec
https://github.com/greyli/flask-ckeditor/blob/a8a1aa0d5736271762700d06fe9dbc0f8ed43aec/examples/flask-admin/app.py#L42-L60
232,998
greyli/flask-ckeditor
flask_ckeditor/__init__.py
_CKEditor.load
def load(custom_url=None, pkg_type=None, serve_local=None, version='4.9.2'): """Load CKEditor resource from CDN or local. :param custom_url: The custom resource url to use, build your CKEditor on `CKEditor builder <https://ckeditor.com/cke4/builder>`_. :param pkg_type: The type of CKEditor package, one of ``basic``, ``standard`` and ``full``. Default to ``standard``. It's a mirror argument to overwrite ``CKEDITOR_PKG_TYPE``. :param serve_local: Mirror argument to overwrite ``CKEDITOR_SERVE_LOCAL``. :param version: The version of CKEditor. """ pkg_type = pkg_type or current_app.config['CKEDITOR_PKG_TYPE'] if pkg_type not in ['basic', 'standard', 'full']: warnings.warn('The provided pkg_type string was invalid, ' 'it should be one of basic/standard/full.') pkg_type = 'standard' if serve_local or current_app.config['CKEDITOR_SERVE_LOCAL']: url = url_for('ckeditor.static', filename='%s/ckeditor.js' % pkg_type) else: url = '//cdn.ckeditor.com/%s/%s/ckeditor.js' % (version, pkg_type) if custom_url: url = custom_url return Markup('<script src="%s"></script>' % url)
python
def load(custom_url=None, pkg_type=None, serve_local=None, version='4.9.2'): pkg_type = pkg_type or current_app.config['CKEDITOR_PKG_TYPE'] if pkg_type not in ['basic', 'standard', 'full']: warnings.warn('The provided pkg_type string was invalid, ' 'it should be one of basic/standard/full.') pkg_type = 'standard' if serve_local or current_app.config['CKEDITOR_SERVE_LOCAL']: url = url_for('ckeditor.static', filename='%s/ckeditor.js' % pkg_type) else: url = '//cdn.ckeditor.com/%s/%s/ckeditor.js' % (version, pkg_type) if custom_url: url = custom_url return Markup('<script src="%s"></script>' % url)
[ "def", "load", "(", "custom_url", "=", "None", ",", "pkg_type", "=", "None", ",", "serve_local", "=", "None", ",", "version", "=", "'4.9.2'", ")", ":", "pkg_type", "=", "pkg_type", "or", "current_app", ".", "config", "[", "'CKEDITOR_PKG_TYPE'", "]", "if", ...
Load CKEditor resource from CDN or local. :param custom_url: The custom resource url to use, build your CKEditor on `CKEditor builder <https://ckeditor.com/cke4/builder>`_. :param pkg_type: The type of CKEditor package, one of ``basic``, ``standard`` and ``full``. Default to ``standard``. It's a mirror argument to overwrite ``CKEDITOR_PKG_TYPE``. :param serve_local: Mirror argument to overwrite ``CKEDITOR_SERVE_LOCAL``. :param version: The version of CKEditor.
[ "Load", "CKEditor", "resource", "from", "CDN", "or", "local", "." ]
a8a1aa0d5736271762700d06fe9dbc0f8ed43aec
https://github.com/greyli/flask-ckeditor/blob/a8a1aa0d5736271762700d06fe9dbc0f8ed43aec/flask_ckeditor/__init__.py#L22-L47
232,999
awkman/pywifi
pywifi/wifi.py
PyWiFi.interfaces
def interfaces(self): """Collect the available wlan interfaces.""" self._ifaces = [] wifi_ctrl = wifiutil.WifiUtil() for interface in wifi_ctrl.interfaces(): iface = Interface(interface) self._ifaces.append(iface) self._logger.info("Get interface: %s", iface.name()) if not self._ifaces: self._logger.error("Can't get wifi interface") return self._ifaces
python
def interfaces(self): self._ifaces = [] wifi_ctrl = wifiutil.WifiUtil() for interface in wifi_ctrl.interfaces(): iface = Interface(interface) self._ifaces.append(iface) self._logger.info("Get interface: %s", iface.name()) if not self._ifaces: self._logger.error("Can't get wifi interface") return self._ifaces
[ "def", "interfaces", "(", "self", ")", ":", "self", ".", "_ifaces", "=", "[", "]", "wifi_ctrl", "=", "wifiutil", ".", "WifiUtil", "(", ")", "for", "interface", "in", "wifi_ctrl", ".", "interfaces", "(", ")", ":", "iface", "=", "Interface", "(", "interf...
Collect the available wlan interfaces.
[ "Collect", "the", "available", "wlan", "interfaces", "." ]
719baf73d8d32c623dbaf5e9de5d973face152a4
https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/wifi.py#L36-L50