hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
eff4b8dfab3cbc36159407408200c3d6892101d9 | 29,213 | py | Python | henipipe/samTobed.py | moritzschaefer/henipipe | 0723ead3c5c9dfa9a5c163caea4531e077d3a996 | [
"MIT"
] | 4 | 2020-03-02T04:03:53.000Z | 2020-11-05T18:41:58.000Z | henipipe/samTobed.py | moritzschaefer/henipipe | 0723ead3c5c9dfa9a5c163caea4531e077d3a996 | [
"MIT"
] | 5 | 2020-08-21T08:24:26.000Z | 2022-02-01T22:59:21.000Z | henipipe/samTobed.py | moritzschaefer/henipipe | 0723ead3c5c9dfa9a5c163caea4531e077d3a996 | [
"MIT"
] | 3 | 2020-07-20T21:45:29.000Z | 2020-10-06T12:00:21.000Z | #!/usr/bin/python
import sys
try:
from collections import OrderedDict
except ImportError: #python 2.6 or 3.6+
if sys.version_info >= (3,6):
OrderedDict = dict
else:
from ordereddict import OrderedDict
import os
from itertools import groupby
from subprocess import Popen, PIPE
from io import TextIOWrapper
import re
import argparse
from six import PY3, string_types
try:
from multiprocessing.dummy.connection import Connection
except ImportError: #python2
from _multiprocessing import Connection
__version__ = '0.1'
class DefaultOrderedDict(OrderedDict):
    """OrderedDict that creates missing values on first access.

    Behaves like ``collections.defaultdict`` while preserving key insertion
    order on interpreters where plain dicts are unordered.
    """
    def __init__(self, default, items=()):
        """
        :param default: zero-argument callable producing the value for a
            missing key (e.g. ``list`` or ``OrderedDict``).
        :param items: optional initial key/value pairs.
        """
        # FIX: the default used to be a mutable list ([]), which is shared
        # across every call; an immutable empty tuple is equivalent and safe.
        super(DefaultOrderedDict, self).__init__(items)
        self._default = default
    def __missing__(self, key):
        # Invoked by dict lookup on an absent key: create, store, return.
        self[key] = value = self._default()
        return value
class GenomicOrder(object):
    """Mixin providing genomic ordering: compare by reference name first,
    then by position."""
    def __gt__(self, other):
        # Same chromosome: order by coordinate; otherwise by name.
        if self.rname == other.rname:
            return self.pos > other.pos
        return self.rname > other.rname
    def __lt__(self, other):
        if self.rname == other.rname:
            return self.pos < other.pos
        return self.rname < other.rname
    def __eq__(self, other):
        return (self.rname, self.pos) == (other.rname, other.pos)
class Reader(object):
    """ Read SAM/BAM format file as an iterable. """
    def __init__(self, f, regions=False, kind=None, samtools_path="samtools"):
        # `f` may be a file object, the stdin stream, or a multiprocessing
        # Connection; `kind` forces 'sam'/'bam' when the extension is absent.
        ext = None
        self.samtools_path = samtools_path
        self.spool = None # use this to catch alignment during reader scraping
        self.type = 'sam'
        try:
            self._f_name = f.name
            _, ext = os.path.splitext(f.name)
            if f.name == '<stdin>': # stdin stream
                self._sam_init(f)
            # NOTE(review): the parentheses below look misplaced -- the intent
            # is `ext is not None and ext.lower() == '.bam'`.  The expression
            # still evaluates correctly because `(False) == '.bam'` is False
            # and `(ext.lower()) == '.bam'` is the desired comparison.
            elif (ext is not None and ext.lower()) == '.bam' or (kind is not None and kind.lower() == 'bam'):
                self._bam_init(f, regions)
                self.type = 'bam'
            elif (ext is not None and ext.lower()) == '.sam' or (kind is not None and kind.lower() == 'sam'):
                self._sam_init(f)
            else:
                # Unknown extension: assume plain SAM text.
                self._sam_init(f)
            # Region queries require an indexed BAM; reject region use on SAM.
            if (regions and (ext is not None and ext.lower() != '.bam') and kind is None) or (regions and kind is not None and kind.lower() != 'bam'):
                self.__exit__()
                raise ValueError("Region support requires bam file.")
        except AttributeError:
            # `f` has no .name attribute: a pipe Connection or bare iterable.
            self._f_name = None
            if isinstance(f, Connection):
                self._pipe_init(f)
            else:
                self._sam_init(f)
    def _pipe_init(self, f):
        # Scrape header lines ('@'-prefixed) from a multiprocessing
        # Connection, buffering the first alignment line in self.spool.
        header = []
        for line in iter(f.recv, ''):
            if line[0] == '@':
                header.append(line.rstrip('\n\r'))
            else:
                self.spool = line
                break
        self.header_as_dict(header)
        self.f = iter(f.recv, '')
        self._conn = 'pipe'
    def _sam_init(self, f):
        # Scrape header lines from a SAM text stream, buffering the first
        # alignment line in self.spool so next() does not lose it.
        header = []
        self.f = f
        for line in self.f:
            if line[0] == '@':
                header.append(line.rstrip('\n\r'))
            else:
                self.spool = line
                break
        self.header_as_dict(header)
        self._conn = 'file'
    def _bam_init(self, f, regions):
        # Read the header and alignment stream of a BAM file by shelling out
        # to `samtools view`; builds an index when region queries are wanted.
        pline = [self.samtools_path, 'view', '-H', f.name]
        try:
            p = Popen(pline, bufsize=-1, stdout=PIPE,
                      stderr=PIPE)
        except OSError:
            raise OSError('Samtools must be installed for BAM file support!\n')
        self.header_as_dict([line.decode('utf-8').rstrip('\n\r') for line in p.stdout])
        p.wait()
        if regions:
            try:
                # Existence check for the .bai index.
                # NOTE(review): the handle opened here is never closed.
                open(''.join([f.name, '.bai']))
            except EnvironmentError:
                sys.stderr.write("BAM index not found. Attempting to index file.\n")
                index_p = Popen([self.samtools_path, 'index', f.name], stdout=PIPE, stderr=PIPE)
                _, err = index_p.communicate()
                if index_p.returncode > 0 or re.search("fail", str(err)):
                    raise OSError("Indexing failed. Is the BAM file sorted?\n")
                else:
                    sys.stderr.write("Index created successfully.\n")
            pline = [self.samtools_path, 'view', f.name, regions]
        else:
            pline = [self.samtools_path, 'view', f.name]
        self.p = Popen(pline, bufsize=-1, stdout=PIPE,
                       stderr=PIPE)
        if PY3:
            # Wrap the byte stream so iteration yields text on Python 3.
            self.f = TextIOWrapper(self.p.stdout)
        else:
            self.f = self.p.stdout
        self._conn = 'proc'
    def next(self):
        """ Returns the next :class:`.Sam` object """
        try:
            if self.spool: # this will be the first alignment in a SAM file or stream
                line = self.spool.rstrip('\n\r')
                self.spool = None
            else:
                line = next(self.f).rstrip('\n\r')
            if line == '':
                raise StopIteration
            # First 11 tab-separated fields are mandatory; the rest are tags.
            fields = line.split('\t')
            required = fields[:11]
            tags = fields[11:]
            return Sam(*required, tags=tags)
        except StopIteration:
            raise StopIteration
    def __next__(self):
        # Python 3 iterator protocol delegates to next().
        return self.next()
    def __iter__(self):
        return self
    def __len__(self):
        """ Returns the number of reads in an indexed BAM file.
        Not implemented for SAM files. """
        if self.type != 'bam':
            raise NotImplementedError("len(Reader) is only implemented for BAM files.")
        elif self.type == 'bam':
            return sum(bam_read_count(self._f_name, self.samtools_path))
    def subsample(self, n):
        """ Returns an interator that draws every nth read from
        the input file. Returns :class:`.Sam`. """
        # NOTE(review): unlike next(), lines are not rstripped here, so the
        # trailing newline lands in the final field -- confirm callers cope.
        for i, line in enumerate(self.f):
            if i % n == 0:
                fields = line.split('\t')
                required = fields[:11]
                tags = fields[11:]
                yield Sam(*required, tags=tags)
    def header_as_dict(self, header):
        """ Parse the header list and return a nested dictionary. """
        self.header = DefaultOrderedDict(OrderedDict)
        for line in header:
            line = line.split('\t')
            key, fields = (line[0], line[1:])
            try:
                self.header[key][fields[0]] = fields[1:]
            except IndexError:
                # NOTE(review): if `fields` is empty, fields[0] raises
                # IndexError again inside this handler -- confirm headers
                # always carry at least one field per record.
                self.header[key][fields[0]] = ['']
    @property
    def seqs(self):
        """ Return just the sequence names from the @SQ library as a generator. """
        for key in self.header['@SQ'].keys():
            yield key.split(':')[1]
    def tile_genome(self, width):
        """ Return a generator of UCSC-style regions tiling ``width``. """
        assert isinstance(width, int)
        for k, v in self.header['@SQ'].items():
            # @SQ entries look like SN:<name> with LN:<length> in the values.
            rname = k.split(':')[1]
            seqlength = v[0].split(':')[1]
            for region in tile_region(rname, 1, int(seqlength), width):
                yield region
    def close(self):
        self.__exit__()
    def __enter__(self):
        return self
    def __exit__(self, *args):
        # Close the underlying stream; terminate samtools for BAM readers.
        if self._conn == 'file':
            self.f.close()
        if self._conn == 'proc':
            self.f.close()
            self.p.terminate()
class Writer(object):
    """ Write SAM/BAM format file from :class:`.Sam` objects.

    The header is emitted immediately on construction; only SAM text output
    is supported. """
    def __init__(self, f, header=None):
        # BAM output is unsupported; detect it from the file extension when
        # the stream exposes a name (plain files do, StringIO/pipes may not).
        name = getattr(f, 'name', None)
        if name is not None:
            _, ext = os.path.splitext(name)
            if ext == '.bam':
                raise NotImplementedError('Bam writing support is not implemented.\n') # why not just pipe to samtools?
        self.file = f
        self.header = DefaultOrderedDict(OrderedDict)
        if header is None:
            # Minimal default header when the caller supplies none.
            self.header['@HD']['VN:1.0'] = ['SO:unknown']
        else:
            self._merge_header(header)
        self._header_dict_format()
    def _merge_header(self, header):
        # Copy a nested header mapping into our own nested structure.
        for record_type, entries in header.items():
            for entry_key, tag_values in entries.items():
                self.header[record_type][entry_key] = tag_values
    def _header_dict_format(self):
        # Emit each record as "@XX<TAB>first-field<TAB>remaining-fields...".
        for record_type, entries in self.header.items():
            for entry_key, tag_values in entries.items():
                joined = '\t'.join(tag_values)
                self.file.write('%s\t%s\t%s\n' % (record_type, entry_key, joined))
    def write(self, sam):
        """ Write the string representation of the ``sam`` :class:`.Sam` object. """
        self.file.write(str(sam))
    def close(self):
        self.__exit__()
    def __enter__(self):
        return self
    def __exit__(self, *args):
        self.file.close()
class Sam(GenomicOrder):
    """ Object representation of a SAM entry.

    Defaults describe an unmapped, empty record. ``tags`` is the list of raw
    ``TAG:TYPE:VALUE`` strings; it is parsed lazily by the ``tags`` property
    and memoized in ``self._cache``. """
    # https://github.com/samtools/hts-specs/blob/da805be01e2ceaaa69fdde9f33c5377bf9ee6369/SAMv1.tex#L383
    # operations that consume the reference
    _cigar_ref = set(('M', 'D', 'N', '=', 'X', 'EQ'))
    # operations that consume the query
    _cigar_query = set(('M', 'I', 'S', '=', 'X', 'EQ'))
    # operations that do not represent an alignment
    _cigar_no_align = set(('H', 'P'))
    _valid_cigar = _cigar_ref | _cigar_query | _cigar_no_align
    # operations that can be represented as aligned to the reference
    _cigar_align = _cigar_ref & _cigar_query
    # operations that only consume the reference
    _cigar_ref_only = _cigar_ref - _cigar_align
    # operations that only consume the query
    _cigar_query_only = _cigar_query - _cigar_align
    def __init__(self, qname='', flag=4, rname='*', pos=0, mapq=255, cigar='*', rnext='*', pnext=0, tlen=0, seq='*', qual='*', tags=None):
        # FIX: `tags` previously defaulted to a shared mutable list ([]);
        # the None sentinel gives every instance its own list.
        self.qname = qname
        self.flag = int(flag)
        self.rname = rname
        self.pos = int(pos)
        self.mapq = int(mapq)
        self.cigar = cigar
        self.rnext = rnext
        self.pnext = int(pnext)
        self.tlen = int(tlen)
        self.seq = seq
        self.qual = qual
        self._tags = [] if tags is None else tags
        self._cache = dict()  # memoizes parsed cigars / tags / MD reference
    def __str__(self):
        """ Returns the string representation of a SAM entry. Correspondes to one line
        in the on-disk format of a SAM file. """
        if self.tags:
            tag_fields = '\t'.join([encode_tag(tag, self.tags[tag]) for tag in sorted(self.tags.keys())])
        else:
            tag_fields = '\t'.join(self._tags)
        return '{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\t{9}\t{10}\t{11}\n'.format(self.qname,
                                                                                       str(self.flag),
                                                                                       self.rname,
                                                                                       str(self.pos),
                                                                                       str(self.mapq),
                                                                                       self.cigar,
                                                                                       self.rnext,
                                                                                       str(self.pnext),
                                                                                       str(self.tlen),
                                                                                       self.seq,
                                                                                       self.qual,
                                                                                       tag_fields)
    def __repr__(self):
        return "Sam({0}:{1}:{2})".format(self.rname, self.pos, self.qname)
    def __len__(self):
        """ Returns the length of the portion of ``self.seq`` aligned to the reference. Unaligned reads will
        have len() == 0. Insertions (I) and soft-clipped portions (S) will not contribute to the aligned length.
        >>> x = Sam(cigar='8M2I4M1D3M4S')
        >>> len(x)
        16
        """
        return sum(c[0] for c in self.cigars if c[1] in self._cigar_ref)
    def __getitem__(self, tag):
        """ Retreives the SAM tag named "tag" as a tuple: (tag_name, data). The
        data type of the tag is interpreted as the proper Python object type.
        >>> x = Sam(tags=['NM:i:0', 'ZZ:Z:xyz'])
        >>> x['NM']
        0
        >>> x['ZZ']
        'xyz'
        """
        return self.tags[tag]
    def __setitem__(self, tag, data):
        """ Stores the SAM tag named "tag" with the value "data". The
        data type of the tag is interpreted from the Python object type.
        >>> x = Sam(tags=[])
        >>> x['NM'] = 0
        >>> x['NM']
        0
        """
        self.tags[tag] = data
    def index_of(self, pos):
        """ Return the relative index within the alignment from a genomic position 'pos' """
        i = pos - self.pos
        if i >= 0:
            return i
        else:
            raise IndexError("Position {0:n} not in {1}.".format(pos, self.qname))
    def get(self, key, default_value):
        """ dict-style tag access with a fallback value. """
        try:
            return self[key]
        except KeyError:
            return default_value
    def cigar_split(self):
        # https://github.com/brentp/bwa-meth
        if self.cigar == "*":
            yield (0, None)
            # FIX (PEP 479): `raise StopIteration` inside a generator becomes
            # RuntimeError on Python 3.7+; a plain return ends it cleanly.
            return
        cig_iter = groupby(self.cigar, lambda c: c.isdigit())
        for _, n in cig_iter:
            op = int("".join(n)), "".join(next(cig_iter)[1])
            if op[1] in self._valid_cigar:
                yield op
            else:
                raise ValueError("CIGAR operation %s in record %s is invalid." % (op[1], self.qname))
    def gapped(self, attr, gap_char='-'):
        """ Return a :class:`.Sam` sequence attribute or tag with all
        deletions in the reference sequence represented as 'gap_char' and all
        insertions in the reference sequence removed. A sequence could
        be :class:``Sam.seq``, ``Sam.qual``, or any :class:`.Sam` tag that
        represents an aligned sequence, such as a methylation tag for bisulfite
        sequencing libraries.
        >>> x = Sam(*'r001\t99\tref\t7\t30\t8M2I4M1D3M\t=\t37\t39\tTTAGATAAAGGATACTG\t*'.split())
        >>> x.gapped('seq')
        'TTAGATAAGATA-CTG'
        >>> x = Sam(*'r001\t99\tref\t7\t30\t8M2I4M1D3M\t=\t37\t39\tTTAGATAAAGGATACTG\t*'.split(), tags=['ZM:Z:.........M....M.M'])
        >>> x.gapped('ZM')
        '............-M.M'
        """
        try:
            ungapped = getattr(self, attr)
        except AttributeError:
            ungapped = self[attr]  # get dictionary key (tag) if attribute is missing
        if len(ungapped) != len(self.seq):
            raise ValueError("The length of the '%s' attribute is not equal to the length of Sam.seq!" % attr)
        gapped = []
        i = 0
        # Walk the CIGAR: aligned ops copy through, reference-only ops insert
        # gap characters, query-only ops are skipped, H/P are ignored.
        for n, t in self.cigars:
            if t in self._cigar_align:
                gapped.extend(ungapped[i:i + n])
                i += n
            elif t in self._cigar_ref_only:
                gapped.extend([gap_char] * n)
            elif t in self._cigar_query_only:
                i += n
            elif t in self._cigar_no_align:
                pass
        return ''.join(gapped)
    def parse_md(self):
        """ Return the ungapped reference sequence from the MD tag, if present.
        """
        try:
            return self._cache['parse_md']
        except KeyError:
            pass
        try:
            md = self['MD']
        except KeyError:
            raise KeyError('MD tag not found in SAM record.')
        # Start from the gapped read sequence, then patch in the reference
        # bases recorded as mismatches in the MD tag.
        ref_seq = list(self.gapped('seq'))
        md_match = re.findall(r"([0-9]+)\^?([A-Z]+)?", md)
        ref_seq_i = 0
        for i, b in md_match:
            ref_seq_i += int(i)
            for mismatch in b:
                try:
                    ref_seq[ref_seq_i] = mismatch
                except IndexError:
                    raise IndexError(locals())
                ref_seq_i += 1
        self._cache['parse_md'] = ref_seq
        return ref_seq
    @property
    def cigars(self):
        """ Returns the CIGAR string as a tuple.
        >>> x = Sam(cigar='8M2I4M1D3M')
        >>> x.cigars
        ((8, 'M'), (2, 'I'), (4, 'M'), (1, 'D'), (3, 'M'))
        """
        try:
            return self._cache['cigars']
        except KeyError:
            self._cache['cigars'] = tuple(self.cigar_split())
            return self._cache['cigars']
    @property
    def tags(self):
        """ Parses the tags string to a dictionary if necessary.
        >>> x = Sam(tags=['XU:Z:cgttttaa', 'XB:Z:cttacgttaagagttaac', 'MD:Z:75', 'NM:i:0', 'NH:i:1', 'RG:Z:1'])
        >>> sorted(x.tags.items(), key=lambda x: x[0])
        [('MD', '75'), ('NH', 1), ('NM', 0), ('RG', '1'), ('XB', 'cttacgttaagagttaac'), ('XU', 'cgttttaa')]
        """
        try:
            return self._cache['tags']
        except KeyError:
            self._cache['tags'] = parse_sam_tags(self._tags)
            return self._cache['tags']
    @property
    def paired(self):
        """ Returns True if the read is paired and
        each segment properly aligned according to the aligner. """
        return bool(self.flag & 0x2)
    @property
    def mapped(self):
        """ Returns True of the read is mapped. """
        return not (self.flag & 0x4)
    @property
    def secondary(self):
        """ Returns True if the read alignment is secondary. """
        return bool(self.flag & 0x100)
    @property
    def reverse(self):
        """ Returns True if ``Sam.seq`` is being reverse complemented. """
        return bool(self.flag & 0x10)
    @property
    def passing(self):
        """ Returns True if the read is passing filters, such as platform/vendor quality controls. """
        return not bool(self.flag & 0x200)
    @property
    def duplicate(self):
        """ Returns True if the read is a PCR or optical duplicate. """
        return bool(self.flag & 0x400)
    @property
    def coords(self):
        """ Return a list of genomic coordinates for the gapped alignment. """
        return range(self.pos, self.pos + len(self))
    @property
    def safename(self):
        """Return ``Sam.qname`` without paired-end identifier if it exists"""
        # FIX: guard against qnames shorter than two characters, where the
        # [-2] index would raise IndexError.
        if len(self.qname) > 1 and self.qname[-2] == '/':
            return self.qname[:-2]
        else:
            return self.qname
def parse_sam_tags(tagfields):
    """ Return a dictionary containing the tags """
    # Decode each TAG:TYPE:VALUE string, keeping only name and typed value;
    # later duplicates overwrite earlier ones, as before.
    parsed = {}
    for field in tagfields:
        tag, _dtype, data = decode_tag(field)
        parsed[tag] = data
    return parsed
def encode_tag(tag, data):
    """ Write a SAM tag in the format ``TAG:TYPE:data``. Infers the data type
    from the Python object type.
    >>> encode_tag('YM', '#""9O"1@!J')
    'YM:Z:#""9O"1@!J'
    """
    # Check string-ness first (same order as before, so bools still encode
    # as integers via the int branch).
    for types, code in ((string_types, 'Z'), (int, 'i'), (float, 'f')):
        if isinstance(data, types):
            return ':'.join((tag, code, str(data)))
    raise NotImplementedError("Data {0} cannot be encoded as string, integer, or float tag.".format(data))
def decode_tag(tag_string):
    """ Parse a SAM format tag to a (tag, type, data) tuple. Python object
    types for data are set using the type code. Supported type codes are: A, i, f, Z, H, B
    >>> decode_tag('YM:Z:#""9O"1@!J')
    ('YM', 'Z', '#""9O"1@!J')
    >>> decode_tag('XS:i:5')
    ('XS', 'i', 5)
    >>> decode_tag('XF:f:100.5')
    ('XF', 'f', 100.5)
    >>> decode_tag('XX:Z:a:b')
    ('XX', 'Z', 'a:b')
    """
    # FIX: a plain split(':') raised ValueError whenever the data payload
    # itself contained ':' (legal for Z-type tags); the old regex fallback
    # only matched uppercase two-letter tags and crashed with AttributeError
    # otherwise.  maxsplit=2 handles every well-formed tag directly.
    tag, data_type, data = tag_string.split(':', 2)
    if data_type == 'i':
        return (tag, data_type, int(data))
    elif data_type == 'Z':
        return (tag, data_type, data)
    elif data_type == 'f':
        return (tag, data_type, float(data))
    elif data_type == 'A':  # this is just a special case of a character
        return (tag, data_type, data)
    elif data_type == 'H':
        raise NotImplementedError("Hex array SAM tags are currently not parsed.")
    elif data_type == 'B':
        raise NotImplementedError("Byte array SAM tags are currently not parsed.")
    else:
        raise NotImplementedError("Tag {0} cannot be parsed.".format(tag_string))
def tile_region(rname, start, end, step):
    """ Make non-overlapping tiled windows from the specified region in
    the UCSC-style string format.  Coordinates are 1-based and inclusive.
    >>> list(tile_region('chr1', 1, 250, 100))
    ['chr1:1-100', 'chr1:101-200', 'chr1:201-250']
    >>> list(tile_region('chr1', 1, 200, 100))
    ['chr1:1-100', 'chr1:101-200']
    >>> list(tile_region('chr1', 1, 201, 100))
    ['chr1:1-100', 'chr1:101-200', 'chr1:201-201']
    """
    while start + step <= end:
        yield '%s:%d-%d' % (rname, start, start + step - 1)
        start += step
    # FIX: `<=` rather than `<` so a final single-base remainder window is
    # emitted; the old comparison silently dropped a trailing width-1 window
    # (e.g. position 201 for (1, 201, 100)) and a zero-length region's
    # only window (start == end).
    if start <= end:
        yield '%s:%d-%d' % (rname, start, end)
def bam_read_count(bamfile, samtools_path="samtools"):
    """ Return a tuple of the number of mapped and unmapped reads in a BAM file """
    # `samtools idxstats` emits one line per reference sequence:
    #   <rname> <length> <#mapped> <#unmapped>
    proc = Popen([samtools_path, 'idxstats', bamfile], stdout=PIPE)
    totals = [0, 0]
    for line in proc.stdout:
        _rname, _rlen, n_mapped, n_unmapped = line.rstrip().split()
        totals[0] += int(n_mapped)
        totals[1] += int(n_unmapped)
    return (totals[0], totals[1])
class fragment(object):
    """ A single paired-end fragment rendered as one tab-separated line. """
    def __init__(self, loc, bed_start, end, fraglen, R1_mapq, R2_mapq, R1_flag, R2_flag, direction):
        self.loc = loc
        self.bed_start = bed_start
        self.end = end
        self.fraglen = fraglen
        self.R1_mapq = R1_mapq
        self.R2_mapq = R2_mapq
        self.R1_flag = R1_flag
        self.R2_flag = R2_flag
        self.direction = direction
    def _as_line(self):
        # Tab-separated record terminated by a newline.
        fields = (self.loc, self.bed_start, self.end, self.fraglen,
                  self.R1_mapq, self.R2_mapq, self.R1_flag, self.R2_flag,
                  self.direction)
        return '\t'.join(str(f) for f in fields) + '\n'
    def __repr__(self):
        return self._as_line()
    def __str__(self):
        return self._as_line()
class fragments(object):
    """ Pair up consecutive SAM records and emit qualifying fragments.

    Consumes a name-sorted paired-end SAM/BAM stream two records at a time
    and converts each properly oriented, uniquely mapped pair into a
    :class:`fragment`.  Processing happens eagerly in the constructor:
    results go to ``out_file`` when given, otherwise to stdout.
    """
    def __init__(self, in_file, out_file=None, skip_dups=True, filter_threshes=None):
        self.read_count = 0
        self.fragment_count = 0
        # Fragment-length window; (-inf, inf) disables length filtering.
        if filter_threshes is None:
            self.filter_threshes = [float("-inf"), float("inf")]
        else:
            self.filter_threshes = filter_threshes
        self.passed_filter = 0
        # SAM flag values identifying the forward-strand read of a proper
        # pair; the non-skip_dups variants also admit PCR/optical duplicates.
        if skip_dups:
            self.forward_list = [99, 163, 355, 419]
            self.plus_list = [99, 355]
        else:
            self.forward_list = [99, 163, 355, 419, 1123, 1187]
            self.plus_list = [99, 355, 1123]
        if out_file is None:
            self.return_fragments(in_file)
        else:
            self.write_fragments(in_file, out_file)
    def _next_pair(self, iterator):
        """Pull the next two alignments (a mate pair), counting reads.
        Propagates StopIteration (and IOError from the stream) to callers."""
        read1 = next(iterator)
        self.read_count += 1
        read2 = next(iterator)
        self.read_count += 1
        return read1, read2
    def load_fragments(self, in_file):
        """Return a list of qualifying fragments (no length filtering)."""
        iterator = iter(Reader(in_file))
        collected = []
        while True:
            try:
                read1, read2 = self._next_pair(iterator)
            except StopIteration:
                break
            qualified = self.qualify_reads(read1, read2)
            if qualified is not None:
                collected.append(qualified)
        return collected
    def return_fragments(self, in_file):
        """Stream qualifying fragments to stdout (no length filtering)."""
        iterator = iter(Reader(in_file))
        while True:
            try:
                read1, read2 = self._next_pair(iterator)
            except (StopIteration, IOError):
                # IOError covers a broken pipe on the downstream consumer.
                break
            qualified = self.qualify_reads(read1, read2)
            if qualified is not None:
                sys.stdout.write(str(qualified))
    def write_fragments(self, in_file, out_file):
        """Write fragments passing the length filter to `out_file`, then
        print a summary."""
        iterator = iter(Reader(in_file))
        while True:
            try:
                read1, read2 = self._next_pair(iterator)
            except StopIteration:
                # FIX: `//` keeps the pair count an integer under Python 3
                # (the old `/` printed e.g. "10.0 read pairs").
                print("\n[SAM2BED] Output: \n Processed %s read pairs. Found %s usable fragments.\nOf these, %s passed_filter.\n" % (self.read_count // 2, self.fragment_count, self.passed_filter))
                break
            qualified = self.qualify_reads(read1, read2, filter=self.filter_threshes)
            if qualified is not None:
                out_file.write(str(qualified))
    # Sam fields: qname='', flag=4, rname='*', pos=0, mapq=255, cigar='*', rnext='*', pnext=0, tlen=0, seq='*', qual='*', tags=[]
    def qualify_reads(self, read1, read2, filter=(float("-inf"), float("inf"))):
        """Return a 0-based :class:`fragment` for a usable pair, else None.

        A pair is usable when one read carries a forward proper-pair flag,
        both reads map uniquely (no bowtie 'XS' secondary-alignment tag),
        both mates are on the same reference (rnext == '='), and the
        template length is positive."""
        direction = "Unk"
        begin = end = 0
        r1_unique = r2_unique = False
        if read1.flag in self.forward_list:
            r1_unique = "XS" not in read1.tags
            r2_unique = "XS" not in read2.tags
            begin = read1.pos
            end = read1.pos + read1.tlen
            direction = "+"
        if read2.flag in self.forward_list:
            r2_unique = "XS" not in read2.tags
            r1_unique = "XS" not in read1.tags
            begin = read2.pos
            end = read2.pos + read2.tlen
            direction = "-"
        # FIX: compare with != rather than `is not` -- identity comparison
        # against a string literal is implementation-dependent and a
        # SyntaxWarning on Python 3.8+.
        if (r1_unique and r2_unique and direction != "Unk" and (end - begin) > 0
                and read1.rnext == "=" and read2.rnext == "="):
            self.fragment_count += 1
            if filter[0] < (end - begin) < filter[1]:
                self.passed_filter += 1
                return fragment(read1.rname, (begin - 1), (end - 1), end - begin, read1.mapq, read2.mapq, read1.flag, read2.flag, direction)
        return None
def run_sam2bed():
    """Command-line entry point: convert SAM/BAM (file or '-'/stdin) to
    BED-like fragment lines, optionally length-filtered."""
    parser = argparse.ArgumentParser('A script for converting sam file to bed for eventual bedgraph conversion')
    # FIX: help typo 'fil' -> 'file'.
    parser.add_argument('sam_bam', type=str, default=sys.stdin, help='sam or bam file or stdin')
    parser.add_argument('--out_bed', '-o', default=None, help='bed output', required=False)
    parser.add_argument('--filter_high', '-fh', type=int, default=None, help='filter_high', required=False)
    # FIX: this option's help text used to say 'filter_high' (copy-paste bug).
    parser.add_argument('--filter_low', '-fl', default=None, type=int, help='filter_low', required=False)
    args = parser.parse_args()
    if args.sam_bam == "-":
        in_stream = sys.stdin
    else:
        in_stream = open(args.sam_bam, 'r')
    # [low, high] window; missing bounds fall back to +/- infinity.
    filter_threshes = [
        args.filter_low if args.filter_low is not None else float("-inf"),
        args.filter_high if args.filter_high is not None else float("inf"),
    ]
    try:
        if args.out_bed is not None:
            # Context manager guarantees the output file is closed.
            with open(args.out_bed, "w") as of:
                fragments(in_stream, out_file=of, filter_threshes=filter_threshes)
        else:
            fragments(in_stream, filter_threshes=filter_threshes)
    finally:
        # FIX: close the input file (but never the shared stdin stream).
        if in_stream is not sys.stdin:
            in_stream.close()
# Allow running this module directly as a command-line tool.
if __name__ == '__main__':
    run_sam2bed()
| 37.309068 | 194 | 0.538493 |
3ea3148bc4720b1552b1edab91fe8b0ca0dad600 | 2,069 | py | Python | src/main.py | JhonJBautistaB/challenge-python-02 | acf71b332da7fa2f47bfa640ca730b46f1dc3f3b | [
"MIT"
] | null | null | null | src/main.py | JhonJBautistaB/challenge-python-02 | acf71b332da7fa2f47bfa640ca730b46f1dc3f3b | [
"MIT"
] | null | null | null | src/main.py | JhonJBautistaB/challenge-python-02 | acf71b332da7fa2f47bfa640ca730b46f1dc3f3b | [
"MIT"
] | null | null | null | # Resolve the problem!!
import string
import random
# Character pools drawn from when generating/validating passwords.
SYMBOLS = list('!"#$%&\'()*+,-./:;?@[]^_`{|}~')  # note: a subset of string.punctuation (no < = > \ etc.)
LOWERCASE = list(string.ascii_lowercase)
UPPERCASE = list(string.ascii_uppercase)
DIGITS = list(string.digits)
def generate_password():
    """Generate and return a random password of 8-16 characters containing
    at least one lowercase letter, one uppercase letter, one digit and one
    symbol (the password is also printed, as before)."""
    # FIX: passwords are security-sensitive, so draw randomness from the
    # CSPRNG in `secrets` instead of the non-cryptographic `random` PRNG.
    import secrets
    password_len = secrets.choice(range(8, 17))  # inclusive, like randint(8, 16)
    print('Password_Len: ', password_len)
    password = []
    # Cycle through the four pools so every character class is represented;
    # password_len >= 8 guarantees at least two full cycles.
    pools = (LOWERCASE, UPPERCASE, DIGITS, SYMBOLS)
    while len(password) < password_len:
        password.append(secrets.choice(pools[len(password) % 4]))
    # Shuffle so the class pattern is not predictable; SystemRandom keeps
    # the shuffling cryptographically strong too.
    random.SystemRandom().shuffle(password)
    password_sec = ''.join(password)
    print('su password es:', password_sec)
    return password_sec
def validate(password):
    """Return True if `password` is 8-16 characters long and contains at
    least one lowercase letter, one uppercase letter, one digit and one
    symbol from SYMBOLS."""
    # Length gate first; the four separate scan loops of the original are
    # collapsed into a single all/any expression with identical semantics.
    if not (8 <= len(password) <= 16):
        return False
    required_pools = (
        string.ascii_lowercase,
        string.ascii_uppercase,
        string.digits,
        SYMBOLS,
    )
    return all(any(ch in pool for ch in password) for pool in required_pools)
def run():
    # Generate one password and report whether it satisfies the policy.
    candidate = generate_password()
    verdict = 'Secure Password' if validate(candidate) else 'Insecure Password'
    print(verdict)
# Script entry point.
if __name__ == '__main__':
    run()
| 25.54321 | 91 | 0.581924 |
589a993273b3d5948296ca72097266ae37b4884e | 19,916 | py | Python | pkg/build/builder/ansible/callback/live.py | kayanme/werf | 655dbdc5f5ddba6e604274d29ee3d885ed79559e | [
"Apache-2.0"
] | 1 | 2022-03-29T23:00:43.000Z | 2022-03-29T23:00:43.000Z | pkg/build/builder/ansible/callback/live.py | kayanme/werf | 655dbdc5f5ddba6e604274d29ee3d885ed79559e | [
"Apache-2.0"
] | 7 | 2021-09-17T13:42:27.000Z | 2021-09-17T17:14:08.000Z | pkg/build/builder/ansible/callback/live.py | kayanme/werf | 655dbdc5f5ddba6e604274d29ee3d885ed79559e | [
"Apache-2.0"
] | 1 | 2021-09-22T04:29:43.000Z | 2021-09-22T04:29:43.000Z | # -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: live
type: stdout
short_description: screen output for solo mode
version_added: historical
description:
- Solo mode with live stdout for raw and script tasks with fallback to minimal
'''
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
from ansible.vars.clean import strip_internal_keys
from ansible.module_utils._text import to_text
from ansible.utils.color import stringc
from ansible.errors import AnsibleError
import os
import json, re
from collections import Iterable
import logboek
from werf.live_stdout import LiveStdoutListener
# Taken from Dstat
class vt100:
    """ANSI/VT100 terminal escape sequences (SGR colors, screen and cursor
    control) used to render live output."""
    # Normal-intensity foreground colors (SGR 30-37).
    black = '\033[0;30m'
    darkred = '\033[0;31m'
    darkgreen = '\033[0;32m'
    darkyellow = '\033[0;33m'
    darkblue = '\033[0;34m'
    darkmagenta = '\033[0;35m'
    darkcyan = '\033[0;36m'
    gray = '\033[0;37m'
    # Bold/bright foreground variants.
    darkgray = '\033[1;30m'
    red = '\033[1;31m'
    green = '\033[1;32m'
    yellow = '\033[1;33m'
    blue = '\033[1;34m'
    magenta = '\033[1;35m'
    cyan = '\033[1;36m'
    white = '\033[1;37m'
    # Background colors (SGR 40-47).
    blackbg = '\033[40m'
    redbg = '\033[41m'
    greenbg = '\033[42m'
    yellowbg = '\033[43m'
    bluebg = '\033[44m'
    magentabg = '\033[45m'
    cyanbg = '\033[46m'
    whitebg = '\033[47m'
    # Attributes and screen/line control.
    reset = '\033[0;0m'
    bold = '\033[1m'
    # NOTE(review): '\033[2m' is SGR "dim"; true reverse video is '\033[7m'
    # -- confirm the intended effect.
    reverse = '\033[2m'
    underline = '\033[4m'
    clear = '\033[2J'
    # clearline = '\033[K'
    clearline = '\033[2K'
    save = '\033[s'
    restore = '\033[u'
    save_all = '\0337'
    restore_all = '\0338'
    linewrap = '\033[7h'
    nolinewrap = '\033[7l'
    # Single-cell cursor movement.
    up = '\033[1A'
    down = '\033[1B'
    right = '\033[1C'
    left = '\033[1D'
class lColor:
    """Semantic color aliases mapping task result states to vt100 colors."""
    COLOR_OK = vt100.darkgreen
    COLOR_CHANGED = vt100.darkyellow
    COLOR_ERROR = vt100.darkred
    COLOR_DEBUG = vt100.darkgray
class LiveCallbackHelpers(CallbackBase):
    """Shared string/logging helpers for the live stdout callback plugin."""
    # String-like types that _flatten must NOT iterate into.  FIX: the code
    # previously referenced the bare name `unicode`, which raises NameError
    # on Python 3; resolve the tuple once per interpreter instead.
    try:
        _STRING_TYPES = (unicode, str, bytes)  # Python 2
    except NameError:
        _STRING_TYPES = (str, bytes)  # Python 3: str already covers unicode
    def __init__(self):
        super(LiveCallbackHelpers, self).__init__()
    def LogArgs(self, *args):
        """Flatten `args` into one string and send it to the logboek logger."""
        logboek.Log(u''.join(self._flatten(args)).encode('utf-8'))
    # nested arrays into flat array # action(module name)
    # action(module name) 'task name'
    def _flatten(self, l):
        """Yield items from any nested iterable"""
        # Strings are iterable but must be yielded whole, not char by char.
        if isinstance(l, self._STRING_TYPES):
            yield l
            return
        for x in l:
            if not x:
                continue
            if isinstance(x, Iterable) and not isinstance(x, self._STRING_TYPES):
                for sub_x in self._flatten(x):
                    yield sub_x
            else:
                yield x
    # string methods
    def _squash_center(self, s, l, placeholder='...'):
        """Shorten `s` to about length `l` by replacing its middle with
        `placeholder`; strings that already fit are returned unchanged."""
        pl = len(placeholder)
        if len(s) > l:
            # edge length of s to display
            sp = int((l - pl)/2)
            return u'%s%s%s' % (s[0:sp], placeholder, s[len(s)-sp-1+(l%2):])
        else:
            return s
    def _squash_right(self, s, l, placeholder='...'):
        """Shorten `s` to length `l` by truncating its tail to `placeholder`."""
        pl = len(placeholder)
        if len(s) > l:
            return u'%s%s' % (s[0:l-pl], placeholder)
        else:
            return s
    def _clean_str(self, s):
        """Collapse whitespace runs to single spaces and strip the ends."""
        s = to_text(s)
        s = re.sub(r'\s+', r' ', s, flags=re.UNICODE)
        return s.strip()
    def _indent(self, indent, s):
        """Prefix every line of `s` with `indent`, preserving newlines."""
        parts = re.split(r'(\n)', s)
        return ''.join(p if p == "\n" else '%s%s' % (indent, p) for p in parts)
class CallbackModule(LiveCallbackHelpers):
    # Ansible stdout callback plugin registration metadata.
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'live'
    HEADER_PLACEHOLDER = '...'
    # Width budget for "task name + args info" in the one-line task header.
    HEADER_NAME_INFO_LEN = 55
    HEADER_INFO_MIN_LEN = 5 + 3 + 5 # 5 letters from the edges and a placeholder length
    # name for this tasks can be generated from free_form (_raw_params argument)
    FREE_FORM_MODULES = ('raw', 'script', 'command', 'shell', 'meta')
    # Modules that are optimized by squashing loop items into a single call to
    # the module, mostly packaging modules with name argument
    # (apt, apk, dnf, homebrew, openbsd_pkg, pacman, pkgng, yum, zypper)
    SQUASH_LOOP_MODULES = frozenset(C.DEFAULT_SQUASH_ACTIONS)
    def __init__(self):
        super(CallbackModule, self).__init__()
        # presumably assigned by play-start callbacks not visible in this
        # chunk -- confirm before relying on it.
        self._play = None
        self._live_stdout_listener = LiveStdoutListener()
# header format is:
# action 'task name' [significant args info]
# if task name length exceed its maximum then format is:
# action 'task name'
# if no task name:
# action [significant args info]
# task name and significant args info are squashed to fit into available space
def _task_details(self, task, start=False):
task_name = self._clean_str(task.name)
info = self._get_task_info_from_args(task, start) or ''
if info != '':
info_space = self.HEADER_NAME_INFO_LEN - len(task_name)
if info_space >= self.HEADER_INFO_MIN_LEN or info_space >= len(info):
info = ' [%s]' % self._squash_center(info, info_space-3)
else:
info = ''
if task_name != '':
if len(task_name)+len(info) > self.HEADER_NAME_INFO_LEN:
task_name = self._squash_right(task_name, self.HEADER_NAME_INFO_LEN-len(info))
task_name = " '%s'" % task_name
return u'%s%s%s' % (task.action, task_name, info)
# item details format is:
# action 'task name' item 'item_name'
# if no task_name:
# action item 'item_name'
# task_name and item_name are squashed if cannot fit into available space
def _item_details(self, task, item_result):
task_name = self._clean_str(task.name)
if '_ansible_item_label' in item_result:
item_name = item_result.get('_ansible_item_label','')
else:
item_name = self._clean_str(item_result.get('item', ''))
if task_name != '':
task_space = self.HEADER_NAME_INFO_LEN - len(item_name)
if task_space >= self.HEADER_INFO_MIN_LEN or task_space >= len(task_name):
task_name = self._squash_right(task_name, task_space - 3)
task_name = " '%s'" % task_name
else:
task_name = ''
if item_name != '':
if len(task_name)+len(item_name) > self.HEADER_NAME_INFO_LEN:
item_name = self._squash_right(item_name, self.HEADER_NAME_INFO_LEN-len(task_name))
item_name = " item '%s'" % (item_name)
return u'%s%s%s' % (task.action, task_name, item_name)
# Return content from significant arguments for well known modules
# Also support items for the loops.
def _get_task_info_from_args(self, task, start=False):
info = ''
if task.action in self.FREE_FORM_MODULES:
info = task.args.get('_raw_params', '')
if task.action == 'file':
info = task.args.get('path','')
if task.action == 'copy':
info = task.args.get('dest','')
if task.action == 'group':
info = task.args.get('name','')
if task.action == 'user':
info = task.args.get('name','')
if task.action == 'get_url':
info = task.args.get('url','')
if task.action == 'getent':
db = task.args.get('database','')
key = task.args.get('key','')
info = '%s %s' % (db, key)
if task.action == 'apk':
info = task.args.get('name', '')
if task.action == 'apt':
info1 = task.args.get('name', None)
info2 = task.args.get('package', None)
info3 = task.args.get('pkg', None)
info = ', '.join(list(self._flatten([info1, info2, info3])))
if task.action == 'apt_repository':
info = task.args.get('repo', '')
if task.action == 'apt_key':
info = task.args.get('id', '')
if task.action == 'unarchive':
info = task.args.get('src', '')
if task.action == 'locale_gen':
info = task.args.get('name', '')
if task.action == 'lineinfile':
info = task.args.get('path', '')
if task.action == 'blockinfile':
info = task.args.get('path', '')
if task.action == 'composer':
info = task.args.get('command', 'install')
if task.loop and start:
loop_args = task.loop
if len(loop_args) > 0:
info = "'%s' over %s" % (info, to_text(loop_args))
return self._clean_str(info)
# display task result content with indentation
# Normally each item is displayed separately. But there are squashed
# modules, where items are squashed into list and the result is in the first 'results' item.
def _display_msg(self, task, result, color):
if task.action in self.SQUASH_LOOP_MODULES and 'results' in result:
if len(result['results']) > 0:
self.LogArgs(stringc(result['results'][0], color), "\n")
return
# prevent dublication of stdout in case of live_stdout
if not self._live_stdout_listener.is_live_stdout():
stdout = result.get('stdout', result.get('module_stdout', ''))
if stdout:
self.LogArgs(vt100.bold, "stdout:", vt100.reset, "\n")
self.LogArgs(self._indent(' ', stdout), "\n")
stderr = result.get('stderr', result.get('module_stderr', ''))
if stderr:
self.LogArgs(vt100.bold, "stderr:", vt100.reset, "\n")
self.LogArgs(self._indent(' ', stringc(stderr, C.COLOR_ERROR)), "\n")
if self._msg_is_needed(task, result):
self.LogArgs(stringc(u"\n".join(self._flatten(result['msg'])), color), "\n")
if 'rc' in result:
exitCode = result['rc']
exitColor = C.COLOR_OK
if exitCode != '0' and exitCode != 0:
exitColor = C.COLOR_ERROR
self.LogArgs(stringc('exit code: %s' % exitCode, exitColor), "\n")
def _msg_is_needed(self, task, result):
if 'msg' not in result:
return False
# No need to display msg for loop task, because each item is displayed separately.
# Msg is needed if there are no items.
if 'results' in result:
if len(result['results']) > 0:
return False
# TODO more variants...
return True
def _display_debug_msg(self, task, result):
#if (self._display.verbosity > 0 or '_ansible_verbose_always' in result) and '_ansible_verbose_override' not in result:
if task.args.get('msg'):
color = C.COLOR_OK
msg = u"\n".join(self._flatten(result.get('msg', '')))
if task.args.get('var'):
var_key = task.args.get('var')
if isinstance(var_key, (list, dict)):
var_key = to_text(type(var_key))
var_obj = result.get(var_key)
self.LogArgs(vt100.bold,
"var=%s" % to_text(task.args.get('var')),
", ", stringc(to_text(type(var_obj)), C.COLOR_DEBUG),
vt100.reset, "\n")
if isinstance(var_obj, (unicode, str, bytes)):
color = C.COLOR_OK
if 'IS NOT DEFINED' in var_obj:
color = C.COLOR_ERROR
msg = var_obj
else:
color = C.COLOR_OK
msg = json.dumps(var_obj, indent=4)
self.LogArgs(stringc(msg, color), "\n")
# TODO remove stdout here if live_stdout!
# TODO handle results for looped tasks
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
if not indent and (result.get('_ansible_verbose_always') or self._display.verbosity > 2):
indent = 4
# All result keys stating with _ansible_ are internal, so remove them from the result before we output anything.
abridged_result = strip_internal_keys(result)
# remove invocation unless specifically wanting it
if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
del abridged_result['invocation']
# remove diff information from screen output
if self._display.verbosity < 3 and 'diff' in result:
del abridged_result['diff']
# remove exception from screen output
if 'exception' in abridged_result:
del abridged_result['exception']
# remove msg, failed, changed
#if 'msg' in abridged_result:
# del abridged_result['msg']
if 'failed' in abridged_result:
del abridged_result['failed']
if 'changed' in abridged_result:
del abridged_result['changed']
if len(abridged_result) > 0:
return json.dumps(abridged_result, indent=indent, ensure_ascii=False, sort_keys=sort_keys)
return ''
def v2_playbook_on_play_start(self, play):
self._play = play
logboek.Init()
try:
cols = int(os.environ['COLUMNS'])
except:
cols = 140
#cols=60
self.HEADER_NAME_INFO_LEN = cols-2
logboek.SetTerminalWidth(cols)
logboek.EnableFitMode()
#logboek.LogProcessStart(play.name)
self._live_stdout_listener.start()
def v2_playbook_on_stats(self, stats):
#pass
self._live_stdout_listener.stop()
#if stats.failures:
# logboek.LogProcessFail()
#else:
# logboek.LogProcessEnd()
def v2_playbook_on_task_start(self, task, is_conditional):
self._display.v("TASK action=%s args=%s" % (task.action, json.dumps(task.args, indent=4)))
if self._play.strategy == 'free':
return
# task header line
logboek.LogProcessStart(self._task_details(task, start=True).encode('utf-8'))
# reset live_stdout flag on task start
self._live_stdout_listener.set_live_stdout(False)
def v2_runner_on_ok(self, result):
self._display.v("TASK action=%s OK => %s" % (result._task.action, json.dumps(result._result, indent=4)))
self._clean_results(result._result, result._task.action)
self._handle_warnings(result._result)
try:
task = result._task
color = C.COLOR_OK
if 'changed' in result._result and result._result['changed']:
color = C.COLOR_CHANGED
# task result info if any
if task.action == 'debug':
self._display_debug_msg(result._task, result._result)
else:
self._display_msg(result._task, result._result, color)
except Exception as e:
self.LogArgs(stringc(u'Exception: %s'%e, C.COLOR_ERROR), "\n")
finally:
# task footer line
logboek.LogProcessEnd()
def v2_runner_item_on_ok(self, result):
self._display.v("TASK action=%s item OK => %s" % (result._task.action, json.dumps(result._result, indent=4)))
self._clean_results(result._result, result._task.action)
self._handle_warnings(result._result)
task = result._task
if task.action in self.SQUASH_LOOP_MODULES:
return
color = C.COLOR_OK
if 'changed' in result._result and result._result['changed']:
color = C.COLOR_CHANGED
# item result info if any
if task.action == 'debug':
self._display_debug_msg(result._task, result._result)
else:
self._display_msg(result._task, result._result, color)
logboek.LogProcessStepEnd(u''.join([
vt100.reset, vt100.bold,
self._clean_str(self._item_details(task, result._result)), vt100.reset,
' ',
stringc(u'[OK]', color)
]).encode('utf-8')
)
# reset live_stdout flag on item end
self._live_stdout_listener.set_live_stdout(False)
def v2_runner_on_failed(self, result, **kwargs):
self._display.v("TASK action=%s FAILED => %s" % (result._task.action, json.dumps(result._result, indent=4)))
self._handle_exception(result._result)
self._handle_warnings(result._result)
try:
task = result._task
# task result info if any
self._display_msg(task, result._result, C.COLOR_ERROR)
except Exception as e:
logboek.Log(u"{0}\n".format(e).encode("utf-8"))
finally:
logboek.LogProcessFail()
def v2_runner_item_on_failed(self, result, ignore_errors=False):
self._display.v("TASK action=%s ITEM FAILED => %s" % (result._task.action, json.dumps(result._result, indent=4)))
self._handle_exception(result._result)
self._handle_warnings(result._result)
task = result._task
if task.action in self.SQUASH_LOOP_MODULES:
return
# task item result info if any
self._display_msg(task, result._result, C.COLOR_ERROR)
# task item status line
logboek.LogProcessStepEnd(u''.join([
vt100.reset, vt100.bold,
self._clean_str(self._item_details(task, result._result)), vt100.reset,
' ',
stringc(u'[FAIL]', C.COLOR_ERROR),
]).encode('utf-8')
)
# reset live_stdout flag on item end
self._live_stdout_listener.set_live_stdout(False)
def v2_runner_on_skipped(self, result):
self.LogArgs(stringc("SKIPPED", C.COLOR_SKIP), "\n")
logboek.LogProcessEnd()
# Implemented for completeness. Local connection cannot be unreachable.
def v2_runner_on_unreachable(self, result):
self.LogArgs(stringc("UNREACHABLE!", C.COLOR_UNREACHABLE), "\n")
logboek.LogProcessEnd()
def v2_on_file_diff(self, result):
if 'diff' in result._result and result._result['diff']:
self.LogArgs(self._get_diff(result._result['diff']), "\n")
def _handle_exception(self, result, use_stderr=False):
if 'exception' in result:
msg = "An exception occurred during task execution. The full traceback is:\n" + result['exception']
del result['exception']
self.LogArgs(stringc(msg, C.COLOR_ERROR))
def _handle_warnings(self, res):
''' display warnings, if enabled and any exist in the result '''
if C.ACTION_WARNINGS:
if 'warnings' in res and res['warnings']:
for warning in res['warnings']:
self.LogArgs(stringc(u'[WARNING]: %s' % warning, C.COLOR_WARN))
del res['warnings']
if 'deprecations' in res and res['deprecations']:
for warning in res['deprecations']:
self.LogArgs(stringc(self._deprecated_msg(**warning), C.COLOR_DEPRECATE))
del res['deprecations']
def _deprecated_msg(self, msg, version=None, removed=False):
''' used to print out a deprecation message.'''
if not removed and not C.DEPRECATION_WARNINGS:
return
if not removed:
if version:
new_msg = "[DEPRECATION WARNING]: %s. This feature will be removed in version %s." % (msg, version)
else:
new_msg = "[DEPRECATION WARNING]: %s. This feature will be removed in a future release." % (msg)
new_msg = new_msg + " Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.\n\n"
else:
raise AnsibleError("[DEPRECATED]: %s.\nPlease update your playbooks." % msg)
return new_msg
| 37.018587 | 129 | 0.593995 |
97ea9ffef8fd49a580ded669a1dd10014162c433 | 6,957 | py | Python | math/math.py | donutloop/machine_learning_examples | 46192a57e2dd194925ae76d6bfb169cd2af142dd | [
"MIT"
] | 1 | 2018-10-08T18:24:40.000Z | 2018-10-08T18:24:40.000Z | math/math.py | donutloop/machine_learning_examples | 46192a57e2dd194925ae76d6bfb169cd2af142dd | [
"MIT"
] | null | null | null | math/math.py | donutloop/machine_learning_examples | 46192a57e2dd194925ae76d6bfb169cd2af142dd | [
"MIT"
] | 1 | 2018-10-09T06:50:48.000Z | 2018-10-09T06:50:48.000Z | import math
import numpy as np
def basic_sigmoid(x):
"""
Compute sigmoid of x.
Arguments:
x -- A scalar
Return:
s -- sigmoid(x)
"""
s = 1 / (1+math.exp(-x))
return s
def sigmoid(x):
"""
Compute the sigmoid of x
Arguments:
x -- A scalar or numpy array of any size
Return:
s -- sigmoid(x)
"""
s = 1 / (1+np.exp(-x))
return s
def sigmoid_derivative(x):
"""
Compute the gradient (also called the slope or derivative) of the sigmoid function with respect to its input x.
You can store the output of the sigmoid function into variables and then use it to calculate the gradient.
Arguments:
x -- A scalar or numpy array
Return:
ds -- Your computed gradient.
"""
s = sigmoid(x)
ds =s * (1 - s)
return ds
def image2vector(image):
"""
Argument:
image -- a numpy array of shape (length, height, depth)
Returns:
v -- a vector of shape (length*height*depth, 1)
"""
v = image.reshape((image.shape[0]*image.shape[1]*image.shape[2], 1))
return v
def normalizeRows(x):
"""
Implement a function that normalizes each row of the matrix x (to have unit length).
Argument:
x -- A numpy matrix of shape (n, m)
Returns:
x -- The normalized (by row) numpy matrix. You are allowed to modify x.
"""
x_norm = np.linalg.norm(x, ord = 2, axis = 1, keepdims = True)
# Divide x by its norm.
x = x / x_norm
return x
def softmax(x):
"""Calculates the softmax for each row of the input x.
Your code should work for a row vector and also for matrices of shape (n, m).
Argument:
x -- A numpy matrix of shape (n,m)
Returns:
s -- A numpy matrix equal to the softmax of x, of shape (n,m)
"""
# Apply exp() element-wise to x.
x_exp = np.exp(x)
# Create a vector x_sum that sums each row of x_exp.
x_sum = np.sum(x_exp, axis = 1, keepdims = True)
# Compute softmax(x) by dividing x_exp by x_sum.
s = x_exp / x_sum
return s
def any():
x1 = [9, 2, 5, 0, 0, 7, 5, 0, 0, 0, 9, 2, 5, 0, 0]
x2 = [9, 2, 2, 9, 0, 9, 2, 5, 0, 0, 9, 2, 5, 0, 0]
### VECTORIZED DOT PRODUCT OF VECTORS ###
dot = np.dot(x1,x2)
### VECTORIZED OUTER PRODUCT ###
outer = np.outer(x1,x2)
### VECTORIZED ELEMENTWISE MULTIPLICATION ###
mul = np.multiply(x1,x2)
### VECTORIZED GENERAL DOT PRODUCT ###
W = np.random.rand(3,len(x1))
dot = np.dot(W,x1)
def L1(yhat, y):
"""
Arguments:
yhat -- vector of size m (predicted labels)
y -- vector of size m (true labels)
Returns:
loss -- the value of the L1 loss function defined above
"""
loss = np.sum(np.abs(yhat-y))
return loss
def L2(yhat, y):
"""
Arguments:
yhat -- vector of size m (predicted labels)
y -- vector of size m (true labels)
Returns:
loss -- the value of the L2 loss function defined above
"""
loss = sum((y-yhat)**2)
return loss
# GRADED FUNCTION: propagate
def propagate(w, b, X, Y):
"""
Implement the cost function and its gradient for the propagation explained above
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)
Return:
cost -- negative log-likelihood cost for logistic regression
dw -- gradient of the loss with respect to w, thus same shape as w
db -- gradient of the loss with respect to b, thus same shape as b
Tips:
- Write your code step by step for the propagation. np.log(), np.dot()
"""
m = X.shape[1]
A = sigmoid(np.dot(w.T,X) + b)
cost = -1 / m * np.sum(Y*np.log(A)+(1-Y)*np.log(1-A), axis = 1, keepdims = True)
dw = 1 / m * np.dot(X,(A-Y).T)
db = 1 / m * np.sum(A-Y)
assert(dw.shape == w.shape)
assert(db.dtype == float)
cost = np.squeeze(cost)
assert(cost.shape == ())
grads = {"dw": dw,
"db": db}
return grads, cost
# GRADED FUNCTION: optimize
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
"""
This function optimizes w and b by running a gradient descent algorithm
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of shape (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- True to print the loss every 100 steps
Returns:
params -- dictionary containing the weights w and bias b
grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.
Tips:
You basically need to write down two steps and iterate through them:
1) Calculate the cost and the gradient for the current parameters. Use propagate().
2) Update the parameters using gradient descent rule for w and b.
"""
costs = []
for i in range(num_iterations):
grads, cost = propagate(w, b, X, Y)
dw = grads["dw"]
db = grads["db"]
w = w - learning_rate * dw
b = b - learning_rate * db
if i % 100 == 0:
costs.append(cost)
if print_cost and i % 100 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
params = {"w": w,
"b": b}
grads = {"dw": dw,
"db": db}
return params, grads, costs
def predict(w, b, X):
'''
Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Returns:
Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
'''
m = X.shape[1]
Y_prediction = np.zeros((1,m))
w = w.reshape(X.shape[0], 1)
# Compute vector "A" predicting the probabilities of a cat being present in the picture
A = sigmoid(np.dot(w.T,X) + b)
for i in range(A.shape[1]):
Y_prediction[0,i] = np.where(A[0,i]>0.5,1,0)
assert(Y_prediction.shape == (1, m))
return Y_prediction | 25.577206 | 115 | 0.575248 |
c9d3428bb14877481ecdeaeaaae42ffe20282b24 | 2,373 | py | Python | pysamss/gui/mayavi_qwidget.py | c-bruce/pysamss | b37206a39391e370da7ab367087ba3c2b29b1d9f | [
"MIT"
] | 1 | 2020-05-02T18:47:50.000Z | 2020-05-02T18:47:50.000Z | pysamss/gui/mayavi_qwidget.py | c-bruce/pysamss | b37206a39391e370da7ab367087ba3c2b29b1d9f | [
"MIT"
] | null | null | null | pysamss/gui/mayavi_qwidget.py | c-bruce/pysamss | b37206a39391e370da7ab367087ba3c2b29b1d9f | [
"MIT"
] | null | null | null | # Date: 16/06/2019
# Author: Callum Bruce
# MayaviQWidget Class
from tvtk.api import tvtk
from tvtk.pyface.scene_editor import SceneEditor
from mayavi import mlab
from mayavi.tools.engine_manager import EngineManager
from mayavi.tools.mlab_scene_model import MlabSceneModel
from mayavi.core.ui.mayavi_scene import MayaviScene
from mayavi.core.api import PipelineBase, Source, Engine
from traits.api import HasTraits, Instance, Array
from traitsui.api import View, Item
from pyface.qt import QtGui, QtCore
def plotCylinder(scene, radius, height, center):
"""
Plot cylinder using mayavi and tvtk.
Args:
figure (mlab.figure): Mayavi figure for plot.
radius (float): Cylinder radius (m).
height (float): Cylinder height (m).
center (list): Cylinder center [x, y, z] (m).
"""
cylinder = tvtk.CylinderSource(radius=radius, height=height, center=[-center[1], center[0], center[2]], resolution=90)
cylinder_mapper = tvtk.PolyDataMapper(input_connection=cylinder.output_port)
cylinder_actor = tvtk.Actor(mapper=cylinder_mapper, orientation=[0, 0, -90])
scene.add_actor(cylinder_actor)
class Visualization(HasTraits):
"""
Visualization class.
Notes:
- tvtk.Actor() objects can be added to self.scene in the normal way.
"""
scene3d = Instance(MlabSceneModel, ())
def __init__(self, **traits):
super(Visualization, self).__init__(**traits)
mlab.pipeline.scalar_field([[0]], figure=self.scene3d.mayavi_scene) # Weird work around to get self.scene3d.mlab.orientation_axes() working
self.scene3d.mlab.orientation_axes()
view = View(Item('scene3d', editor=SceneEditor(scene_class=MayaviScene), height=500, width=500, show_label=False), resizable=True)
class MayaviQWidget(QtGui.QWidget):
"""
MayaviQWidget class.
Notes:
- Can be added as a qt widget in the normal way.
"""
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
layout = QtGui.QVBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
self.visualization = Visualization()
# edit_traits call will generate the widget to embed
self.ui = self.visualization.edit_traits(parent=self, kind='subpanel').control
layout.addWidget(self.ui)
self.ui.setParent(self)
| 35.954545 | 147 | 0.705858 |
fff237644fbc4bbcef25e8d5ad15727f5e296024 | 546 | py | Python | bot/models/RSVP.py | jjcf89/Tabletop-Bot | 242aadda8c50a751cdef1615ce4cdd769b588400 | [
"MIT"
] | 3 | 2017-11-07T02:18:11.000Z | 2020-11-06T02:21:46.000Z | bot/models/RSVP.py | jjcf89/Tabletop-Bot | 242aadda8c50a751cdef1615ce4cdd769b588400 | [
"MIT"
] | null | null | null | bot/models/RSVP.py | jjcf89/Tabletop-Bot | 242aadda8c50a751cdef1615ce4cdd769b588400 | [
"MIT"
] | 5 | 2017-08-23T13:39:42.000Z | 2021-03-04T11:23:12.000Z | from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy.orm import relationship, backref
from bot.Base import Base, Session
session = Session()
class RSVP(Base):
__tablename__ = 'rsvps'
id = Column(Integer(), primary_key=True)
event_id = Column(Integer(), ForeignKey('events.id'), index=True)
member_id = Column(Integer(), ForeignKey('members.id'), index=True)
member = relationship("Member", uselist=False, backref=backref('rsvps'))
event = relationship("Event", uselist=False, backref=backref('rsvps'))
| 30.333333 | 76 | 0.721612 |
a009c69a47a51fbd0f210dbc1995683361ad6a70 | 794 | py | Python | gcloud/taskflow3/__init__.py | gangh/bk-sops | 29f4b4915be42650c2eeee637e0cf798e4066f09 | [
"Apache-2.0"
] | 1 | 2019-12-23T07:23:35.000Z | 2019-12-23T07:23:35.000Z | gcloud/taskflow3/__init__.py | bk-sops/bk-sops | 9f5950b13473bf7b5032528b20016b7a571bb3cd | [
"Apache-2.0"
] | 9 | 2020-02-12T03:15:49.000Z | 2021-06-10T22:04:51.000Z | gcloud/taskflow3/__init__.py | tanghaiyong1989/bk-sops-ce | 7388914acc4004469982d6b5bf9cd7641bdf82f7 | [
"Apache-2.0"
] | 1 | 2022-01-17T11:32:05.000Z | 2022-01-17T11:32:05.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
default_app_config = 'gcloud.taskflow3.apps.Taskflow3Config'
| 52.933333 | 115 | 0.792191 |
bcc6bb0be6e1557832a87972ca0d3f1911b8d328 | 10,094 | py | Python | stewicombo/globals.py | USEPA/standardizedinventories | e3ba2b4056c6ae5252bf37690688c29bf674468f | [
"CC0-1.0"
] | 14 | 2018-10-11T19:36:16.000Z | 2022-03-31T18:23:28.000Z | stewicombo/globals.py | USEPA/standardizedinventories | e3ba2b4056c6ae5252bf37690688c29bf674468f | [
"CC0-1.0"
] | 50 | 2018-07-25T14:04:12.000Z | 2022-03-07T18:02:33.000Z | stewicombo/globals.py | USEPA/standardizedinventories | e3ba2b4056c6ae5252bf37690688c29bf674468f | [
"CC0-1.0"
] | 13 | 2018-05-18T06:32:43.000Z | 2021-09-22T19:17:46.000Z | # globals.py (stewicombo)
# !/usr/bin/env python3
# coding=utf-8
"""
Supporting variables and functions used in stewicombo
"""
import re
import os
import pandas as pd
import chemicalmatcher
import stewi
from stewi.globals import log, set_stewi_meta, flowbyfacility_fields,\
WRITE_FORMAT
from esupy.processed_data_mgmt import Paths, write_df_to_file,\
write_metadata_to_file, load_preprocessed_output, read_into_df,\
download_from_remote
try: MODULEPATH = os.path.dirname(
os.path.realpath(__file__)).replace('\\', '/') + '/'
except NameError: MODULEPATH = 'stewicombo/'
data_dir = MODULEPATH + 'data/'
paths = Paths()
paths.local_path = os.path.realpath(paths.local_path + "/stewicombo")
output_dir = paths.local_path
INVENTORY_PREFERENCE_BY_COMPARTMENT = {"air":["eGRID","GHGRP","NEI","TRI"],
"water":["DMR", "TRI"],
"soil":["TRI"],
"waste":["RCRAInfo","TRI"],
"output":["eGRID"]}
LOOKUP_FIELDS = ["FRS_ID", "Compartment", "SRS_ID"]
# pandas might infer wrong type, force cast skeptical columns
FORCE_COLUMN_TYPES = {
"SRS_CAS": "str"
}
KEEP_ALL_DUPLICATES = True
INCLUDE_ORIGINAL = True
KEEP_ROW_WITHOUT_DUPS = True
SOURCE_COL = "Source"
COMPARTMENT_COL = "Compartment"
COL_FUNC_PAIRS = {
"FacilityID": "join_with_underscore",
"FlowAmount": "sum",
"DataReliability": "reliablity_weighted_sum:FlowAmount"
}
COL_FUNC_DEFAULT = "get_first_item"
VOC_srs = pd.read_csv(data_dir+'VOC_SRS_IDs.csv',
dtype=str,index_col=False,header=0)
VOC_srs = VOC_srs['SRS_IDs']
def set_stewicombo_meta(file_name, category=''):
"""Creates a class of esupy FileMeta; category used for optional
categorization"""
stewicombo_meta = set_stewi_meta(file_name, category)
stewicombo_meta.tool = "stewicombo"
return stewicombo_meta
def get_id_before_underscore(inventory_id):
"""Removes substring from inventory name"""
underscore_match = re.search('_', inventory_id)
if underscore_match is not None:
inventory_id = inventory_id[0:underscore_match.start()]
return inventory_id
def getInventoriesforFacilityMatches(inventory_dict, facilitymatches,
filter_for_LCI, base_inventory=None):
"""
Retrieves stored flowbyfacility datasets based on passed dictionary
and filters them if necessary. Returns only those facilities with an FRS_ID
except for those in the base_inventory where all are returned
: param inventory_dict:
: param facilitymatches: dataframe matching FacilityMatches format
: param filter_for_LCI:
: param base_inventory:
"""
if base_inventory is not None:
# Identify the FRS in the base inventory and keep only those
# base_inventory_FRS = facilitymatches[
# facilitymatches['Source'] == base_inventory]
base_FRS_list = list(pd.unique(facilitymatches[
facilitymatches['Source'] == base_inventory]['FRS_ID']))
columns_to_keep = list(flowbyfacility_fields.keys()) + ['Source',
'Year','FRS_ID']
inventories = pd.DataFrame()
for k in inventory_dict.keys():
inventory = stewi.getInventory(k, inventory_dict[k],
'flowbyfacility',
filter_for_LCI = filter_for_LCI)
if inventory is None:
continue
inventory["Source"] = k
# Merge in FRS_ID, ensure only single FRS added per facility ID, keeping
# first listed
facmatches = facilitymatches[facilitymatches['Source'] == k]
facmatches = facmatches.drop_duplicates(subset=['FacilityID','Source'],
keep='first')
inventory = pd.merge(inventory,
facmatches,
on=['FacilityID', 'Source'], how='left')
if inventory['FRS_ID'].isna().sum() > 0:
log.debug('Some facilities missing FRS_ID')
# If this isn't the base inventory, filter records for facilities not
# found in the base inventory
if (k is not base_inventory) & (base_inventory is not None):
inventory = inventory[inventory['FRS_ID'].isin(
base_FRS_list)]
# Add metadata
inventory["Year"] = inventory_dict[k]
cols_to_keep = [c for c in columns_to_keep if c in inventory]
inventory = inventory[cols_to_keep]
inventories = pd.concat([inventories,inventory], ignore_index=True)
return inventories
def addChemicalMatches(inventories_df):
"""Adds data for chemical matches to inventory or combined inventory df
"""
#Bring in chemical matches
inventory_list = list(inventories_df['Source'].unique())
chemicalmatches = chemicalmatcher.get_matches_for_StEWI(
inventory_list = inventory_list)
chemicalmatches = chemicalmatches[
chemicalmatches['Source'].isin(inventory_list)]
chemicalmatches = chemicalmatches.drop(columns=['FlowID'])
chemicalmatches = chemicalmatches.drop_duplicates(subset=['FlowName',
'Source'])
inventories = pd.merge(inventories_df,
chemicalmatches,
on=['FlowName','Source'],
how='left')
# Compare unmatched flows to flows_missing_SRS_ list to ensure none missing
missing_flows = inventories.loc[
inventories['SRS_ID'].isna()][['FlowName','Source']].drop_duplicates()
cm_missing = chemicalmatcher.read_cm_file('missing')
missing_flows = missing_flows.assign(missing = missing_flows['FlowName'].\
isin(cm_missing['FlowName'])==False)
if sum(missing_flows.missing)>0:
log.warning('New unknown flows identified, run chemicalmatcher')
return inventories
def addBaseInventoryIDs(inventories,facilitymatches,base_inventory):
#Add in base program ids
base_inventory_FRS = facilitymatches[
facilitymatches['Source'] == base_inventory]
base_inventory_FRS = base_inventory_FRS[['FacilityID','FRS_ID']]
#If there are more than one PGM_SYS_ID duplicates, choose only the first
base_inventory_FRS_first = base_inventory_FRS.drop_duplicates(
subset='FRS_ID',keep='first')
colname_base_inventory_id = base_inventory + '_ID'
base_inventory_FRS_first = base_inventory_FRS_first.rename(
columns={"FacilityID":colname_base_inventory_id})
#Merge this based with inventories
inventories = pd.merge(inventories,base_inventory_FRS_first,on='FRS_ID',
how='left')
#Put original facilityID into the new column when its is the source of
# the emission. This corrects mismatches in the case of more than
# one base inventory id to FRS_ID
inventory_acronyms = pd.unique(inventories['Source'])
if base_inventory in inventory_acronyms:
#The presence of an underscore indicates more than one facilityid
# was used. If this is the case, get it before the underscore
inventories['FacilityID_first'] = inventories['FacilityID']
inventories['FacilityID_first'] = inventories['FacilityID_first'].\
apply(lambda x: get_id_before_underscore(x))
inventories.loc[inventories['Source']==base_inventory,
colname_base_inventory_id] = inventories['FacilityID_first']
inventories = inventories.drop(columns='FacilityID_first')
return inventories
def storeCombinedInventory(df, file_name, category=''):
"""Stores the inventory dataframe to local directory based on category"""
meta = set_stewicombo_meta(file_name, category)
method_path = output_dir + '/' + meta.category
try:
log.info('saving ' + meta.name_data + ' to ' + method_path)
write_df_to_file(df,paths,meta)
except:
log.error('Failed to save inventory')
def getCombinedInventory(name, category=''):
"""Reads the inventory dataframe from local directory
:param name: str, name of dataset or name of file
"""
if ("."+WRITE_FORMAT) in name:
method_path = output_dir + '/' + category
inventory = read_into_df(method_path + name)
else:
meta = set_stewicombo_meta(name, category)
method_path = output_dir + '/' + meta.category
inventory = load_preprocessed_output(meta, paths)
if inventory is None:
log.info('%s not found in %s', name, method_path)
else:
log.info('loaded %s from %s',name, method_path)
return inventory
def download_stewicombo_from_remote(name):
"""Prepares metadata and downloads file via esupy"""
meta = set_stewicombo_meta(name, category = '')
log.info('attempting download of %s from %s', name, paths.remote_path)
download_from_remote(meta, paths)
def write_stewicombo_metadata(file_name, metadata_dict, category=''):
"""writes metadata specific to the combined inventory file to local
directory as a JSON file
:param file_name: str used as name of combined inventory
:param metadata_dict: dictionary of metadata to save
:param category: str, optional to save within a subfolder
"""
meta = set_stewicombo_meta(file_name, category=category)
meta.tool_meta = metadata_dict
write_metadata_to_file(paths, meta)
def compile_metadata(inventory_dict):
"""Compiles metadata from stewi inventory files for use in stewicombo
metadata file"""
inventory_meta = {}
#inventory_meta['InventoryDictionary'] = inventory_dict
for source, year in inventory_dict.items():
inventory_meta[source] = stewi.getMetadata(source, year)
return inventory_meta
def filter_by_compartment(df, compartments):
#TODO disaggregate compartments to include all children
df = df[df['Compartment'].isin(compartments)]
return df
| 41.2 | 84 | 0.665247 |
ce8860221dd99ed40ec1cf5777f77bed78b1c32a | 16,866 | py | Python | tests/MD/NM_FFopt_SpecFit2/FGrph_NMA_per.py | slamavl/QChemTool | b6b17adf6cfa8ac1db47acba93aab1ee49c1be47 | [
"MIT"
] | null | null | null | tests/MD/NM_FFopt_SpecFit2/FGrph_NMA_per.py | slamavl/QChemTool | b6b17adf6cfa8ac1db47acba93aab1ee49c1be47 | [
"MIT"
] | 1 | 2018-01-03T12:08:41.000Z | 2018-01-03T12:08:41.000Z | tests/MD/NM_FFopt_SpecFit2/FGrph_NMA_per.py | slamavl/QChemTool | b6b17adf6cfa8ac1db47acba93aab1ee49c1be47 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 17 16:27:58 2018
@author: Vladislav Sláma
"""
import numpy as np
from scipy.optimize import minimize
from QChemTool.General.units import conversion_facs_position as conv_pos
from QChemTool.General.UnitsManager import position_units
from QChemTool.QuantumChem.Classes.structure import Structure
from QChemTool.QuantumChem.Fluorographene.fluorographene import get_AMBER_MD_normal_modes,get_border_carbons_FG,Optimize_MD_AMBER_structure,constrainsFG
from QChemTool.QuantumChem.vibration import NormalModes_mol
from QChemTool.QuantumChem.Fluorographene.fluorographene import orientFG
from QChemTool.Development.polarizablesytem_periodic import PolarizableSystem
from QChemTool.Spectroscopy.spectraldensity import SpectralDensity
from QChemTool import FrequencyAxis
from QChemTool.General.UnitsManager import frequency_units
from QChemTool.QuantumChem.positioningTools import project_on_plane,fit_plane
global frcmod_filename,struc,state,FF_param
import matplotlib.pyplot as plt
optimize = False
optimize_type = 'Geom' # 'Geom', 'Freq', 'Geom+Freq'
compare_w_gauss = False
IsotropicPolar = True
charges = 'ESPfit'
state='Ground'
FF_param = {'equilibrium': {}, 'force': {}}
# =============================================================================
# Specify parameters with different then default FF value
# =============================================================================
# Parameters obtained from fitting geometry
FF_param['equilibrium']['c3-c3'] = 1.49667 #1.49652 #1.5350
FF_param['equilibrium']['cb-c3'] = 1.58288 #1.58260 #1.5350
FF_param['equilibrium']['cb-cb'] = 1.58356 #1.58270 #1.5350
FF_param['equilibrium']['c3-f'] = 1.46121 #1.45985
FF_param['equilibrium']['c3-cb-c3'] = 63.0473 #92.2077
FF_param['equilibrium']['cb-cb-c3'] = 44.4324 #83.4047
FF_param['equilibrium']['c3-c3-c3'] = 106.0817 #111.4023
FF_param['equilibrium']['ca-ca'] = 1.43 #1.45985
FF_param['equilibrium']['ca-c3'] = 1.54 #1.45985
FF_param['equilibrium']['ca-ca-c3'] = 130.71 # 120.63
FF_param['equilibrium']['ca-c3-c3'] = 131.63 # 112.09
# parameters obtained from normal mode calculation
FF_param['force']['c3-c3'] = 259.68 #262.36 #166.584 # 303.1
FF_param['force']['cb-c3'] = 327.07 #321.84 #147.702 # 303.1
FF_param['force']['cb-cb'] = 304.61 #317.40 #261.708 # 303.1
FF_param['force']['c3-f'] = 335.36 #334.06 #275.709 # 363.8
FF_param['force']['cb-fb'] = FF_param['force']['c3-f']
FF_param['force']['c3-c3-c3'] = 13.89 #38.14 #38.1812 # 63.21
FF_param['force']['cb-c3-c3'] = FF_param['force']['c3-c3-c3'] # 63.21
FF_param['force']['c3-cb-c3'] = FF_param['force']['c3-c3-c3'] # 63.21
FF_param['force']['cb-cb-c3'] = FF_param['force']['c3-c3-c3'] # 63.21
FF_param['force']['cb-c3-cb'] = FF_param['force']['c3-c3-c3'] # 63.21
FF_param['force']['cb-cb-cb'] = FF_param['force']['c3-c3-c3'] # 63.21
FF_param['force']['c3-c3-f'] = 40.97 #55.49 #23.0185 # 66.22
FF_param['force']['cb-cb-fb'] = FF_param['force']['c3-c3-f'] # 66.22
FF_param['force']['c3-cb-fb'] = FF_param['force']['cb-cb-fb'] # 66.22
# =============================================================================
#
# =============================================================================
# Set FG charges
if charges == 'Hirshfeld':
FG_charges = [0.08125,0.08125] # FG_charges[0]=inner carbon charge, FG_charges[0]= - border fluorine charge
elif charges == "ESPfit":
FG_charges = [-0.0522,-0.0522]
if compare_w_gauss or optimize:
print("Reading gaussian input files... ")
# read normal mode information from gaussian freq calculation
DIR = "/mnt/sda2/PhD/Ab-initio-META/Fluorographane/Freq"
fchk_filename = "".join([DIR,"/FGrph_perylene_symm_9x6_opt_freq.fchk"])
log_filename = "".join([DIR,"/FGrph_perylene_symm_9x6_opt_freq.log"])
from QChemTool.QuantumChem.Classes.molecule import Molecule
mol_gauss = Molecule("Frequency calculation")
mol_gauss.load_Gaussian_fchk(fchk_filename)
mol_gauss.load_Gaussian_log(log_filename)
freq_gauss = mol_gauss.vib_spec['Frequency']
print("Finished reading gaussian input files. ")
# Load initial structure
struc = Structure()
struc.load_xyz("FG_per_small.xyz")
# assign charges
border_C_indx,border_F_indx = get_border_carbons_FG(struc)
if state=='Ground':
struc.esp_grnd = np.zeros(struc.nat,dtype='f8')
charges = struc.esp_grnd # pointer to structure charges
elif state=='Excited':
struc.esp_exct = np.zeros(struc.nat,dtype='f8')
charges = struc.esp_exct # pointer to structure charges
elif state=='Transition':
struc.esp_trans = np.zeros(struc.nat,dtype='f8')
charges = struc.esp_trans # pointer to structure charges
for ii in range(struc.nat):
if struc.at_type[ii] == 'C':
charges[ii] = FG_charges[0]
elif struc.at_type[ii] == 'F':
charges[ii] = -FG_charges[0]
else:
raise Warning("Unknown atom type in structure")
charges[border_C_indx] = 2*FG_charges[1]
charges[border_F_indx] = -FG_charges[1]
def_indx = constrainsFG(struc,border=False,defect=True)
charges[def_indx] = 0.0
# Flatten the defect
nvec,origin = fit_plane(struc.coor._value[def_indx,:] )
struc.coor._value[def_indx,:] = project_on_plane(struc.coor._value[def_indx,:], nvec,origin)
# get defect indexes
use_VinterFG = False
FG_charges = 'ESPfit'
CoarseGrain="plane"
params_polar={"VinterFG": use_VinterFG,"coarse_grain": CoarseGrain,
"charge_type": FG_charges,"approximation": 1.1,"symm": IsotropicPolar}
elstat = {"structure": struc,"charge": FG_charges}
diel = {"structure": struc,"polar": params_polar}
params = {"energy_type": "QC","permivity": 1.0,"order": 2}
system = PolarizableSystem(diel = diel, elstat = elstat, params = params)
# identify defects - separated because now changes can be made to the database
system.identify_defects()
# set ground state charges on defects
for defect in system.defects:
charges[defect.index] = defect.get_charge(state='ground')
print(charges[defect.index])
# =============================================================================
# CALCULATION
# =============================================================================
# Optimize structure
NM_info, indx_orig2new_atoms, indx_new2orig_atoms = get_AMBER_MD_normal_modes(struc,state=state,gen_input=True,**FF_param)
#RMSD,struc,struc_old = Optimize_MD_AMBER_structure("nab_input.frcmod",struc,state=state,gen_input=True,struc_out=True,**FF_param)
struc = NM_info['struc']
#RMSD = RMSD*conv_pos["Angstrom"]
#print(RMSD)
indx_orig2new = np.zeros((struc.nat,3),dtype='i8')
indx_new2orig = np.zeros((struc.nat,3),dtype='i8')
for ii in range(3):
indx_orig2new[:,ii] = indx_orig2new_atoms*3+ii
indx_new2orig[:,ii] = indx_new2orig_atoms*3+ii
indx_orig2new = indx_orig2new.reshape(3*struc.nat)
indx_new2orig = indx_new2orig.reshape(3*struc.nat)
# =============================================================================
# Calculate difference between MM and QC NMA
# =============================================================================
def relative_NM_difference(NM_info, int2cart_gauss):
    """Match MM normal modes against the QC (Gaussian) normal modes.

    For every MM mode the QC mode with the smallest angular deviation is
    selected, allowing both parallel (angle ~ 0 deg) and antiparallel
    (angle ~ 180 deg) alignment of the displacement vectors.

    Fixes vs. the original:
      * the overlap matrix is clipped to [-1, 1] before ``arccos`` --
        floating-point rounding can push normalized dot products just
        outside the domain and produce NaN angles;
      * ``angle`` is accumulated in a float array (it previously used
        dtype 'i8', silently truncating the deviation to whole degrees).

    :param NM_info: dict with 'int2cart' (modes as columns) and 'freq'
    :param int2cart_gauss: QC normal modes, one mode per column
    :return: (dist, indx_corr, angle) where dist is the summed relative
        frequency difference of matched modes, indx_corr[i] is the QC
        mode matched to MM mode i, and angle[i] its deviation in degrees
    """
    overlap = np.dot(NM_info['int2cart'].T, int2cart_gauss)
    # clip: keep arccos in its valid domain despite rounding error
    AngleMat = np.abs(np.rad2deg(np.arccos(np.clip(overlap, -1.0, 1.0))))
    nmodes = AngleMat.shape[0]
    indx_corr = np.zeros(nmodes, dtype='i8')
    angle = np.zeros(nmodes, dtype='f8')
    for ii in range(nmodes):
        imin = np.argmin(AngleMat[ii, :])
        imax = np.argmax(AngleMat[ii, :])
        # pick whichever alignment (parallel/antiparallel) is closer
        if AngleMat[ii, imin] > np.abs(180.0 - AngleMat[ii, imax]):
            index = imax
            angle[ii] = 180.0 - AngleMat[ii, index]
        else:
            index = imin
            angle[ii] = AngleMat[ii, index]
        indx_corr[ii] = index
    # check if unique
    # relative frequency difference of the matched modes
    # (freq_gauss is a module-level global loaded from the Gaussian log)
    dist = np.sum(np.abs(NM_info['freq'] - freq_gauss[indx_corr]) / freq_gauss[indx_corr])
    return dist, indx_corr, angle
# reorder gaussian frequencies
if compare_w_gauss or optimize:
print("Calculating normal modes from gaussian hessian... ")
Freqcm1,RedMass,ForcesCm1Agstrom2,InternalToCartesian,CartesianToInternal,Units = NormalModes_mol(mol_gauss)
#int2cart_gauss = InternalToCartesian[indx_orig2new,:]
int2cart_gauss = InternalToCartesian
# NM_info['int2cart'] = NM_info['int2cart'][indx_new2orig,:]
print("Finished calculating normal modes. ")
if 0:
dif_vec = np.sum(np.linalg.norm(NM_info['int2cart'] - int2cart_gauss, axis=0) ) # sum of diferences
else:
dif_vec = np.sum( np.arccos( np.sum( NM_info['int2cart'] * int2cart_gauss ,axis=0) ) ) # sum of angles (in radians)
dist, indx_corr, angle = relative_NM_difference(NM_info,int2cart_gauss)
# =============================================================================
# Plot results
# =============================================================================
# reorder gaussian frequencies
if compare_w_gauss or optimize:
# plot histogram
plt.figure(2,figsize=(12,8))
step = 50
bins = np.arange(0,max(NM_info['freq'][-1],freq_gauss[-1]),50.0)
plt.hist(NM_info['freq'], alpha=0.5, normed=False, bins=bins, label='AMBER MD')
if compare_w_gauss or optimize:
plt.hist(freq_gauss, alpha=0.5, normed=False, bins=bins, label='Gaussian09')
plt.legend(["MD results","QC results"])
else:
plt.legend(["MD results"])
plt.xlabel('Frequency');
plt.xlabel('Count');
plt.show()
print("Reorientation molecule...")
#struc = orientFG(struc)
# reorder gaussian frequencies
if compare_w_gauss or optimize:
mol_gauss.rotate(0.0,np.pi/2.0,0.0)
mol_gauss.output_to_xyz("struc_reorient_gauss.xyz")
struc.output_to_xyz("struc_reorient_FG.xyz")
print("Reorientation molecule DONE")
# Initialize the system
use_VinterFG = False
FG_charges = 'ESPfit'
CoarseGrain="plane"
params_polar={"VinterFG": use_VinterFG,"coarse_grain": CoarseGrain,
"charge_type": FG_charges,"approximation": 1.1,"symm": IsotropicPolar}
elstat = {"structure": struc,"charge": FG_charges}
diel = {"structure": struc,"polar": params_polar}
params = {"energy_type": "QC","permivity": 1.0,"order": 2}
system = PolarizableSystem(diel = diel, elstat = elstat, params = params)
# identify defects - separated because now changes can be made to the database
system.identify_defects()
# correct for the negative frequency
for ii in range(len(NM_info["freq"])):
MASK = np.where(NM_info["freq"]>0.0)
print("Calculating system bth coupling...")
# Calculate coupling MM
Int2Cart = NM_info["int2cart"][:,MASK]
Freq = NM_info["freq"][MASK]
RedMass = NM_info["RedMass"][MASK]
print(system.get_SingleDefectProperties(0)[0].value)
g00 = system.get_gmm(0,Int2Cart,Freq,RedMass)
print("Calculating system bth coupling DONE")
# reorder gaussian frequencies
if compare_w_gauss or optimize:
# Initialize the system
use_VinterFG = False
FG_charges = 'ESPfit'
CoarseGrain="plane"
params_polar={"VinterFG": use_VinterFG,"coarse_grain": CoarseGrain,
"charge_type": FG_charges,"approximation": 1.1,"symm": IsotropicPolar}
elstat = {"structure": mol_gauss.struc,"charge": FG_charges}
diel = {"structure": mol_gauss.struc,"polar": params_polar}
params = {"energy_type": "QC","permivity": 1.0,"order": 2}
system_gauss = PolarizableSystem(diel = diel, elstat = elstat, params = params)
# identify defects - separated because now changes can be made to the database
system_gauss.identify_defects()
print("Calculating system bth coupling Gauss...")
# correct for the negative frequency
for ii in range(len(Freqcm1)):
MASK = np.where(Freqcm1>0.0)
# Calculate coupling Gaussinan
Int2Cart_gauss = mol_gauss.vib_spec['NMinCart'][:,MASK]
Freq_gauss = mol_gauss.vib_spec['Frequency'][MASK]
RedMass_gauss = mol_gauss.vib_spec['RedMass'][MASK]
print(system_gauss.get_SingleDefectProperties(0)[0].value)
g00_gauss = system_gauss.get_gmm(0,Int2Cart_gauss,Freq_gauss,RedMass_gauss)
print("Calculating system bth coupling Gauss DONE")
params_specdens_gauss={"sheer": 25.0, "coupling": g00_gauss, "freq": Freq_gauss, "ftype": "DiscreteModes"}
params_specdens={"sheer": 25.0, "coupling": g00, "freq": Freq, "ftype": "DiscreteModes"}
start=-5.0
step=0.0005
length=20000
plt.figure(3,figsize=(12,8))
freq_axis = FrequencyAxis(start=start,length=length,step=step,time_start=0.0)
with frequency_units("1/cm"):
specdens = SpectralDensity(axis = freq_axis, params = params_specdens)
specdens.plot()
# reorder gaussian frequencies
if compare_w_gauss or optimize:
specdens_gauss = SpectralDensity(axis = freq_axis, params = params_specdens_gauss)
specdens_gauss.plot()
plt.legend(["MD calculation","QC calculation"])
# NM_info["int2cart"] = InternalToCartesian
# NM_info["cart2int"] = CartesianToInternal
# NM_info["freq"] = Freqcm1
# NM_info["RedMass"] = RedMass
# NM_info['force'] = ForcesCm1Agstrom2
# NM_info['units'] = {"freq": "1/cm", "RedMass": "AMU(atomic mass units)",
# "force": "1/(cm * Angstrom^2)", "int2cart": "dimensionles",
# 'cart2int': "dimensionles"}
# {'equilibrium': {'cb-cb-c3': 93.821, 'c3-c3-c3': 112.575, 'c3-cb-c3': 99.781, 'c3-f': 1.45975, 'c3-c3': 1.50398, 'cb-cb': 1.58349, 'cb-c3': 1.58027}, 'force': {'cb-cb-c3': 46.161964772446098, 'c3-cb-c3': 46.161964772446098, 'cb-cb-cb': 46.161964772446098, 'c3-c3-f': 38.338517296715168, 'c3-c3-c3': 46.161964772446098, 'c3-c3': 149.92644957647627, 'cb-cb-fb': 38.338517296715168, 'c3-f': 281.97958014222456, 'cb-c3-c3': 46.161964772446098, 'cb-fb': 281.97958014222456, 'c3-cb-fb': 38.338517296715168, 'cb-cb': 281.64140482308568, 'cb-c3-cb': 46.161964772446098, 'cb-c3': 133.4506671439552}}
# {'equilibrium': {'cb-cb-c3': 88.017241498952885, 'c3-c3-c3': 107.15294657539577, 'c3-cb-c3': 95.525131045009203, 'c3-f': 1.4540369721579582, 'c3-c3': 1.4606626390987845, 'cb-cb': 1.5740194652571133, 'cb-c3': 1.5300819088244113}, 'force': {'cb-cb-c3': 38.181232448725083, 'c3-cb-fb': 23.018474159608967, 'c3-cb-c3': 38.181232448725083, 'cb-cb': 261.70794675559847, 'c3-c3-f': 23.018474159608967, 'c3-c3-c3': 38.181232448725083, 'c3-c3': 166.58376517157259, 'cb-cb-fb': 23.018474159608967, 'c3-f': 275.70889394317834, 'cb-c3-c3': 38.181232448725083, 'cb-fb': 275.70889394317834, 'cb-c3-cb': 38.181232448725083, 'cb-cb-cb': 38.181232448725083, 'cb-c3': 147.70243577927974}}
# {'equilibrium': {'cb-cb-c3': 83.002735221304803, 'c3-cb-c3': 91.953627226648081, 'c3-f': 1.4600792858235836, 'cb-cb': 1.5830393950969697, 'c3-c3-c3': 111.52980555393397, 'c3-c3': 1.4966281114555786, 'cb-c3': 1.5824458486038846}, 'force': {'cb-cb-c3': 37.607329454133364, 'c3-cb-c3': 37.607329454133364, 'c3-f': 333.61967877205979, 'c3-c3-c3': 37.607329454133364, 'cb-c3-cb': 37.607329454133364, 'cb-c3': 322.57965539078702, 'c3-c3-f': 56.164723495359347, 'cb-cb-cb': 37.607329454133364, 'cb-fb': 333.61967877205979, 'cb-cb': 314.764473231419, 'c3-cb-fb': 56.164723495359347, 'c3-c3': 263.10858646606357, 'cb-cb-fb': 56.164723495359347, 'cb-c3-c3': 37.607329454133364}}
# {'equilibrium': {'cb-cb-c3': 83.404720470460035, 'c3-cb-c3': 92.207727759399972, 'c3-f': 1.4598453348432738, 'cb-cb': 1.5827041906234309, 'c3-c3-c3': 111.40233200658345, 'c3-c3': 1.4965158732230384, 'cb-c3': 1.5826003612640041}, 'force': {'cb-cb-c3': 38.142332310822596, 'c3-cb-c3': 38.142332310822596, 'c3-f': 334.05827604837214, 'c3-c3-c3': 38.142332310822596, 'cb-c3-cb': 38.142332310822596, 'cb-c3': 321.8363136727844, 'c3-c3-f': 55.486818374753867, 'cb-cb-cb': 38.142332310822596, 'cb-fb': 334.05827604837214, 'cb-cb': 317.39659827235744, 'c3-cb-fb': 55.486818374753867, 'c3-c3': 262.36090055711145, 'cb-cb-fb': 55.486818374753867, 'cb-c3-c3': 38.142332310822596}}
#{'equilibrium': {'c3-c3': 1.4966680911551919, 'c3-cb-c3': 63.047340462610443, 'cb-c3': 1.5828756492868352, 'cb-cb': 1.5835608546924587, 'cb-cb-c3': 44.432428791147352, 'c3-f': 1.4612061033677017, 'c3-c3-c3': 106.08166760218445}, 'force': {'c3-c3-f': 40.973122024504754, 'c3-c3': 259.68257919280018, 'cb-c3-cb': 13.889921204782778, 'cb-c3': 327.07344313991069, 'cb-cb-c3': 13.889921204782778, 'c3-cb-c3': 13.889921204782778, 'cb-c3-c3': 13.889921204782778, 'cb-fb': 335.36014666185804, 'cb-cb-cb': 13.889921204782778, 'cb-cb': 304.61438860703259, 'c3-cb-fb': 40.973122024504754, 'c3-f': 335.36014666185804, 'cb-cb-fb': 40.973122024504754, 'c3-c3-c3': 13.889921204782778}}
#112.865058446
#128.382625987 | 51.264438 | 673 | 0.6589 |
403d89e2b8671d5ef941798e95bad2c29298db77 | 1,069 | py | Python | test/test_fastqc.py | laperlej/geecq | fd91fb7bbde9913993cca1f8b4bc566c76a24284 | [
"MIT"
] | null | null | null | test/test_fastqc.py | laperlej/geecq | fd91fb7bbde9913993cca1f8b4bc566c76a24284 | [
"MIT"
] | null | null | null | test/test_fastqc.py | laperlej/geecq | fd91fb7bbde9913993cca1f8b4bc566c76a24284 | [
"MIT"
] | null | null | null | """
use
python -m unittest discover
or
python -m unittest test.test_fastqc
from the main folder
"""
from geecq.fastqc import Fastqc
import unittest
import os
ROOTDIR = os.path.dirname(__file__)
class TestFastqc(unittest.TestCase):
    """Exercise the three Fastqc loading entry points on bundled fixtures."""

    # attributes that every successfully parsed report must populate
    _FIELDS = ('name', 'version', 'nb_sequences', 'gc_content',
               'pos_quality', 'seq_length', 'qual', 'dup')

    def is_loaded(self, fastqc):
        # each parsed attribute must be truthy after loading
        for field in self._FIELDS:
            self.assertTrue(getattr(fastqc, field))

    def test_load_from_string(self):
        fastqc = Fastqc()
        with open(ROOTDIR + '/fastqc_v10.txt', 'r') as handle:
            fastqc.load_from_string(handle.read())
        self.is_loaded(fastqc)

    def test_load_from_file(self):
        fastqc = Fastqc()
        fastqc.load_from_file(ROOTDIR + '/fastqc_v10.txt')
        self.is_loaded(fastqc)

    def test_load_from_zip(self):
        fastqc = Fastqc()
        fastqc.load_from_zip(ROOTDIR + '/fastqc_v10.zip')
        self.is_loaded(fastqc)
| 26.073171 | 78 | 0.68101 |
b037212fdf21ce2d0dc9981752a8b808ec13a645 | 13,183 | py | Python | trunk/bin/queryipac.py | svalenti/agnkey | d44831a8ae72de0b2692da047994f67545185503 | [
"MIT"
] | null | null | null | trunk/bin/queryipac.py | svalenti/agnkey | d44831a8ae72de0b2692da047994f67545185503 | [
"MIT"
] | null | null | null | trunk/bin/queryipac.py | svalenti/agnkey | d44831a8ae72de0b2692da047994f67545185503 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import string
import sys
import datetime
import agnkey
import glob
from agnkey.util import readkey3
from optparse import OptionParser
description="> ingest 1m and 2m data "
usage= '%prog -e epoch [-s stage -n name -f filter -d idnumber]\n available stages [wcs,psf,psfmag,zcat,abscat,' \
'mag,local,getmag]\n'
def ingestlist(_force='yes'):
"""Ingest every *fits file in the current directory into the reduction DB.

:param _force: 'yes' re-ingests and moves files already in the DB,
    'update' refreshes a fixed subset of DB columns, anything else
    skips files whose name is already registered.

NOTE(review): this block was recovered with its leading indentation
stripped; the comments below annotate the intended structure.
"""
filetype=1
lista=glob.glob('*fits')
for img in lista:
output=string.split(img,'/')[-1]
# check whether this file name is already registered in the DB
exist=agnkey.agnsqldef.getfromdataraw(agnkey.agnsqldef.conn,dataredutable,'namefile',
string.split(output,'/')[-1],column2='namefile')
if not exist or _force in ['update','yes']:
hdr = agnkey.util.readhdr(output)
_targid = agnkey.agnsqldef.targimg(output)
_instrument = readkey3(hdr,'instrume')
# 1m telescopes (sbig/sinistro cameras): build the DB row from header keys
if _instrument in agnkey.util.instrument0['sbig']+agnkey.util.instrument0['sinistro']:
print '1m telescope'
dictionary = {'dateobs':readkey3(hdr,'date-obs'),'exptime':readkey3(hdr,'exptime'),
'filter':readkey3(hdr,'filter'),'jd':readkey3(hdr,'JD'),
'telescope':readkey3(hdr,'telescop'),'airmass':readkey3(hdr,'airmass'),
'objname':readkey3(hdr,'object'),'ut':readkey3(hdr,'ut'),
'wcs':readkey3(hdr,'wcserr'),'instrument':readkey3(hdr,'instrume'),
'ra0':readkey3(hdr,'RA'),'dec0':readkey3(hdr,'DEC'),'observer':readkey3(hdr,'OBSERVER'),
'propid':readkey3(hdr,'PROPID'),'USERID':readkey3(hdr,'USERID'),
'temperature':readkey3(hdr,'CCDATEMP'),'dateobs2':readkey3(hdr,'DATE-OBS')}
dictionary['namefile'] = string.split(output,'/')[-1]
dictionary['wdirectory'] = agnkey.util.workingdirectory+'1mtel/'+readkey3(hdr,'date-night')+'/'
dictionary['filetype'] = filetype
dictionary['targid'] = _targid
print 'insert reduced'
# 2m telescopes (spectral cameras)
elif _instrument in agnkey.util.instrument0['spectral']:
print '2m telescope'
dictionary={'dateobs' : readkey3(hdr,'date-obs'),'exptime' : readkey3(hdr,'exptime'),
'filter' : readkey3(hdr,'filter'),'jd' : readkey3(hdr,'JD'),
'telescope' : readkey3(hdr,'telescop'),'airmass' : readkey3(hdr,'airmass'),
'objname' : readkey3(hdr,'object'),'ut' : readkey3(hdr,'ut'),'wcs' : readkey3(hdr,'wcserr'),
'instrument' : readkey3(hdr,'instrume'),'ra0' : readkey3(hdr,'RA'),
'dec0' : readkey3(hdr,'DEC'), 'observer' : readkey3(hdr,'OBSERVER'),
'propid' : readkey3(hdr,'PROPID'), 'USERID' : readkey3(hdr,'USERID'),
'temperature' : readkey3(hdr,'CCDATEMP'), 'dateobs2' : readkey3(hdr,'DATE-OBS')}
# NOTE(review): the 2m branch also stores files under the '1mtel' tree --
# confirm this is intended and not a copy/paste slip
dictionary['namefile'] = string.split(output,'/')[-1]
dictionary['wdirectory'] = agnkey.util.workingdirectory + '1mtel/' + readkey3(hdr,'date-night') + '/'
dictionary['filetype'] = filetype
dictionary['targid'] = _targid
print 'insert reduced'
# unknown instrument: nothing to ingest
else: dictionary=''
if dictionary:
if not exist:
print 'insert values'
agnkey.agnsqldef.insert_values(agnkey.agnsqldef.conn,dataredutable,dictionary)
else:
print dataredutable
print 'update values'
# on update only a fixed subset of columns is refreshed
for voce in dictionary:
print voce
for voce in ['wdirectory','filetype','ra0','dec0','jd','exptime','filter']:
agnkey.agnsqldef.updatevalue(dataredutable,voce,dictionary[voce],
string.split(output,'/')[-1])
######################################
# move the file into its working directory (mode 0664: group-writable)
if not os.path.isdir(dictionary['wdirectory']):
os.mkdir(dictionary['wdirectory'])
if not os.path.isfile(dictionary['wdirectory'] + output) or _force == 'yes':
print 'mv ' + output + ' ' + dictionary['wdirectory'] + output
os.system('mv '+output + ' '+dictionary['wdirectory'] + output)
os.chmod(dictionary['wdirectory'] + output,0664)
else:
print 'dictionary empty'
else:
print '\n### file already there '+output
#############################################################################################################
def ingestfromipac(d0s,d1s,dataredutable,archa1,archa2,_force):
"""Query the LCOGT/IPAC archive for images in a date range, download the
ones belonging to our proposals via wget, and ingest them into the DB.

:param d0s, d1s: start/end dates as 'YYYY-MM-DD' strings
:param dataredutable: name of the DB table to insert/update
:param archa1, archa2: IPAC archive username ('...&') and password
:param _force: 'yes'/'update' re-ingests files already in the DB

NOTE(review): this block was recovered with its leading indentation
stripped; comments below mark the intended structure.
"""
filetype = 1
tfile = './tempdat_img.asc'
cols = ['userid','propid','origname','FILEHAND']
# building blocks for the wget command lines (cookie login + Gator query)
str0 = 'wget --save-cookies=lcogt_img_cookies.txt'
str0a = 'wget --load-cookies=lcogt_img_cookies.txt'
str1 = ' --password='
str2 = ' -O '
str3 = ' "http://lcogtarchive.ipac.caltech.edu/cgi-bin/Gator/nph-query?'
str4 = 'outfmt=1&catalog=lco_img&mission=lcogt&'
str5 = 'spatial=NONE&' # 1/2 deg square box
str6 = 'selcols=' # cols to return
# NOTE(review): range(0,len(cols)-2) skips cols[2] ('origname') from
# selcols -- looks like an off-by-one, confirm the intended column list
for i in range(0,len(cols)-2):
str6=str6+cols[i]+','
str6 = str6 + cols[len(cols)-1] + '&'
str7 = "constraints=date_obs+between+to_date('"+d0s+"','YYYY-MM-DD')+and+"+\
"to_date('"+d1s+"','YYYY-MM-DD')"
str10 = '/dev/null '
str11 = '"http://irsa.ipac.caltech.edu/account/signon/login.do?'
str12 = '"http://irsa.ipac.caltech.edu/account/signon/logout.do'
str13 = 'josso_cmd=login&'
str14 = 'josso_username='
str15 = 'josso_password='
# wget0: log in (saves cookie); wget1: run the catalog query; wget2: log out
wget0 = str0 + str2 + str10 + str11 + str13 + str14 + archa1 + str15 + archa2 + '"'
wget1 = str0a + str2 + tfile + str3 + str4 + str5 + str6 + str7 + '"'
wget2 = str0 + str2 + str10 + str12 + '"'
print wget0
print wget1
os.system(wget0) # download cookie from ipac
os.system(wget1) # download list of images to ingest
################# exstract images path to download ############
f=open('tempdat_img.asc','r')
ss=f.readlines()
f.close()
lista=[]
# keep only data rows (not '|' header / '\' comment lines) whose proposal
# id is in our configured proposal list; last column is the file handle
for i in ss:
if i[0] not in ['|','\\']:
if string.split(i)[-2] in agnkey.util.readpass['proposal']:
lista.append(string.split(i)[-1])
for img in lista:
output = string.split(img,'/')[-1]
downloadimage = 'wget --load-cookies=lcogt_img_cookies.txt -O ' + str(output) + \
' "http://lcogtarchive.ipac.caltech.edu/cgi-bin/LCODownload/nph-lcoDownload?&file=' + img + '"'
# skip files already registered in the DB unless forced
exist = agnkey.agnsqldef.getfromdataraw(agnkey.agnsqldef.conn,dataredutable,'namefile',
string.split(output,'/')[-1],column2='namefile')
if not exist or _force in ['update','yes']:
if not os.path.isfile(output):
os.system(downloadimage)
hdr = agnkey.util.readhdr(output)
_targid = agnkey.agnsqldef.targimg(output)
_instrument = readkey3(hdr,'instrume')
# 1m telescopes (sbig/sinistro cameras): build the DB row from header keys
if _instrument in agnkey.util.instrument0['sbig']+agnkey.util.instrument0['sinistro']:
print '1m telescope'
dictionary = {'dateobs' : readkey3(hdr,'date-obs'),'exptime' : readkey3(hdr,'exptime'),
'filter' : readkey3(hdr,'filter'),'jd' : readkey3(hdr,'JD'),
'telescope' : readkey3(hdr,'telescop'),'airmass':readkey3(hdr,'airmass'),
'objname':readkey3(hdr,'object'),'ut':readkey3(hdr,'ut'),
'wcs' : readkey3(hdr,'wcserr'),'instrument' : readkey3(hdr,'instrume'),
'ra0' : readkey3(hdr,'RA'),'dec0' : readkey3(hdr,'DEC'),
'observer' : readkey3(hdr,'OBSERVER'),'propid' : readkey3(hdr,'PROPID'),
'USERID' : readkey3(hdr,'USERID'),'temperature' : readkey3(hdr,'CCDATEMP'),
'dateobs2':readkey3(hdr,'DATE-OBS')}
dictionary['namefile'] = string.split(output,'/')[-1]
dictionary['wdirectory'] = agnkey.util.workingdirectory + '1mtel/' + readkey3(hdr,'date-night') +'/'
dictionary['filetype'] = filetype
dictionary['targid'] = _targid
print 'insert reduced'
# else: dictionary=''
# 2m telescopes (spectral cameras)
elif _instrument in agnkey.util.instrument0['spectral']:
print '2m telescope'
dictionary = {'dateobs' : readkey3(hdr,'date-obs'),'exptime' : readkey3(hdr,'exptime'),
'filter' : readkey3(hdr,'filter'), 'jd' : readkey3(hdr,'JD'),
'telescope' : readkey3(hdr,'telescop'),'airmass' : readkey3(hdr,'airmass'),
'objname' : readkey3(hdr,'object'), 'ut' : readkey3(hdr,'ut'),
'wcs' : readkey3(hdr,'wcserr'),'instrument' : readkey3(hdr,'instrume'),
'ra0' : readkey3(hdr,'RA'), 'dec0' : readkey3(hdr,'DEC'),
'observer' : readkey3(hdr,'OBSERVER'),'propid':readkey3(hdr,'PROPID'),
'USERID' : readkey3(hdr,'USERID'),'temperature' : readkey3(hdr,'CCDATEMP'),
'dateobs2' : readkey3(hdr,'DATE-OBS')}
# NOTE(review): the 2m branch also stores files under '1mtel/' -- confirm
dictionary['namefile'] = string.split(output,'/')[-1]
dictionary['wdirectory'] = agnkey.util.workingdirectory + '1mtel/' + readkey3(hdr,'date-night') + '/'
dictionary['filetype'] = filetype
dictionary['targid'] = _targid
print 'insert reduced'
# unknown instrument: nothing to ingest
else:
dictionary = ''
if dictionary:
if not exist:
print 'insert values'
agnkey.agnsqldef.insert_values(agnkey.agnsqldef.conn,dataredutable,dictionary)
else:
print dataredutable
print 'update values'
# on update only a fixed subset of columns is refreshed
for voce in dictionary:
print voce
for voce in ['wdirectory','filetype','ra0','dec0','jd','exptime','filter']:
agnkey.agnsqldef.updatevalue(dataredutable,voce,dictionary[voce],
string.split(output,'/')[-1])
######################################
# move the file into its working directory (mode 0664: group-writable)
if not os.path.isdir(dictionary['wdirectory']):
os.mkdir(dictionary['wdirectory'])
if not os.path.isfile(dictionary['wdirectory'] + output) or _force=='yes':
print 'mv ' + output + ' ' + dictionary['wdirectory'] + output
os.system('mv ' + output+' ' + dictionary['wdirectory'] + output)
os.chmod(dictionary['wdirectory']+output,0664)
else:
print 'dictionary empty'
else:
print '\n### file already there '+output
os.system(wget2) # close connection for download
############################################################################################################
# Script entry point: parse command-line options and dispatch the ingestion.
if __name__ == "__main__":
parser = OptionParser(usage=usage,description=description, version="%prog 1.0")
parser.add_option("-e", "--epoch",dest="epoch",default='20121212',type="str",
help='epoch to reduce \t [%default]')
parser.add_option("-f", "--force",dest="force",default='no',type="str",
help='force ingestion \t [no/yes/update] \n')
parser.add_option("-t", "--type",dest="typeing",default='ipac',type="str",
help='type of ingestion \t [ipac/tar] \n')
option,args = parser.parse_args()
# invalid option values force the help text to be shown on the re-parse
if option.force not in ['no','yes','update']:
sys.argv.append('--help')
if option.typeing not in ['ipac','tar']:
sys.argv.append('--help')
option,args = parser.parse_args()
epoch = option.epoch
_force = option.force
_type = option.typeing
# epoch is either a single YYYYMMDD day or a 'YYYYMMDD-YYYYMMDD' range
if '-' not in str(epoch):
epoch1 = datetime.date(int(epoch[0:4]),int(epoch[4:6]),int(epoch[6:8]))
epoch2 = datetime.date(int(epoch[0:4]),int(epoch[4:6]),int(epoch[6:8]))
else:
epoch1,epoch2 = string.split(epoch,'-')
# NOTE(review): in this branch start/stop are computed but never used,
# and epoch1/epoch2 stay raw strings passed down below -- verify intent
start = datetime.date(int(epoch1[0:4]),int(epoch1[4:6]),int(epoch1[6:8]))
stop = datetime.date(int(epoch2[0:4]),int(epoch2[4:6]),int(epoch2[6:8]))
# IPAC archive credentials come from the agnkey configuration
archa1 = agnkey.util.readpass['ipacuser']+'&'
archa2 = agnkey.util.readpass['ipacpasswd']
dataredutable = 'dataredulco'
print epoch1,epoch2
print _force
if _type == 'ipac':
ingestfromipac(str(epoch1),str(epoch2),dataredutable,archa1,archa2,_force)
elif _type == 'tar':
ingestlist(_force)
| 56.097872 | 120 | 0.518319 |
98605fa797ecc2121d6c44be2911aa569fb2a1c9 | 3,481 | py | Python | pyfr/backends/hip/types.py | YuWangTAMU/PyFR | 99ff5fe1ec1ff0dcefcf6222488c990c6839aac3 | [
"BSD-3-Clause"
] | null | null | null | pyfr/backends/hip/types.py | YuWangTAMU/PyFR | 99ff5fe1ec1ff0dcefcf6222488c990c6839aac3 | [
"BSD-3-Clause"
] | null | null | null | pyfr/backends/hip/types.py | YuWangTAMU/PyFR | 99ff5fe1ec1ff0dcefcf6222488c990c6839aac3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
import pyfr.backends.base as base
from pyfr.util import make_pybuf
class _HIPMatrixCommon(object):
    """Mixin exposing the raw device pointer via the ctypes argument protocol."""

    @property
    def _as_parameter_(self):
        # ctypes consults _as_parameter_ when an object is passed to a C call
        return self.data
class HIPMatrixBase(_HIPMatrixCommon, base.MatrixBase):
    """Device-backed matrix storage shared by all HIP matrix types."""

    def onalloc(self, basedata, offset):
        # Record where this matrix lives inside the backing allocation
        self.basedata = basedata
        self.data = int(basedata) + offset
        self.offset = offset

        # Apply any initial value, then discard it
        if self._initval is not None:
            self._set(self._initval)
        del self._initval

    def _get(self):
        # Device -> host copy into a scratch buffer
        host = np.empty((self.nrow, self.leaddim), dtype=self.dtype)
        self.backend.hip.memcpy(host, self.data, self.nbytes)

        # Strip the leading-dimension padding and unpack
        return self._unpack(host[:, :self.ncol])

    def _set(self, ary):
        # Pack into a zero-padded staging buffer, then host -> device copy
        staging = np.zeros((self.nrow, self.leaddim), dtype=self.dtype)
        staging[:, :self.ncol] = self._pack(ary)
        self.backend.hip.memcpy(self.data, staging, self.nbytes)
class HIPMatrix(HIPMatrixBase, base.Matrix):
    """General-purpose HIP matrix; all behaviour comes from the bases."""
class HIPMatrixSlice(_HIPMatrixCommon, base.MatrixSlice):
    """View onto a rectangular sub-region of an existing HIP matrix."""

    def _init_data(self, mat):
        # Address of element (ra, ca) inside the parent's allocation
        parent_addr = int(mat.basedata) + mat.offset
        return parent_addr + self.ra*self.pitch + self.ca*self.itemsize
class HIPMatrixBank(base.MatrixBank):
    """Bank of interchangeable HIP matrices; behaviour inherited unchanged."""
class HIPConstMatrix(HIPMatrixBase, base.ConstMatrix):
    """Read-only (constant) HIP matrix; behaviour inherited unchanged."""
class HIPView(base.View):
    """HIP view type; behaviour inherited unchanged from base.View."""
class HIPXchgMatrix(HIPMatrix, base.XchgMatrix):
    """Matrix used to exchange data between ranks over MPI."""

    def __init__(self, backend, ioshape, initval, extent, aliases, tags):
        super().__init__(backend, ioshape, initval, extent, aliases, tags)

        if backend.mpitype == 'hip-aware':
            # HIP-aware MPI: wrap the device allocation in a buffer and
            # hand it to MPI directly
            self.hdata = make_pybuf(self.data, self.nbytes, 0x200)
        else:
            # Plain MPI: stage transfers through page-locked host memory
            self.hdata = backend.hip.pagelocked_empty((self.nrow, self.ncol),
                                                      self.dtype)
class HIPXchgView(base.XchgView):
    """Exchange view type; behaviour inherited unchanged from base.XchgView."""
class HIPQueue(base.Queue):
    """Kernel queue backed by a pair of HIP streams."""

    def __init__(self, backend):
        super().__init__(backend)

        # One stream for compute kernels and one for copies
        self.hip_stream_comp = backend.hip.create_stream()
        self.hip_stream_copy = backend.hip.create_stream()

    def _wait(self):
        last = self._last_ktype
        if last == 'compute':
            self.hip_stream_comp.synchronize()
            self.hip_stream_copy.synchronize()
        elif last == 'mpi':
            from mpi4py import MPI

            MPI.Prequest.Waitall(self.mpi_reqs)
            self.mpi_reqs = []

        self._last_ktype = None

    def _at_sequence_point(self, item):
        # A sequence point occurs whenever the kernel type changes
        return self._last_ktype != item.ktype

    @staticmethod
    def runall(queues):
        # Start with any items which can not trigger an implicit wait
        for queue in queues:
            queue._exec_nowait()

        # Drain the queues, one (potentially) blocking item at a time
        while any(queues):
            for queue in filter(None, queues):
                queue._exec_next()
                queue._exec_nowait()

        # Wait for all outstanding work to finish
        for queue in queues:
            queue._wait()
| 27.409449 | 76 | 0.62913 |
f621ca64e22070a207ebf200f649e49ed5152ce6 | 13,413 | py | Python | 7.33.0.dev0/ietf/secr/telechat/tests.py | kesara/ietf-datatracker | dca3ee2ee98bcb75a10687587cf631750be34c79 | [
"Unlicense"
] | null | null | null | 7.33.0.dev0/ietf/secr/telechat/tests.py | kesara/ietf-datatracker | dca3ee2ee98bcb75a10687587cf631750be34c79 | [
"Unlicense"
] | null | null | null | 7.33.0.dev0/ietf/secr/telechat/tests.py | kesara/ietf-datatracker | dca3ee2ee98bcb75a10687587cf631750be34c79 | [
"Unlicense"
] | null | null | null | # Copyright The IETF Trust 2013-2020, All Rights Reserved
# -*- coding: utf-8 -*-
import datetime
from pyquery import PyQuery
import debug # pyflakes:ignore
from django.urls import reverse
from ietf.doc.factories import (WgDraftFactory, IndividualRfcFactory, CharterFactory,
IndividualDraftFactory, ConflictReviewFactory)
from ietf.doc.models import BallotDocEvent, BallotType, BallotPositionDocEvent, State, Document
from ietf.doc.utils import update_telechat, create_ballot_if_not_open
from ietf.utils.test_utils import TestCase
from ietf.iesg.models import TelechatDate
from ietf.person.models import Person
from ietf.person.factories import PersonFactory
from ietf.secr.telechat.views import get_next_telechat_date
# Username used by the secretariat tests when logging in.
SECR_USER='secretary'

def augment_data():
    """Create a TelechatDate for today so telechat views have an agenda date."""
    TelechatDate.objects.create(date=datetime.datetime.today())
class SecrTelechatTestCase(TestCase):
    """Integration tests for the secretariat telechat views: agenda pages,
    per-document detail pages, and the POST handlers that update ballots,
    IESG states and (indirectly) document action holders.
    """

    def test_main(self):
        """The telechat main page renders for the secretary."""
        augment_data()
        url = reverse('ietf.secr.telechat.views.main')
        self.client.login(username="secretary", password="secretary+password")
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_doc(self):
        """The per-date document list view renders for the secretary."""
        augment_data()
        d = TelechatDate.objects.all()[0]
        date = d.date.strftime('%Y-%m-%d')
        url = reverse('ietf.secr.telechat.views.doc', kwargs={'date':date})
        self.client.login(username="secretary", password="secretary+password")
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_doc_detail_draft(self):
        """Detail page for a draft shows the full set of position columns."""
        draft = WgDraftFactory(states=[('draft-iesg','pub-req'),])
        ad = Person.objects.get(user__username="ad")
        create_ballot_if_not_open(None, draft, ad, 'approve')
        d = get_next_telechat_date()
        date = d.strftime('%Y-%m-%d')
        by=Person.objects.get(name="(System)")
        update_telechat(None, draft, by, d)
        url = reverse('ietf.secr.telechat.views.doc_detail', kwargs={'date':date, 'name':draft.name})
        self.client.login(username="secretary", password="secretary+password")
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        q = PyQuery(response.content)
        # Draft ballots use Discuss (unlike charters, which use Block).
        self.assertEqual(q("#telechat-positions-table").find("th:contains('Yes')").length,1)
        self.assertEqual(q("#telechat-positions-table").find("th:contains('No Objection')").length,1)
        self.assertEqual(q("#telechat-positions-table").find("th:contains('Discuss')").length,1)
        self.assertEqual(q("#telechat-positions-table").find("th:contains('Abstain')").length,1)
        self.assertEqual(q("#telechat-positions-table").find("th:contains('Recuse')").length,1)
        self.assertEqual(q("#telechat-positions-table").find("th:contains('No Record')").length,1)

    def test_doc_detail_draft_with_downref(self):
        """A normative reference to a lower-maturity RFC surfaces the downref
        notice and registry prompt on the detail page."""
        ad = Person.objects.get(user__username="ad")
        draft = WgDraftFactory(ad=ad, intended_std_level_id='ps', states=[('draft-iesg','pub-req'),])
        rfc = IndividualRfcFactory.create(stream_id='irtf', other_aliases=['rfc6666',],
                                          states=[('draft','rfc'),('draft-iesg','pub')], std_level_id='inf', )
        draft.relateddocument_set.create(target=rfc.docalias.get(name='rfc6666'),
                                         relationship_id='refnorm')
        create_ballot_if_not_open(None, draft, ad, 'approve')
        d = get_next_telechat_date()
        date = d.strftime('%Y-%m-%d')
        by=Person.objects.get(name="(System)")
        update_telechat(None, draft, by, d)
        url = reverse('ietf.secr.telechat.views.doc_detail', kwargs={'date':date, 'name':draft.name})
        self.client.login(username="secretary", password="secretary+password")
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Has downref: Yes")
        self.assertContains(response, "Add rfc6666")
        self.assertContains(response, "to downref registry")

    def test_doc_detail_draft_invalid(self):
        '''Test using a document not on telechat agenda'''
        draft = WgDraftFactory(states=[('draft-iesg','pub-req'),])
        date = get_next_telechat_date().strftime('%Y-%m-%d')
        url = reverse('ietf.secr.telechat.views.doc_detail', kwargs={'date':date, 'name':draft.name})
        self.client.login(username="secretary", password="secretary+password")
        response = self.client.get(url, follow=True)
        # A document not on the agenda redirects back to the doc list.
        self.assertRedirects(response, reverse('ietf.secr.telechat.views.doc', kwargs={'date':date}))
        self.assertContains(response, 'not on the Telechat agenda')

    def test_doc_detail_conflict_review_no_ballot(self):
        """Conflict-review detail page renders even when no ballot exists."""
        IndividualDraftFactory(name='draft-imaginary-independent-submission')
        review = ConflictReviewFactory(name='conflict-review-imaginary-irtf-submission',review_of=IndividualDraftFactory(name='draft-imaginary-irtf-submission',stream_id='irtf'),notify='notifyme@example.net')
        by=Person.objects.get(name="(System)")
        d = get_next_telechat_date()
        date = d.strftime('%Y-%m-%d')
        update_telechat(None, review, by, d)
        url = reverse('ietf.secr.telechat.views.doc_detail', kwargs={'date':date, 'name':review.name})
        self.client.login(username="secretary", password="secretary+password")
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_doc_detail_charter(self):
        """Detail page for a charter shows Block (not Discuss) positions."""
        by=Person.objects.get(name="(System)")
        charter = CharterFactory(states=[('charter','intrev')])
        last_week = datetime.date.today()-datetime.timedelta(days=7)
        BallotDocEvent.objects.create(type='created_ballot',by=by,doc=charter, rev=charter.rev,
                                      ballot_type=BallotType.objects.get(doc_type=charter.type,slug='r-extrev'),
                                      time=last_week)
        d = get_next_telechat_date()
        date = d.strftime('%Y-%m-%d')
        update_telechat(None, charter, by, d)
        url = reverse('ietf.secr.telechat.views.doc_detail', kwargs={'date':date, 'name':charter.name})
        self.client.login(username="secretary", password="secretary+password")
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        q = PyQuery(response.content)
        self.assertEqual(q("#telechat-positions-table").find("th:contains('Yes')").length,1)
        self.assertEqual(q("#telechat-positions-table").find("th:contains('No Objection')").length,1)
        self.assertEqual(q("#telechat-positions-table").find("th:contains('Block')").length,1)
        self.assertEqual(q("#telechat-positions-table").find("th:contains('Abstain')").length,1)
        self.assertEqual(q("#telechat-positions-table").find("th:contains('No Record')").length,1)

    def test_bash(self):
        """The bash (agenda summary) view renders for today's telechat."""
        today = datetime.datetime.today()
        TelechatDate.objects.create(date=today)
        url = reverse('ietf.secr.telechat.views.bash',kwargs={'date':today.strftime('%Y-%m-%d')})
        self.client.login(username="secretary", password="secretary+password")
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_doc_detail_post_update_ballot(self):
        """Posting the ballot formset records the submitted AD position."""
        by=Person.objects.get(name="(System)")
        charter = CharterFactory(states=[('charter','intrev')])
        last_week = datetime.date.today()-datetime.timedelta(days=7)
        BallotDocEvent.objects.create(type='created_ballot',by=by,doc=charter, rev=charter.rev,
                                      ballot_type=BallotType.objects.get(doc_type=charter.type,slug='r-extrev'),
                                      time=last_week)
        d = get_next_telechat_date()
        date = d.strftime('%Y-%m-%d')
        update_telechat(None, charter, by, d)
        url = reverse('ietf.secr.telechat.views.doc_detail', kwargs={'date':date, 'name':charter.name})
        self.client.login(username="secretary", password="secretary+password")
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        # Formset payload: only form-0 carries a position; the id values
        # are the Person pks of the seeded ADs.
        response = self.client.post(url,{
            'submit': 'update_ballot',
            'form-INITIAL_FORMS':7,
            'form-TOTAL_FORMS':7,
            'form-0-name': 'Ops Ad',
            'form-0-id': '13',
            'form-0-position': 'noobj',
            'form-1-name': 'Areað Irector',
            'form-1-id': '12',
            'form-2-name': 'Ad No1',
            'form-2-id': '16',
            'form-3-name': 'Ad No2',
            'form-3-id': '17',
            'form-4-name': 'Ad No3',
            'form-4-id': '18',
            'form-5-name': 'Ad No4',
            'form-5-id': '19',
            'form-6-name': 'Ad No5',
            'form-6-id': '20',
            }
        )
        self.assertEqual(response.status_code,302)
        self.assertTrue(BallotPositionDocEvent.objects.filter(doc=charter, balloter_id=13, pos__slug='noobj').exists())

    def test_doc_detail_post_update_state(self):
        """Posting a state change moves the charter to the selected state."""
        by=Person.objects.get(name="(System)")
        charter = CharterFactory(states=[('charter','intrev')])
        last_week = datetime.date.today()-datetime.timedelta(days=7)
        BallotDocEvent.objects.create(type='created_ballot',by=by,doc=charter, rev=charter.rev,
                                      ballot_type=BallotType.objects.get(doc_type=charter.type,slug='r-extrev'),
                                      time=last_week)
        d = get_next_telechat_date()
        date = d.strftime('%Y-%m-%d')
        update_telechat(None, charter, by, d)
        url = reverse('ietf.secr.telechat.views.doc_detail', kwargs={'date':date, 'name':charter.name})
        self.client.login(username="secretary", password="secretary+password")
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        # NOTE(review): 83 is a hard-coded State pk expected to be the
        # charter 'notrev' state in the seed fixtures — confirm fixture pk.
        response = self.client.post(url,{
            'submit': 'update_state',
            'state': 83,
            'substate': '',
            }
        )
        self.assertEqual(response.status_code,302)
        self.assertEqual(charter.get_state('charter').slug,'notrev')

    def test_doc_detail_post_update_state_action_holder_automation(self):
        """Updating IESG state of a draft should update action holders"""
        by = Person.objects.get(name='(System)')
        draft = WgDraftFactory(
            states=[('draft-iesg', 'iesg-eva')],
            ad=Person.objects.get(user__username='ad'),
            authors=PersonFactory.create_batch(3),
        )
        last_week = datetime.date.today()-datetime.timedelta(days=7)
        BallotDocEvent.objects.create(type='created_ballot',by=by,doc=draft, rev=draft.rev,
                                      ballot_type=BallotType.objects.get(doc_type=draft.type,slug='approve'),
                                      time=last_week)
        d = get_next_telechat_date()
        date = d.strftime('%Y-%m-%d')
        update_telechat(None, draft, by, d)
        url = reverse('ietf.secr.telechat.views.doc_detail', kwargs={'date':date, 'name':draft.name})
        self.client.login(username="secretary", password="secretary+password")
        # Check that there are no action holder DocEvents yet
        self.assertEqual(draft.docevent_set.filter(type='changed_action_holders').count(), 0)
        # setting to defer should add AD, adding need-rev should add authors
        response = self.client.post(url,{
            'submit': 'update_state',
            'state': State.objects.get(type_id='draft-iesg', slug='defer').pk,
            'substate': 'need-rev',
        })
        self.assertEqual(response.status_code,302)
        draft = Document.objects.get(name=draft.name)
        self.assertEqual(draft.get_state('draft-iesg').slug,'defer')
        self.assertCountEqual(draft.action_holders.all(), [draft.ad] + draft.authors())
        self.assertEqual(draft.docevent_set.filter(type='changed_action_holders').count(), 1)
        # Removing need-rev should remove authors
        response = self.client.post(url,{
            'submit': 'update_state',
            'state': State.objects.get(type_id='draft-iesg', slug='iesg-eva').pk,
            'substate': '',
        })
        self.assertEqual(response.status_code,302)
        draft = Document.objects.get(name=draft.name)
        self.assertEqual(draft.get_state('draft-iesg').slug,'iesg-eva')
        self.assertCountEqual(draft.action_holders.all(), [draft.ad])
        self.assertEqual(draft.docevent_set.filter(type='changed_action_holders').count(), 2)
        # Setting to approved should remove all action holders
        # noinspection DjangoOrm
        draft.action_holders.add(*(draft.authors()))  # add() with through model ok in Django 2.2+
        response = self.client.post(url,{
            'submit': 'update_state',
            'state': State.objects.get(type_id='draft-iesg', slug='approved').pk,
            'substate': '',
        })
        self.assertEqual(response.status_code,302)
        draft = Document.objects.get(name=draft.name)
        self.assertEqual(draft.get_state('draft-iesg').slug,'approved')
        self.assertCountEqual(draft.action_holders.all(), [])
        self.assertEqual(draft.docevent_set.filter(type='changed_action_holders').count(), 3)
| 52.394531 | 208 | 0.638261 |
f8841359ecd4d6865c8d475cd08108c55fe6a2cf | 2,482 | py | Python | MeasurementTool.py | NeverGET/FlatCAM | 5d3d0f2166695687aa319cc56919c971adc82843 | [
"MIT"
] | 63 | 2016-02-24T20:42:58.000Z | 2022-03-13T09:10:17.000Z | MeasurementTool.py | NeverGET/FlatCAM | 5d3d0f2166695687aa319cc56919c971adc82843 | [
"MIT"
] | 7 | 2017-02-03T22:05:22.000Z | 2022-03-01T21:16:26.000Z | MeasurementTool.py | NeverGET/FlatCAM | 5d3d0f2166695687aa319cc56919c971adc82843 | [
"MIT"
] | 27 | 2016-02-24T20:42:58.000Z | 2022-02-17T02:40:16.000Z | from PyQt4 import QtGui
from FlatCAMTool import FlatCAMTool
from copy import copy
from math import sqrt
class Measurement(FlatCAMTool):
    """Interactive distance-measurement tool.

    Click a reference point on the plot canvas, then move the mouse: the
    tool's label shows the X/Y deltas and the straight-line distance from
    the reference point to the cursor.  Toggled on/off via run() or the
    'R' key.
    """

    toolName = "Measurement Tool"

    def __init__(self, app):
        FlatCAMTool.__init__(self, app)
        # self.setContentsMargins(0, 0, 0, 0)
        self.layout.setMargin(0)
        self.layout.setContentsMargins(0, 0, 3, 0)
        self.setSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Maximum)
        # point1: current reference point; point2: the reference before it.
        # NOTE(review): point2 is written in on_click() but never read —
        # presumably intended for point-to-point measuring; confirm.
        self.point1 = None
        self.point2 = None
        self.label = QtGui.QLabel("Click on a reference point ...")
        self.label.setFrameStyle(QtGui.QFrame.StyledPanel | QtGui.QFrame.Plain)
        self.label.setMargin(3)
        self.layout.addWidget(self.label)
        # self.layout.setMargin(0)
        # Hidden until toggled on.
        self.setVisible(False)
        # NOTE(review): never reassigned after construction; the actual
        # event wiring happens in install()/toggle() — confirm these
        # attributes are still needed.
        self.click_subscription = None
        self.move_subscription = None

    def install(self):
        """Add the tool widget to the UI and listen for canvas key presses."""
        FlatCAMTool.install(self)
        self.app.ui.right_layout.addWidget(self)
        self.app.plotcanvas.vis_connect('key_press', self.on_key_press)

    def run(self):
        """Entry point when the tool is activated; toggles visibility."""
        self.toggle()

    def on_click(self, event):
        """Record the clicked canvas position as the new reference point."""
        pos = self.app.plotcanvas.vispy_canvas.translate_coords(event.pos)
        if self.point1 is None:
            self.point1 = pos
        else:
            # Keep the previous reference before replacing it.
            self.point2 = copy(self.point1)
            self.point1 = pos
        self.on_move(event)

    def on_key_press(self, event):
        """Toggle the tool when 'R' is pressed on the canvas."""
        if event.key == 'R':
            self.toggle()

    def toggle(self):
        """Show/hide the tool, (dis)connecting canvas mouse events to match."""
        if self.isVisible():
            self.setVisible(False)
            self.app.plotcanvas.vis_disconnect('mouse_move', self.on_move)
            self.app.plotcanvas.vis_disconnect('mouse_release', self.on_click)
        else:
            self.setVisible(True)
            self.app.plotcanvas.vis_connect('mouse_move', self.on_move)
            self.app.plotcanvas.vis_connect('mouse_release', self.on_click)

    def on_move(self, event):
        """Refresh the label with the distance from point1 to the cursor."""
        if self.point1 is None:
            self.label.setText("Click on a reference point...")
        else:
            try:
                pos = self.app.plotcanvas.vispy_canvas.translate_coords(event.pos)
                dx = pos[0] - self.point1[0]
                dy = pos[1] - self.point1[1]
                d = sqrt(dx**2 + dy**2)
                self.label.setText("D = %.4f D(x) = %.4f D(y) = %.4f" % (d, dx, dy))
            except TypeError:
                # presumably guards against None/invalid coordinates from
                # translate_coords — TODO confirm.
                pass
        if self.update is not None:
            # NOTE(review): QWidget.update is a bound method and never
            # None, so this guard always passes — confirm intent.
            self.update()
| 32.233766 | 86 | 0.598711 |
69553acb483eaf7613d7e8f31eea08d073479607 | 2,364 | py | Python | setup.py | rockneurotiko/mypy | 45ff97ad6fe15a9c8043b479c7e7e277905067a8 | [
"PSF-2.0"
] | null | null | null | setup.py | rockneurotiko/mypy | 45ff97ad6fe15a9c8043b479c7e7e277905067a8 | [
"PSF-2.0"
] | null | null | null | setup.py | rockneurotiko/mypy | 45ff97ad6fe15a9c8043b479c7e7e277905067a8 | [
"PSF-2.0"
] | null | null | null | #!/usr/bin/env python
# Packaging script for mypy.  Collects the bundled stub files for each
# supported Python version into ``data_files`` and invokes distutils
# ``setup()``.
import glob
import os
import os.path
import sys
from distutils.core import setup
from mypy.version import __version__

if sys.version_info < (3, 2, 0):
    sys.stderr.write("ERROR: You need Python 3.2 or later to use mypy.\n")
    # Fix: use sys.exit() rather than the site-provided exit() builtin,
    # which is unavailable when the interpreter runs without the site
    # module (e.g. python -S) or in frozen environments.
    sys.exit(1)

version = __version__
description = 'Optional static typing for Python'
long_description = '''
Mypy -- Optional Static Typing for Python
=========================================
Add type annotations to your Python programs, and use mypy to type
check them. Mypy is essentially a Python linter on steroids, and it
can catch many programming errors by analyzing your program, without
actually having to run it. Mypy has a powerful type system with
features such as type inference, gradual typing, generics and union
types.
'''.lstrip()

# Build the data_files list: one (target-dir, [stub files]) entry per
# stub sub-directory, per supported Python version.
stubs = []
for py_version in ['3.4', '3.3', '3.2', '2.7']:
    base = os.path.join('stubs', py_version)
    if not os.path.exists(base):
        os.mkdir(base)
    # '' stands for the top-level stub directory itself; walk for nested ones.
    stub_dirs = ['']
    for root, dirs, files in os.walk(base):
        stub_dirs.extend(os.path.relpath(os.path.join(root, stub_dir), base)
                         for stub_dir in dirs
                         if stub_dir != '__pycache__')
    for stub_dir in stub_dirs:
        target = os.path.join('lib', 'mypy', 'stubs', py_version, stub_dir)
        # Collect both legacy .py and modern .pyi stub files.
        files = glob.glob(os.path.join(base, stub_dir, '*.py'))
        files += glob.glob(os.path.join(base, stub_dir, '*.pyi'))
        stubs.append((target, files))

classifiers = [
    'Development Status :: 2 - Pre-Alpha',
    'Environment :: Console',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: POSIX',
    'Programming Language :: Python :: 3.2',
    'Programming Language :: Python :: 3.3',
    'Programming Language :: Python :: 3.4',
    'Topic :: Software Development',
]

setup(name='mypy-lang',
      version=version,
      description=description,
      long_description=long_description,
      author='Jukka Lehtosalo',
      author_email='jukka.lehtosalo@iki.fi',
      url='http://www.mypy-lang.org/',
      license='MIT License',
      platforms=['POSIX'],
      package_dir={'': 'lib-typing/3.2', 'mypy': 'mypy'},
      py_modules=['typing'],
      packages=['mypy'],
      scripts=['scripts/mypy'],
      data_files=stubs,
      classifiers=classifiers,
      )
| 31.52 | 76 | 0.634095 |
2e2a31ff8f03188ce760351c7e111694d09cfc7a | 18,549 | py | Python | app/models/game.py | PapaCharlie/SteamReviews | 41a72c71314c93bf9ffdd3e17d66f4e458a2411b | [
"MIT"
] | null | null | null | app/models/game.py | PapaCharlie/SteamReviews | 41a72c71314c93bf9ffdd3e17d66f4e458a2411b | [
"MIT"
] | null | null | null | app/models/game.py | PapaCharlie/SteamReviews | 41a72c71314c93bf9ffdd3e17d66f4e458a2411b | [
"MIT"
] | null | null | null | from __future__ import print_function, division
import csv
import base64
import json
import logging
import numpy as np
import os
import requests
import re
import sys
import time
from . import Review
from app import app
from app.dynamodb import db, utils
from app.utils import data_file, mallet_file
from bs4 import BeautifulSoup
from boto3.dynamodb.conditions import Key, Attr
from decimal import Decimal
from datetime import datetime
from functools import partial
from itertools import islice, imap
from Levenshtein import distance
from sklearn.preprocessing import normalize as normalize_matrix
# Matches the review-count text on a Steam store page, e.g. "(1,234 reviews)";
# group 1 captures the comma-separated number.
reviews_re = re.compile(r"\(([0-9,]+) reviews?\)")

# Steam's textual user-review summary mapped to an ordinal score
# (higher is better).
userscore_to_digit = {
    "Overwhelmingly Positive": 8,
    "Very Positive": 7,
    "Positive": 6,
    "Mostly Positive": 5,
    "Mixed": 4,
    "Mostly Negative": 3,
    "Negative": 2,
    "Very Negative": 1,
    "Overwhelmingly Negative": 0
}
# Inverse mapping: ordinal score back to the textual summary.
digit_to_userscore = {score: r for r,score in userscore_to_digit.iteritems()}

# Number of features rendered on the spider (radar) chart in the UI;
# read from the Flask app configuration.
MAX_SPIDER_FEATURES = app.config["MAX_SPIDER_FEATURES"]
class GameNotFoundException(Exception):
    """Raised when a Steam app id has no corresponding game record."""

    def __init__(self, app_id):
        message = "Game %s does not exist!" % app_id
        super(GameNotFoundException, self).__init__(message)
class Game(object):
    """In-memory model of a Steam game plus similarity/ranking helpers.

    Rows persist in the DynamoDB ``apps`` table; class-level caches
    (populated by _load_caches()) hold every game, its topic vector and a
    precomputed similarity ranking so per-request work is cheap.
    """

    table_name = "apps"
    table = db.Table(table_name)
    hash_key = ("app_id", utils.NUMBER)
    sorting_key = None
    # Class-level caches; all None until _load_caches() runs.
    __app_ids = None
    __app_id_to_index = None
    __compressed_matrix = None
    __ranking = None
    __game_cache = None
    __name_inverted_index = None
    __dimensions = None
    __dimensions_inverted_index = None

    @classmethod
    def _load_caches(cls):
        """Build every class-level cache from the MALLET topic matrix."""
        # cls.__app_ids, cls.__compressed_matrix = load_compressed_matrix()
        cls.__app_ids, cls.__compressed_matrix = load_mallet_matrix()
        # So we can pre-compute the ranking for every single game, since the compressed matrix is
        # static per instance. Saves us a couple cycles
        cls.__similarities = cls.__compressed_matrix.dot(cls.__compressed_matrix.T)
        cls.__ranking = np.array([np.argsort(row)[::-1] for row in cls.__similarities])
        cls.__app_id_to_index = {app_id: i for i, app_id in enumerate(cls.__app_ids)}
        cls.__game_cache = {game.app_id: game
                            for game in iter_all_games()
                            if game.app_id in cls.__app_id_to_index}
        cls.__name_inverted_index = {game.normalized_name: game.app_id
                                     for game in cls.__game_cache.itervalues()}
        cls.__dimensions = load_feature_names()
        cls.__dimensions_inverted_index = {dim: i for i, dim in enumerate(cls.__dimensions)}

    @classmethod
    def get_from_steamspy(cls, app_id):
        """Fetch a single game from the SteamSpy API; raises GameNotFoundException."""
        res = requests.get("http://steamspy.com/api.php?request=appdetails&appid=%s"%app_id)
        if not 200 <= res.status_code < 300:
            raise GameNotFoundException(app_id)
        else:
            return cls.from_steampspy_json(res.json())

    @classmethod
    def from_steampspy_json(cls, game):
        """Build a Game from a raw SteamSpy JSON dict (mutates the dict)."""
        # We don't use any of these guys so we have to delete them
        game.pop("owners_variance", None)
        game.pop("players_forever", None)
        game.pop("players_forever_variance", None)
        game.pop("players_2weeks", None)
        game.pop("players_2weeks_variance", None)
        game.pop("average_forever", None)
        game.pop("average_2weeks", None)
        game.pop("median_forever", None)
        game.pop("median_2weeks", None)
        game.pop("ccu", None)
        game["app_id"] = int(game.pop("appid"))
        game["price"] = float(game["price"] or 0) / 100 # price is in cents
        game["developer"] = game.get("developer", "") or ""
        game["publisher"] = game.get("publisher", "") or ""
        if len(game["tags"]) > 0 and isinstance(game["tags"], dict):
            tags = {k.lower().strip(): v for k, v in game["tags"].iteritems()}
        else:
            tags = dict()
        game["tags"] = tags
        # we have to set the actual userscore and num_reviews to None because this API doesn't
        # return those values
        game["userscore"] = None
        game["num_reviews"] = None
        if game["score_rank"] == "":
            game["score_rank"] = None
        else:
            game["score_rank"] = game["score_rank"]
        game["last_updated"] = datetime.utcnow()
        return cls(**game)

    @classmethod
    def from_json(cls, game_json):
        """Build a Game from a plain-JSON dict (epoch-seconds last_updated)."""
        game_json["last_updated"] = datetime.utcfromtimestamp(int(game_json["last_updated"]))
        return cls(**game_json)

    @classmethod
    def from_dynamo_json(cls, dynamo_json):
        """Build a Game from a DynamoDB item, normalizing Decimal/None fields."""
        dynamo_json["name"] = dynamo_json.get("name") or ""
        dynamo_json["normalized_name"] = dynamo_json.get("normalized_name") or ""
        dynamo_json["developer"] = dynamo_json.get("developer") or ""
        dynamo_json["publisher"] = dynamo_json.get("publisher") or ""
        dynamo_json["price"] = float(dynamo_json["price"])
        if dynamo_json["tags"] is not None and len(dynamo_json["tags"]) > 0:
            dynamo_json["tags"] = {k: int(v) for k, v in dynamo_json["tags"].iteritems()}
        else:
            dynamo_json["tags"] = dict()
        dynamo_json["last_updated"] = int(dynamo_json["last_updated"])
        return cls.from_json(dynamo_json)

    @classmethod
    def batch_save(cls, games):
        """Persist an iterable of games via the shared Dynamo batch writer."""
        return utils.batch_save(cls, games)

    @classmethod
    def find_by_name(cls, name):
        """Exact (normalized) name lookup; returns None when unknown."""
        game = cls.__name_inverted_index.get(normalize(name))
        if game is not None:
            return cls.get(game)

    @classmethod
    def correct_game_name(cls, game_name, max_results=2):
        """Return the max_results games whose names are Levenshtein-closest."""
        game_name = normalize(game_name)
        matches = sorted(cls.__name_inverted_index.keys(), key=partial(distance, game_name))
        return [cls.get(cls.__name_inverted_index[match]) for match in matches[:max_results]]

    @classmethod
    def get(cls, to_get):
        """Look up one game (int app id) or many (set of ids) from the cache."""
        if isinstance(to_get, int):
            return cls.__game_cache.get(to_get)
        if not (isinstance(to_get, set) and len(to_get) > 0):
            raise ValueError("`to_get` must be an int or a non-empty set! (got %s)"%type(to_get))
        results = dict()
        for app_id in to_get:
            # this is a little funky, but it just standardizes how we get a single game, since
            # we can't really do actual multi-gets from Dynamo.
            results[app_id] = cls.get(app_id)
        return results

    @classmethod
    def get_all(cls):
        """Iterate over every cached game."""
        return cls.__game_cache.itervalues()

    @classmethod
    def get_from_dynamo(cls, to_get):
        """Like get(), but the single-id path queries DynamoDB directly."""
        multi = isinstance(to_get, set) and len(to_get) > 0
        if not (multi or isinstance(to_get, int)):
            raise ValueError("`to_get` must be an int or a non-empty set! (got %s)"%type(to_get))
        if multi:
            results = dict()
            for app_id in to_get:
                # this is a little funky, but it just standardizes how we get a single game, since
                # we can't really do actual multi-gets from Dynamo
                results[app_id] = cls.get(app_id)
            return results
        else:
            return cls.table.query(KeyConditionExpression=Key(cls.hash_key[0]).eq(to_get))

    @classmethod
    def get_all_from_dynamo(cls):
        """Lazily scan the whole table, yielding Game objects."""
        return imap(cls.from_dynamo_json, utils.table_scan(cls))

    @classmethod
    def get_unscored(cls):
        """Iterate over cached games that still lack a userscore."""
        # FIX: was `is not None`, which returned the *scored* games — the
        # opposite of this method's name and of get_unscored_from_dynamo(),
        # which filters on userscore == None.
        return (game for game in cls.__game_cache.itervalues() if game.userscore is None)

    @classmethod
    def get_unscored_from_dynamo(cls, limit=1000):
        """Scan DynamoDB for up to `limit` games with no userscore."""
        attr_cond = Attr("userscore").eq(None)
        return imap(cls.from_dynamo_json,
                    islice(utils.table_scan(cls, FilterExpression=attr_cond, Limit=limit), limit))

    @classmethod
    def get_feature_indices(cls, features):
        """Map feature names to matrix column indices, dropping unknown names."""
        return np.array([cls.__dimensions_inverted_index[feature]
                         for feature in features
                         if feature in cls.__dimensions_inverted_index], dtype=np.int)

    @classmethod
    def compute_library_vector(cls, app_id_list, playtimes):
        """Combine a user's library into one unit vector, log-weighted by playtime."""
        library_vector = np.zeros(Game.__compressed_matrix.shape[1])
        for app_id, pt in zip(app_id_list, playtimes):
            if app_id in Game.__app_id_to_index:
                library_vector += cls.__game_cache[app_id].vector() * np.log(pt + 1)
        library_vector = normalize_matrix(library_vector)[0]
        return library_vector

    @classmethod
    def compute_ranking_for_vector(cls, query_vector, removed_features, app_id=None):
        """Rank all games by similarity to query_vector (with some feature
        columns zeroed out), excluding app_id itself when given."""
        new_vector = np.copy(query_vector)
        new_vector[removed_features] = 0
        new_vector = normalize_matrix(new_vector)[0]
        scores = cls.__compressed_matrix.dot(new_vector)
        return [(scores[index], cls.get(cls.__app_ids[index]))
                for index in np.argsort(scores)[::-1]
                if cls.__app_ids[index] != app_id]

    @classmethod
    def get_vector_best_features(cls, vector, json_format=False):
        """Top MAX_SPIDER_FEATURES (value, name) pairs for a feature vector."""
        best_features = np.argsort(vector)[::-1][:MAX_SPIDER_FEATURES]
        best_features.sort()
        if json_format:
            return (json.dumps(vector[best_features].tolist()),
                    json.dumps(cls.__dimensions[best_features].tolist()))
        else:
            return vector[best_features], cls.__dimensions[best_features]

    def __init__(self, app_id, name, developer, publisher, owners, userscore, num_reviews,
                 score_rank, price, tags, last_updated, **kwargs):
        self.app_id = app_id
        self.name = name
        # this one is in the kwargs because it's optional but depends on self.name
        self.normalized_name = kwargs.get("normalized_name") or normalize(self.name)
        self.normalized_name = self.normalized_name.encode("ascii")
        self.developer = developer
        self.publisher = publisher
        self.owners = owners
        self.userscore = userscore
        self.num_reviews = num_reviews
        self.score_rank = score_rank
        self.price = price
        self.tags = tags
        self.last_updated = last_updated
        # Games outside the topic matrix get no vector/ranking attributes.
        if self.app_id not in Game.__app_id_to_index:
            return
        self.__app_index = Game.__app_id_to_index[self.app_id]
        self.__vector = Game.__compressed_matrix[self.__app_index]
        self.__best_feature_indices = np.argsort(self.vector())[::-1][:MAX_SPIDER_FEATURES]
        # This is just so that the spider chart doesn't look so *regular*
        self.__best_feature_indices.sort()

    def __repr__(self):
        return "Game(app_id=%d,name=%s)"%(self.app_id, self.normalized_name)

    def __str__(self):
        return self.__repr__()

    def vector(self):
        """This game's topic vector; raises if it is not in the matrix."""
        if self.app_id not in Game.__app_id_to_index:
            raise GameNotFoundException(self.app_id)
        else:
            return self.__vector

    def vector_parsable(self):
        """Comma-separated string form of the topic vector."""
        return ",".join(map(str, self.vector()))

    def steam_url(self):
        """Steam store page URL for this game."""
        return "http://store.steampowered.com/app/%s"%self.app_id

    def steam_image_url(self):
        """Steam CDN header-image URL for this game."""
        return "http://cdn.akamai.steamstatic.com/steam/apps/%s/header.jpg"%self.app_id

    def tags_json(self, just_keys=False, encoded=False):
        """Tags as JSON; keys sorted by vote count when just_keys, optionally base64."""
        to_return = None
        if just_keys:
            to_return = json.dumps(sorted(self.tags.keys(), key=self.tags.get, reverse=True))
        else:
            to_return = json.dumps(self.tags)
        if encoded:
            return base64.b64encode(to_return)
        else:
            return to_return

    def get_ranking(self, library_vector, removed_features, bias_weight=0.3):
        """Rank all other games by similarity to this one, optionally biased
        towards the user's library vector and with some features removed."""
        if self.app_id not in Game.__app_id_to_index:
            raise GameNotFoundException(self.app_id)
        if library_vector is None and len(removed_features) == 0:
            # Fast path: the unbiased, unfiltered ranking was precomputed.
            ranking = Game.__ranking[self.__app_index]
            scores = Game.__similarities[self.__app_index, ranking]
            return [(score, Game.get(Game.__app_ids[app_index]))
                    for score, app_index in zip(scores, ranking)
                    if app_index != self.__app_index]
        else:
            new_vector = self.vector().copy()
            if library_vector is not None:
                new_vector += library_vector * bias_weight
            return Game.compute_ranking_for_vector(new_vector,
                                                   removed_features=removed_features,
                                                   app_id=self.app_id)

    def best_features(self, json_format=False):
        """Values of this game's top spider-chart features."""
        features = self.__vector[self.__best_feature_indices]
        if json_format:
            return json.dumps(features.tolist())
        else:
            return features

    def best_feature_names(self, json_format=False):
        """Names of this game's top spider-chart features."""
        dimensions = Game.__dimensions[self.__best_feature_indices]
        if json_format:
            return json.dumps(dimensions.tolist())
        else:
            return dimensions

    def intersect_features(self, other_game, json_format=False):
        """This game's values at *other_game*'s best feature indices."""
        features = self.__vector[other_game.__best_feature_indices]
        if json_format:
            return json.dumps(features.tolist())
        else:
            return features

    def compare_features(self, library_vector, json_format=False):
        """This game's values at the library vector's best feature indices."""
        features = self.__vector[np.argsort(library_vector)[::-1][:MAX_SPIDER_FEATURES]]
        if json_format:
            return json.dumps(features.tolist())
        else:
            return features

    def to_json(self):
        """Plain-JSON representation (last_updated as epoch seconds)."""
        return {
            "app_id": self.app_id,
            "name": self.name,
            "normalized_name": self.normalized_name,
            "developer": self.developer,
            "publisher": self.publisher,
            "owners": self.owners,
            "userscore": self.userscore,
            "num_reviews": self.num_reviews,
            "score_rank": self.score_rank,
            "price": self.price,
            "tags": self.tags if len(self.tags) > 0 else None,
            "last_updated": int(time.mktime(self.last_updated.timetuple())),
        }

    def to_dynamo_json(self):
        """DynamoDB item form: Decimal price, empty strings stored as None."""
        dynamo_json = self.to_json()
        dynamo_json["price"] = Decimal(str(self.price))
        dynamo_json["name"] = self.name or None
        dynamo_json["normalized_name"] = self.normalized_name or None
        dynamo_json["developer"] = self.developer or None
        dynamo_json["publisher"] = self.publisher or None
        return dynamo_json

    def save(self):
        """Write this game to the DynamoDB apps table."""
        Game.table.put_item(Item=self.to_dynamo_json())

    def fetch_more_reviews(self, limit=1000, save=False):
        """Scrape up to `limit` reviews from Steam, optionally persisting them."""
        reviews = Review.get_reviews_from_steam(self.app_id, max_reviews=limit)
        if save:
            Review.batch_save(reviews)
        return reviews

    def get_saved_reviews(self, key_condition, filter_expression, max_items):
        """Query saved reviews for this game with optional extra conditions."""
        primary_condition = Key(Review.hash_key[0]).eq(self.app_id)
        if key_condition is not None:
            primary_condition = primary_condition & key_condition
        return Review.get(primary_condition, filter_expression, max_items)

    def get_recent_reviews(self, max_reviews=100):
        """Most recent saved reviews for this game."""
        return self.get_saved_reviews(None, None, max_reviews)

    def update_and_save(self):
        """Refresh from SteamSpy and the store page, then persist."""
        self.update_steamspy_attributes()
        self.update_userscore()
        self.last_updated = datetime.utcnow()
        self.save()

    def update_steamspy_attributes(self):
        """Overwrite this game's attributes from a fresh SteamSpy fetch."""
        new_game = Game.get_from_steamspy(self.app_id)
        self.name = new_game.name
        self.developer = new_game.developer
        self.publisher = new_game.publisher
        self.owners = new_game.owners
        self.userscore = new_game.userscore
        self.num_reviews = new_game.num_reviews
        self.score_rank = new_game.score_rank
        self.price = new_game.price
        self.tags = new_game.tags

    def update_userscore(self):
        """Scrape the Steam store page for the overall userscore/review count.

        Unscorable games (e.g. unreleased) get the sentinel -2 so they are
        not retried automatically.
        """
        page = requests.get("http://store.steampowered.com/app/%s"%self.app_id)
        soup = BeautifulSoup(page.text, "lxml")
        summary_section = soup.find_all("div", class_="summary_section")
        for sec in summary_section:
            title, score, num_reviews = sec.stripped_strings
            if "overall" in title.lower():
                matches = reviews_re.match(num_reviews)
                if score in userscore_to_digit and matches is not None:
                    self.userscore = userscore_to_digit[score]
                    num_reviews, = matches.groups()
                    self.num_reviews = int(num_reviews.replace(",", ""))
                    print("Succesfully updated userscore for", self.app_id)
                    return
        # This is just so that we don't retry any games that can't be scored (maybe because they
        # haven't come out yet) automatically.
        print("Could not update userscore for", self.app_id)
        self.userscore = -2
        self.num_reviews = -2
STEAMSPY_GAMES_JSON = data_file("steamspy_games.json")

def iter_all_games():
    """Yield a Game for every entry in the SteamSpy "all" dump.

    The raw JSON is cached on disk at STEAMSPY_GAMES_JSON so repeated
    runs avoid re-downloading; the sentinel app id "999999" is skipped.
    """
    if not os.path.exists(STEAMSPY_GAMES_JSON):
        games_json = requests.get("http://steamspy.com/api.php?request=all").json()
        with open(STEAMSPY_GAMES_JSON, "w") as fp:
            json.dump(games_json, fp, default=lambda o: o.__dict__, indent=2)
    else:
        with open(STEAMSPY_GAMES_JSON) as fp:
            games_json = json.load(fp)
    for app_id, raw_game in games_json.iteritems():
        if app_id != "999999":
            yield Game.from_steampspy_json(raw_game)
def normalize(game_name):
    """Canonicalize *game_name* for lookups: lower-case it, drop any
    non-ASCII characters and strip surrounding whitespace."""
    lowered = game_name.lower()
    ascii_only = lowered.encode("ascii", "ignore")
    return ascii_only.strip()
def save_compressed_matrix(app_ids,
                           compressed_matrix,
                           filename=data_file("compressed_matrix.npy")):
    """Persist *app_ids* alongside *compressed_matrix* as one .npy file.

    The ids are prepended as the first column so load_compressed_matrix()
    can split them back out.
    """
    combined = np.column_stack((app_ids, compressed_matrix))
    with open(filename, "wb") as out:
        np.save(out, combined)
def load_compressed_matrix(filename=data_file("compressed_matrix.npy")):
    """Inverse of save_compressed_matrix(): return (app_ids, matrix).

    Column 0 of the stored array holds the integer app ids; the remaining
    columns form the compressed feature matrix.
    """
    with open(filename, "rb") as src:
        stacked = np.load(src)
    ids = stacked[:, 0].astype(np.int)
    return ids, stacked[:, 1:]
def load_mallet_matrix(filename=mallet_file("40_features", "doc_matrix.tsv")):
    """Parse a MALLET doc-topic TSV into (app_ids, row-normalized matrix).

    Column 1 of each record is a path whose final component is the app id;
    columns 2+ are the topic weights for that document.
    """
    ids = []
    rows = []
    with open(filename, "r") as fh:
        for record in csv.reader(fh, delimiter="\t"):
            ids.append(np.int(record[1].split("/")[-1]))
            rows.append(np.array(map(np.float, record[2:])))
    return np.array(ids), normalize_matrix(np.array(rows))
def load_feature_names(filename=mallet_file("40_features", "feature_names.csv")):
    """Return the feature/topic names as an array, one per non-empty line."""
    with open(filename, "rb") as fh:
        names = [raw.strip() for raw in fh if len(raw) > 0]
    return np.array(names)
| 39.298729 | 98 | 0.638902 |
33994ce9082b27a5406e5c3b8bf76b12f2fd214e | 1,240 | py | Python | src/oci/dts/shipping_vendors_client_composite_operations.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/dts/shipping_vendors_client_composite_operations.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/dts/shipping_vendors_client_composite_operations.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
import oci # noqa: F401
from oci.util import WAIT_RESOURCE_NOT_FOUND # noqa: F401
class ShippingVendorsClientCompositeOperations(object):
    """
    Thin convenience layer over :py:class:`~oci.dts.ShippingVendorsClient`.

    Composite operations bundle an action and the wait for its resulting state
    into a single call, so callers do not have to chain the action (e.g.
    launching an instance, creating a load balancer) with a waiter themselves.
    """

    def __init__(self, client, **kwargs):
        """
        Wrap the given service client.

        :param ShippingVendorsClient client:
            The service client which will be wrapped by this object
        """
        self.client = client
69bb7acce6adf60bc567e9db3bb36e43f58947e7 | 5,101 | py | Python | client/batteryclient/exceptions.py | martinjrobins/battery-api | c416bdf487f3d41901c7155245ffce7330bdc496 | [
"BSD-3-Clause"
] | 2 | 2021-11-11T11:03:22.000Z | 2022-01-25T19:29:52.000Z | client/batteryclient/exceptions.py | martinjrobins/battery-api | c416bdf487f3d41901c7155245ffce7330bdc496 | [
"BSD-3-Clause"
] | null | null | null | client/batteryclient/exceptions.py | martinjrobins/battery-api | c416bdf487f3d41901c7155245ffce7330bdc496 | [
"BSD-3-Clause"
] | null | null | null | """
Battery Data API
A standard API for accessing battery experiment datasets and metadata # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: martin.robinson@cs.ox.ac.uk
Generated by: https://openapi-generator.tech
"""
class OpenApiException(Exception):
    """Common base class for every exception raised by this API client."""
class ApiTypeError(OpenApiException, TypeError):
    def __init__(self, msg, path_to_item=None, valid_classes=None,
                 key_type=None):
        """Signal that a received value had the wrong type.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (list): keys/indices leading to the offending item,
                None if unset
            valid_classes (tuple): the primitive classes the item should be an
                instance of, None if unset
            key_type (bool): True if the item is a key in a dict, False if it
                is a value in a dict or an item in a list, None if unset
        """
        self.path_to_item = path_to_item
        self.valid_classes = valid_classes
        self.key_type = key_type
        # Append the location in the payload, when one was provided.
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiTypeError, self).__init__(msg)
class ApiValueError(OpenApiException, ValueError):
    def __init__(self, msg, path_to_item=None):
        """Signal that a received value was invalid.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (list): path to the offending item in the received
                data, None if unset
        """
        self.path_to_item = path_to_item
        # Append the location in the payload, when one was provided.
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiValueError, self).__init__(msg)
class ApiAttributeError(OpenApiException, AttributeError):
    def __init__(self, msg, path_to_item=None):
        """Raised when an attribute reference or assignment fails.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (None/list): path to the exception in the
                received_data dict
        """
        self.path_to_item = path_to_item
        # Append the location in the payload, when one was provided.
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiAttributeError, self).__init__(msg)
class ApiKeyError(OpenApiException, KeyError):
    def __init__(self, msg, path_to_item=None):
        """Raised when a required key is missing from received data.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (None/list): path to the exception in the
                received_data dict
        """
        self.path_to_item = path_to_item
        # Append the location in the payload, when one was provided.
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiKeyError, self).__init__(msg)
class ApiException(OpenApiException):
    """Carries the HTTP status, reason, headers and body of a failed call."""

    def __init__(self, status=None, reason=None, http_resp=None):
        # A live response object, when supplied, wins over the scalar args.
        if http_resp:
            self.status, self.reason = http_resp.status, http_resp.reason
            self.body, self.headers = http_resp.data, http_resp.getheaders()
        else:
            self.status, self.reason = status, reason
            self.body = self.headers = None

    def __str__(self):
        """Render status and reason, plus headers/body when present."""
        parts = ["({0})\n".format(self.status),
                 "Reason: {0}\n".format(self.reason)]
        if self.headers:
            parts.append("HTTP response headers: {0}\n".format(self.headers))
        if self.body:
            parts.append("HTTP response body: {0}\n".format(self.body))
        return "".join(parts)
class NotFoundException(ApiException):
    """ApiException specialization for "not found" responses (presumably
    HTTP 404 — confirm against the client's REST layer)."""
    def __init__(self, status=None, reason=None, http_resp=None):
        super(NotFoundException, self).__init__(status, reason, http_resp)
class UnauthorizedException(ApiException):
    """ApiException specialization for "unauthorized" responses (presumably
    HTTP 401 — confirm against the client's REST layer)."""
    def __init__(self, status=None, reason=None, http_resp=None):
        super(UnauthorizedException, self).__init__(status, reason, http_resp)
class ForbiddenException(ApiException):
    """ApiException specialization for "forbidden" responses (presumably
    HTTP 403 — confirm against the client's REST layer)."""
    def __init__(self, status=None, reason=None, http_resp=None):
        super(ForbiddenException, self).__init__(status, reason, http_resp)
class ServiceException(ApiException):
    """ApiException specialization for server-side errors (presumably the
    HTTP 5xx range — confirm against the client's REST layer)."""
    def __init__(self, status=None, reason=None, http_resp=None):
        super(ServiceException, self).__init__(status, reason, http_resp)
def render_path(path_to_item):
    """Render a key/index path in subscript notation, e.g. ``[0]['name']``."""
    return "".join(
        "[{0}]".format(part) if isinstance(part, int) else "['{0}']".format(part)
        for part in path_to_item
    )
| 31.68323 | 87 | 0.602431 |
605846073462da03170e44e31a9feefd77a73634 | 5,726 | py | Python | yoloface.py | 2black0/yoloface | 4b918a8e2894919372c7aa15ffad68fbdba0a24c | [
"MIT"
] | null | null | null | yoloface.py | 2black0/yoloface | 4b918a8e2894919372c7aa15ffad68fbdba0a24c | [
"MIT"
] | null | null | null | yoloface.py | 2black0/yoloface | 4b918a8e2894919372c7aa15ffad68fbdba0a24c | [
"MIT"
] | null | null | null | # *******************************************************************
#
# Author : Thanh Nguyen, 2018
# Email : sthanhng@gmail.com
# Github : https://github.com/sthanhng
#
# BAP, AI Team
# Face detection using the YOLOv3 algorithm
#
# Description : yoloface.py
# The main code of the Face detection using the YOLOv3 algorithm
#
# *******************************************************************
# Usage example: python yoloface.py --image samples/outside_000001.jpg \
# --output-dir outputs/
# python yoloface.py --video samples/subway.mp4 \
# --output-dir outputs/
# python yoloface.py --src 1 --output-dir outputs/
import argparse
import sys
import os
from utils import *
#####################################################################
# Command-line interface: model files, one of {image, video, camera}
# as the input source, and where to write the annotated output.
parser = argparse.ArgumentParser()
parser.add_argument('--model-cfg', type=str, default='./cfg/yolov3-face.cfg', help='path to config file')
parser.add_argument('--model-weights', type=str, default='./model-weights/yolov3-wider_16000.weights', help='path to weights of model')
parser.add_argument('--image', type=str, default='', help='path to image file')
parser.add_argument('--video', type=str, default='', help='path to video file')
parser.add_argument('--src', type=int, default=0, help='source of the camera')
parser.add_argument('--output-dir', type=str, default='outputs/', help='path to the output directory')
parser.add_argument('--vsize', nargs='+', type=int, default=[640, 480])  # camera capture size (w h)
args = parser.parse_args()
#####################################################################
# print the arguments
print('----- info -----')
print('[i] The config file: ', args.model_cfg)
print('[i] The weights of model file: ', args.model_weights)
print('[i] Path to image file: ', args.image)
print('[i] Path to video file: ', args.video)
print('###########################################################\n')
# check outputs directory (created on first run)
if not os.path.exists(args.output_dir):
    print('==> Creating the {} directory...'.format(args.output_dir))
    os.makedirs(args.output_dir)
else:
    print('==> Skipping create the {} directory...'.format(args.output_dir))
# Give the configuration and weight files for the model and load the network
# using them.  CPU-only OpenCV DNN backend; `cv2` comes in via
# `from utils import *` above.
net = cv2.dnn.readNetFromDarknet(args.model_cfg, args.model_weights)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
def _main():
    """Capture frames (image/video/camera per CLI args), run YOLOv3 face
    detection on each, annotate them, and save/display the result.

    Reads module globals: ``args`` (parsed CLI), ``net`` (loaded darknet
    model) and, via ``from utils import *``: IMG_WIDTH, IMG_HEIGHT,
    CONF_THRESHOLD, NMS_THRESHOLD, COLOR_RED, get_outputs_names,
    post_process.
    """
    wind_name = 'Drone Following using YOLOv3'
    cv2.namedWindow(wind_name, cv2.WINDOW_NORMAL)
    output_file = ''
    # Pick the capture source: still image > video file > camera device.
    if args.image:
        if not os.path.isfile(args.image):
            print("[!] ==> Input image file {} doesn't exist".format(args.image))
            sys.exit(1)
        cap = cv2.VideoCapture(args.image)
        output_file = args.image[:-4].rsplit('/')[-1] + '_yoloface.jpg'
    elif args.video:
        if not os.path.isfile(args.video):
            print("[!] ==> Input video file {} doesn't exist".format(args.video))
            sys.exit(1)
        cap = cv2.VideoCapture(args.video)
        output_file = args.video[:-4].rsplit('/')[-1] + '_yoloface.avi'
    else:
        # Get data from the camera
        cap = cv2.VideoCapture(args.src)
        # Properties 3/4 are CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT.
        cap.set(3, args.vsize[0])
        cap.set(4, args.vsize[1])
    # Get the video writer initialized to save the output video
    # NOTE(review): for the camera branch output_file stays '', so the writer
    # path is just the output directory — likely never saves; confirm.
    if not args.image:
        video_writer = cv2.VideoWriter(os.path.join(args.output_dir, output_file),
                                       cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                                       cap.get(cv2.CAP_PROP_FPS), (
                                           round(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                                           round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))
    while True:
        has_frame, frame = cap.read()
        # Stop the program if reached end of video
        if not has_frame:
            print('[i] ==> Done processing!!!')
            print('[i] ==> Output file is stored at', os.path.join(args.output_dir, output_file))
            cv2.waitKey(1000)
            break
        # Create a 4D blob from a frame (scaled to 1/255, resized to the
        # network's input size, no mean subtraction, no crop).
        blob = cv2.dnn.blobFromImage(frame, 1 / 255, (IMG_WIDTH, IMG_HEIGHT),
                                     [0, 0, 0], 1, crop=False)
        # Sets the input to the network
        net.setInput(blob)
        # Runs the forward pass to get output of the output layers
        outs = net.forward(get_outputs_names(net))
        # Remove the bounding boxes with low confidence (also draws boxes —
        # post_process comes from utils; confirm side effects there).
        faces = post_process(frame, outs, CONF_THRESHOLD, NMS_THRESHOLD)
        print('[i] ==> # detected faces: {}'.format(len(faces)))
        print('#' * 60)
        # initialize the set of information we'll displaying on the frame
        info = [
            ('number of faces detected', '{}'.format(len(faces)))
        ]
        for (i, (txt, val)) in enumerate(info):
            text = '{}: {}'.format(txt, val)
            cv2.putText(frame, text, (10, (i * 20) + 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, COLOR_RED, 2)
        # Save the output video to file (single images are written directly)
        if args.image:
            cv2.imwrite(os.path.join(args.output_dir, output_file), frame.astype(np.uint8))
        else:
            video_writer.write(frame.astype(np.uint8))
        cv2.imshow(wind_name, frame)
        # ESC or 'q' interrupts the loop.
        key = cv2.waitKey(1)
        if key == 27 or key == ord('q'):
            print('[i] ==> Interrupted by user!')
            break
    cap.release()
    cv2.destroyAllWindows()
    print('==> All done!')
    print('***********************************************************')
# Script entry point: run the detector with the parsed CLI arguments.
if __name__ == '__main__':
    _main()
| 37.92053 | 135 | 0.559553 |
7fe7e3878f789047758ce87958928a63e59e8310 | 2,876 | py | Python | utils/nn/modules/linear.py | roshanr11/Research-DCST | 225461e6ffd7ca5a48b9688946eb36b2d98f358e | [
"MIT"
] | 5 | 2020-04-29T08:48:53.000Z | 2020-12-23T10:11:39.000Z | utils/nn/modules/linear.py | roshanr11/Research-DCST | 225461e6ffd7ca5a48b9688946eb36b2d98f358e | [
"MIT"
] | 2 | 2020-01-11T08:31:06.000Z | 2021-06-09T12:41:32.000Z | utils/nn/modules/linear.py | roshanr11/Research-DCST | 225461e6ffd7ca5a48b9688946eb36b2d98f358e | [
"MIT"
] | 5 | 2019-11-20T02:49:03.000Z | 2020-09-17T15:27:34.000Z | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
class BiLinear(nn.Module):
    '''
    Bi-linear layer: out = bilinear(x_l, x_r; U) + W_l x_l + W_r x_r + b.
    '''

    def __init__(self, left_features, right_features, out_features, bias=True):
        '''
        Args:
            left_features: size of left input
            right_features: size of right input
            out_features: size of output
            bias: If set to False, the layer will not learn an additive bias.
                Default: True
        '''
        super(BiLinear, self).__init__()
        self.left_features = left_features
        self.right_features = right_features
        self.out_features = out_features

        self.U = Parameter(torch.Tensor(self.out_features, self.left_features, self.right_features))
        self.W_l = Parameter(torch.Tensor(self.out_features, self.left_features))
        # Fixed: the right-hand linear map must match the *right* input width.
        # The original allocated (out_features, left_features), which makes
        # forward() fail whenever left_features != right_features.
        self.W_r = Parameter(torch.Tensor(self.out_features, self.right_features))

        if bias:
            self.bias = Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)

        self.reset_parameters()

    def reset_parameters(self):
        '''Xavier-initialize the weights and zero the bias (if present).'''
        nn.init.xavier_uniform_(self.W_l)
        nn.init.xavier_uniform_(self.W_r)
        nn.init.xavier_uniform_(self.U)
        # Fixed: with bias=False, self.bias is None and the original
        # unconditional nn.init.constant_(self.bias, 0.) raised.
        if self.bias is not None:
            nn.init.constant_(self.bias, 0.)

    def forward(self, input_left, input_right):
        '''
        Args:
            input_left: Tensor
                the left input tensor with shape = [batch1, batch2, ..., left_features]
            input_right: Tensor
                the right input tensor with shape = [batch1, batch2, ..., right_features]

        Returns:
            Tensor with shape = [batch1, batch2, ..., out_features]
        '''
        left_size = input_left.size()
        right_size = input_right.size()
        assert left_size[:-1] == right_size[:-1], \
            "batch size of left and right inputs mis-match: (%s, %s)" % (left_size[:-1], right_size[:-1])
        batch_size = int(np.prod(left_size[:-1]))
        # Flatten all leading (batch) dimensions into one.
        input_left = input_left.view(batch_size, self.left_features)
        input_right = input_right.view(batch_size, self.right_features)
        # Bilinear term (F.bilinear accepts bias=None) plus the two linear terms.
        output = F.bilinear(input_left, input_right, self.U, self.bias)
        output = output + F.linear(input_left, self.W_l, None) + F.linear(input_right, self.W_r, None)
        # Restore the original leading batch dimensions.
        return output.view(left_size[:-1] + (self.out_features, ))

    def __repr__(self):
        return self.__class__.__name__ + ' (' \
            + 'in1_features=' + str(self.left_features) \
            + ', in2_features=' + str(self.right_features) \
            + ', out_features=' + str(self.out_features) + ')'
37b6075c1fc35dd885e86d9a9eded24ca60cc5bf | 287 | py | Python | wrapper/image.py | Mineshaft-game/RenderMite | 4931d109ddebab1ef8efaa41816b9d1e8a52caef | [
"MIT"
] | null | null | null | wrapper/image.py | Mineshaft-game/RenderMite | 4931d109ddebab1ef8efaa41816b9d1e8a52caef | [
"MIT"
] | 5 | 2021-10-25T00:18:59.000Z | 2022-01-18T01:16:26.000Z | wrapper/image.py | Mineshaft-game/RenderMite | 4931d109ddebab1ef8efaa41816b9d1e8a52caef | [
"MIT"
] | 1 | 2022-01-24T20:30:28.000Z | 2022-01-24T20:30:28.000Z | import io
import pygame
def load(file, namehint=""):
    """Load an image via pygame from a path or an open file object.

    Args:
        file: either a filesystem path (str) or an open file object.
        namehint: optional filename hint passed to pygame so it can infer the
            image format when loading from a file object.

    Fixes vs. the original: the `type(file) == io.TextIOWrapper` check missed
    binary file objects (`open(path, 'rb')` yields `io.BufferedReader`, the
    normal way to open an image), so those fell through and returned None
    silently.  Any non-str argument is now handed to pygame as a file object.
    """
    if isinstance(file, str):
        return pygame.image.load(file)
    return pygame.image.load(file, namehint)
def get_image_version():
    """Return the version of the SDL_image library pygame was linked against."""
    return pygame.image.get_sdl_image_version()
| 23.916667 | 49 | 0.679443 |
88003852e3da45d5a6e8a02c16cd0aca00013ad3 | 14,790 | py | Python | sdks/python/client/argo_workflows/model/io_argoproj_workflow_v1alpha1_hdfs_artifact.py | serhatYilmazz/argo-workflows | 774bf47ee678ef31d27669f7d309dee1dd84340c | [
"Apache-2.0"
] | 4 | 2021-12-10T19:52:34.000Z | 2022-01-02T14:56:04.000Z | sdks/python/client/argo_workflows/model/io_argoproj_workflow_v1alpha1_hdfs_artifact.py | serhatYilmazz/argo-workflows | 774bf47ee678ef31d27669f7d309dee1dd84340c | [
"Apache-2.0"
] | 11 | 2022-01-28T18:30:16.000Z | 2022-03-30T18:35:58.000Z | sdks/python/client/argo_workflows/model/io_argoproj_workflow_v1alpha1_hdfs_artifact.py | isubasinghe/argo-workflows | 1a6e94f1d490e2265c977514d698a1ca70e14fe3 | [
"Apache-2.0"
] | 1 | 2022-02-08T03:02:02.000Z | 2022-02-08T03:02:02.000Z | """
Argo Server API
You can get examples of requests and responses by using the CLI with `--gloglevel=9`, e.g. `argo list --gloglevel=9` # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from argo_workflows.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from argo_workflows.exceptions import ApiAttributeError
def lazy_import():
    # Imports are deferred to call time (invoked from the cached_property
    # methods below) and injected into module globals, so that circular
    # imports between generated model modules resolve cleanly.
    from argo_workflows.model.config_map_key_selector import ConfigMapKeySelector
    from argo_workflows.model.secret_key_selector import SecretKeySelector
    globals()['ConfigMapKeySelector'] = ConfigMapKeySelector
    globals()['SecretKeySelector'] = SecretKeySelector
# Auto-generated OpenAPI model for an HDFS-backed workflow artifact.
# To change it, regenerate from the OpenAPI spec — do not hand-edit.
class IoArgoprojWorkflowV1alpha1HDFSArtifact(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-constrained or validated fields on this model.
    allowed_values = {
    }

    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'path': (str,),  # noqa: E501
            'addresses': ([str],),  # noqa: E501
            'force': (bool,),  # noqa: E501
            'hdfs_user': (str,),  # noqa: E501
            'krb_c_cache_secret': (SecretKeySelector,),  # noqa: E501
            'krb_config_config_map': (ConfigMapKeySelector,),  # noqa: E501
            'krb_keytab_secret': (SecretKeySelector,),  # noqa: E501
            'krb_realm': (str,),  # noqa: E501
            'krb_service_principal_name': (str,),  # noqa: E501
            'krb_username': (str,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # No polymorphic discriminator on this model.
        return None

    # Maps pythonic attribute names to the JSON keys in the API payload.
    attribute_map = {
        'path': 'path',  # noqa: E501
        'addresses': 'addresses',  # noqa: E501
        'force': 'force',  # noqa: E501
        'hdfs_user': 'hdfsUser',  # noqa: E501
        'krb_c_cache_secret': 'krbCCacheSecret',  # noqa: E501
        'krb_config_config_map': 'krbConfigConfigMap',  # noqa: E501
        'krb_keytab_secret': 'krbKeytabSecret',  # noqa: E501
        'krb_realm': 'krbRealm',  # noqa: E501
        'krb_service_principal_name': 'krbServicePrincipalName',  # noqa: E501
        'krb_username': 'krbUsername',  # noqa: E501
    }

    read_only_vars = {
    }

    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, path, *args, **kwargs):  # noqa: E501
        """IoArgoprojWorkflowV1alpha1HDFSArtifact - a model defined in OpenAPI

        Args:
            path (str): Path is a file path in HDFS

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            addresses ([str]): Addresses is accessible addresses of HDFS name nodes. [optional]  # noqa: E501
            force (bool): Force copies a file forcibly even if it exists. [optional]  # noqa: E501
            hdfs_user (str): HDFSUser is the user to access HDFS file system. It is ignored if either ccache or keytab is used.. [optional]  # noqa: E501
            krb_c_cache_secret (SecretKeySelector): [optional]  # noqa: E501
            krb_config_config_map (ConfigMapKeySelector): [optional]  # noqa: E501
            krb_keytab_secret (SecretKeySelector): [optional]  # noqa: E501
            krb_realm (str): KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.. [optional]  # noqa: E501
            krb_service_principal_name (str): KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.. [optional]  # noqa: E501
            krb_username (str): KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.. [optional]  # noqa: E501
        """

        # Pop the framework-internal keyword arguments before treating the
        # remainder as model properties.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.path = path
        for var_name, var_value in kwargs.items():
            # Optionally drop unknown keys when the configuration says so and
            # the model does not accept additional properties.
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Instance attributes that belong to the framework itself, not the model.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, path, *args, **kwargs):  # noqa: E501
        """IoArgoprojWorkflowV1alpha1HDFSArtifact - a model defined in OpenAPI

        Args:
            path (str): Path is a file path in HDFS

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            addresses ([str]): Addresses is accessible addresses of HDFS name nodes. [optional]  # noqa: E501
            force (bool): Force copies a file forcibly even if it exists. [optional]  # noqa: E501
            hdfs_user (str): HDFSUser is the user to access HDFS file system. It is ignored if either ccache or keytab is used.. [optional]  # noqa: E501
            krb_c_cache_secret (SecretKeySelector): [optional]  # noqa: E501
            krb_config_config_map (ConfigMapKeySelector): [optional]  # noqa: E501
            krb_keytab_secret (SecretKeySelector): [optional]  # noqa: E501
            krb_realm (str): KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.. [optional]  # noqa: E501
            krb_service_principal_name (str): KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.. [optional]  # noqa: E501
            krb_username (str): KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.. [optional]  # noqa: E501
        """

        # Mirrors _from_openapi_data, but additionally rejects writes to
        # read-only attributes (none declared on this model).
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.path = path
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
e26ab0e76a6fa6dab088e87b23a7b83f188c4191 | 329 | py | Python | vieglat/labsfinal.py | kosovojs/pywikibot-scripts | 4a9bf5177ebcfbba719970f9f3b48fbd51831818 | [
"MIT"
] | 3 | 2021-10-03T17:27:43.000Z | 2021-10-05T12:27:06.000Z | vieglat/labsfinal.py | kosovojs/pywikibot-scripts | 4a9bf5177ebcfbba719970f9f3b48fbd51831818 | [
"MIT"
] | null | null | null | vieglat/labsfinal.py | kosovojs/pywikibot-scripts | 4a9bf5177ebcfbba719970f9f3b48fbd51831818 | [
"MIT"
] | null | null | null | import pywikibot, re
fileeopen = open('dsfsdfsdfsdfsdfsdfsdfsdfsdfsdfsd.txt','r', encoding='utf-8').read()
site = pywikibot.Site("wikidata", "wikidata")
articletitle = 'User:Edgars2007/IAAF'
saglapa = pywikibot.Page(site,articletitle)
saglapa.text = fileeopen
saglapa.save(summary='upd', botflag=False, minor=False) | 29.909091 | 86 | 0.741641 |
090044cddfd15d47c1c86c28d4dc2895b596b05f | 6,955 | py | Python | sdk/python/pulumi_google_native/compute/beta/get_firewall_policy_iam_policy.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 44 | 2021-04-18T23:00:48.000Z | 2022-02-14T17:43:15.000Z | sdk/python/pulumi_google_native/compute/beta/get_firewall_policy_iam_policy.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 354 | 2021-04-16T16:48:39.000Z | 2022-03-31T17:16:39.000Z | sdk/python/pulumi_google_native/compute/beta/get_firewall_policy_iam_policy.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 8 | 2021-04-24T17:46:51.000Z | 2022-01-05T10:40:21.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
# Public API of this generated module.
__all__ = [
    'GetFirewallPolicyIamPolicyResult',
    'AwaitableGetFirewallPolicyIamPolicyResult',
    'get_firewall_policy_iam_policy',
    'get_firewall_policy_iam_policy_output',
]
# Generated result shape for the getFirewallPolicyIamPolicy invoke.
@pulumi.output_type
class GetFirewallPolicyIamPolicyResult:
    def __init__(__self__, audit_configs=None, bindings=None, etag=None, rules=None, version=None):
        # Each field is shape-checked (list/str/int) only when non-falsy,
        # then stored through pulumi's output-type property machinery.
        if audit_configs and not isinstance(audit_configs, list):
            raise TypeError("Expected argument 'audit_configs' to be a list")
        pulumi.set(__self__, "audit_configs", audit_configs)
        if bindings and not isinstance(bindings, list):
            raise TypeError("Expected argument 'bindings' to be a list")
        pulumi.set(__self__, "bindings", bindings)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if rules and not isinstance(rules, list):
            raise TypeError("Expected argument 'rules' to be a list")
        pulumi.set(__self__, "rules", rules)
        if version and not isinstance(version, int):
            raise TypeError("Expected argument 'version' to be a int")
        pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter(name="auditConfigs")
    def audit_configs(self) -> Sequence['outputs.AuditConfigResponse']:
        """
        Specifies cloud audit logging configuration for this policy.
        """
        return pulumi.get(self, "audit_configs")

    @property
    @pulumi.getter
    def bindings(self) -> Sequence['outputs.BindingResponse']:
        """
        Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:alice@example.com`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
        """
        return pulumi.get(self, "bindings")

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def rules(self) -> Sequence['outputs.RuleResponse']:
        """
        This is deprecated and has no effect. Do not use.
        """
        return pulumi.get(self, "rules")

    @property
    @pulumi.getter
    def version(self) -> int:
        """
        Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
        """
        return pulumi.get(self, "version")
class AwaitableGetFirewallPolicyIamPolicyResult(GetFirewallPolicyIamPolicyResult):
    """Awaitable wrapper around the invoke result so callers may `await` it."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns this method into a generator, which
        # is what makes instances awaitable; it never actually suspends and
        # immediately "returns" a plain result object.
        if False:
            yield self
        return GetFirewallPolicyIamPolicyResult(
            audit_configs=self.audit_configs,
            bindings=self.bindings,
            etag=self.etag,
            rules=self.rules,
            version=self.version)
def get_firewall_policy_iam_policy(options_requested_policy_version: Optional[str] = None,
                                   resource: Optional[str] = None,
                                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFirewallPolicyIamPolicyResult:
    """
    Gets the access control policy for a resource. May be empty if no such policy or resource exists.
    """
    # Marshal the arguments under their wire-format (camelCase) names.
    invoke_args = {
        'optionsRequestedPolicyVersion': options_requested_policy_version,
        'resource': resource,
    }
    # Fill in default invoke options and pin the provider plugin version.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    outcome = pulumi.runtime.invoke('google-native:compute/beta:getFirewallPolicyIamPolicy', invoke_args, opts=opts, typ=GetFirewallPolicyIamPolicyResult).value
    return AwaitableGetFirewallPolicyIamPolicyResult(
        audit_configs=outcome.audit_configs,
        bindings=outcome.bindings,
        etag=outcome.etag,
        rules=outcome.rules,
        version=outcome.version)
@_utilities.lift_output_func(get_firewall_policy_iam_policy)
def get_firewall_policy_iam_policy_output(options_requested_policy_version: Optional[pulumi.Input[Optional[str]]] = None,
                                          resource: Optional[pulumi.Input[str]] = None,
                                          opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetFirewallPolicyIamPolicyResult]:
    """
    Gets the access control policy for a resource. May be empty if no such policy or resource exists.
    """
    # Body intentionally empty: lift_output_func replaces this stub with an
    # Output-returning wrapper around get_firewall_policy_iam_policy.
    ...
| 56.544715 | 1,084 | 0.701941 |
ca6473ce3cf68f6d35e555aa84e0fa1c148f7554 | 3,955 | py | Python | sdk/python/pulumi_gcp/monitoring/group.py | stack72/pulumi-gcp | e63e4ed3129fe8e64e4869f4839ba2b20f57cb57 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_gcp/monitoring/group.py | stack72/pulumi-gcp | e63e4ed3129fe8e64e4869f4839ba2b20f57cb57 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_gcp/monitoring/group.py | stack72/pulumi-gcp | e63e4ed3129fe8e64e4869f4839ba2b20f57cb57 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class Group(pulumi.CustomResource):
    # Output properties mirroring the API fields of the same (camelCase) name.
    display_name: pulumi.Output[str]
    # Filter matched against monitored resources to determine membership.
    filter: pulumi.Output[str]
    is_cluster: pulumi.Output[bool]
    # Server-assigned group name.
    name: pulumi.Output[str]
    parent_name: pulumi.Output[str]
    project: pulumi.Output[str]
    """
    The ID of the project in which the resource belongs.
    If it is not provided, the provider project is used.
    """
    def __init__(__self__, resource_name, opts=None, display_name=None, filter=None, is_cluster=None, parent_name=None, project=None, __name__=None, __opts__=None):
        """
        The description of a dynamic collection of monitored resources. Each group
        has a filter that is matched against monitored resources and their
        associated metadata. If a group's filter matches an available monitored
        resource, then that resource is a member of that group.
        To get more information about Group, see:
        * [API documentation](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.groups)
        * How-to Guides
            * [Official Documentation](https://cloud.google.com/monitoring/groups/)
        <div class = "oics-button" style="float: right; margin: 0 0 -15px">
          <a href="https://console.cloud.google.com/cloudshell/open?cloudshell_git_repo=https%3A%2F%2Fgithub.com%2Fterraform-google-modules%2Fdocs-examples.git&cloudshell_working_dir=monitoring_group_basic&cloudshell_image=gcr.io%2Fgraphite-cloud-shell-images%2Fterraform%3Alatest&open_in_editor=main.tf&cloudshell_print=.%2Fmotd&cloudshell_tutorial=.%2Ftutorial.md" target="_blank">
            <img alt="Open in Cloud Shell" src="//gstatic.com/cloudssh/images/open-btn.svg" style="max-height: 44px; margin: 32px auto; max-width: 100%;">
          </a>
        </div>
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        """
        # Legacy shim: accept the deprecated __name__/__opts__ aliases.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        # Validate the resource name and options before registration.
        if not resource_name:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(resource_name, str):
            raise TypeError('Expected resource name to be a string')
        if opts and not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        # Collect the input properties; display_name and filter are required.
        __props__ = dict()
        if display_name is None:
            raise TypeError('Missing required property display_name')
        __props__['display_name'] = display_name
        if filter is None:
            raise TypeError('Missing required property filter')
        __props__['filter'] = filter
        __props__['is_cluster'] = is_cluster
        __props__['parent_name'] = parent_name
        __props__['project'] = project
        # 'name' is an output-only property computed by the service.
        __props__['name'] = None
        super(Group, __self__).__init__(
            'gcp:monitoring/group:Group',
            resource_name,
            __props__,
            opts)
    def translate_output_property(self, prop):
        """Map a provider (camelCase) property name to snake_case."""
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        """Map a snake_case input property name to the provider's camelCase."""
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 43.461538 | 383 | 0.675853 |
e0ccfb20fdeb49f975964ae40f16e30cbc37e733 | 14,628 | py | Python | congress/datalog/database.py | mail2nsrajesh/congress | a724dfb59c43a5e88e2b03e714a5f962d6976762 | [
"Apache-2.0"
] | 50 | 2015-04-21T14:12:01.000Z | 2020-06-01T06:23:13.000Z | congress/datalog/database.py | mail2nsrajesh/congress | a724dfb59c43a5e88e2b03e714a5f962d6976762 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | congress/datalog/database.py | mail2nsrajesh/congress | a724dfb59c43a5e88e2b03e714a5f962d6976762 | [
"Apache-2.0"
] | 25 | 2015-05-22T04:02:33.000Z | 2020-01-14T12:15:12.000Z | # Copyright (c) 2015 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from six.moves import range
from congress.datalog import base
from congress.datalog import compile
from congress.datalog import topdown
from congress.datalog import unify
from congress.datalog import utility
from congress import exception
##############################################################################
# Concrete Theory: Database
##############################################################################
class Database(topdown.TopDownTheory):
    """In-memory fact store for Datalog evaluation.

    Maps each table name to a list of DBTuples; every tuple carries the
    collection of Proofs that justify its presence, so facts are only
    removed when their last supporting proof is deleted.
    """
    class Proof(object):
        """A single justification: a variable binding applied to a rule."""
        def __init__(self, binding, rule):
            self.binding = binding
            self.rule = rule
        def __str__(self):
            return "apply({}, {})".format(str(self.binding), str(self.rule))
        def __eq__(self, other):
            result = (self.binding == other.binding and
                      self.rule == other.rule)
            # LOG.debug("Pf: Comparing %s and %s: %s", self, other, result)
            # LOG.debug("Pf: %s == %s is %s",
            #     self.binding, other.binding, self.binding == other.binding)
            # LOG.debug("Pf: %s == %s is %s",
            #     self.rule, other.rule, self.rule == other.rule)
            return result
        def __ne__(self, other):
            return not self.__eq__(other)
    class ProofCollection(object):
        """A set-like list of Proofs supporting one tuple."""
        def __init__(self, proofs):
            self.contents = list(proofs)
        def __str__(self):
            return '{' + ",".join(str(x) for x in self.contents) + '}'
        def __isub__(self, other):
            # NOTE(review): when OTHER is None this returns None, so
            # `pc -= None` would rebind pc to None; it likely should
            # `return self` -- confirm before relying on the None path.
            if other is None:
                return
            # LOG.debug("PC: Subtracting %s and %s", self, other)
            remaining = []
            for proof in self.contents:
                if proof not in other.contents:
                    remaining.append(proof)
            self.contents = remaining
            return self
        def __ior__(self, other):
            # NOTE(review): same hazard as __isub__ -- returns None when
            # OTHER is None, which would rebind the left operand to None.
            if other is None:
                return
            # LOG.debug("PC: Unioning %s and %s", self, other)
            for proof in other.contents:
                # LOG.debug("PC: Considering %s", proof)
                if proof not in self.contents:
                    self.contents.append(proof)
            return self
        def __getitem__(self, key):
            return self.contents[key]
        def __len__(self):
            return len(self.contents)
        def __ge__(self, iterable):
            # True when every proof in ITERABLE is contained in this collection.
            for proof in iterable:
                if proof not in self.contents:
                    # LOG.debug("Proof %s makes %s not >= %s",
                    #     proof, self, iterstr(iterable))
                    return False
            return True
        def __le__(self, iterable):
            # True when every proof in this collection occurs in ITERABLE.
            for proof in self.contents:
                if proof not in iterable:
                    # LOG.debug("Proof %s makes %s not <= %s",
                    #     proof, self, iterstr(iterable))
                    return False
            return True
        def __eq__(self, other):
            return self <= other and other <= self
        def __ne__(self, other):
            return not self.__eq__(other)
    class DBTuple(object):
        """A ground tuple of argument values plus its supporting proofs."""
        def __init__(self, iterable, proofs=None):
            self.tuple = tuple(iterable)
            if proofs is None:
                proofs = []
            self.proofs = Database.ProofCollection(proofs)
        def __eq__(self, other):
            # Equality ignores proofs: two DBTuples with the same values match.
            return self.tuple == other.tuple
        def __ne__(self, other):
            return not self.__eq__(other)
        def __str__(self):
            return str(self.tuple) + str(self.proofs)
        def __len__(self):
            return len(self.tuple)
        def __getitem__(self, index):
            return self.tuple[index]
        def __setitem__(self, index, value):
            # NOTE(review): self.tuple is a tuple, so item assignment will
            # raise TypeError if this is ever called -- appears to be dead code.
            self.tuple[index] = value
        def match(self, atom, unifier):
            """Unify this ground tuple with ATOM under UNIFIER.

            Returns the list of binding changes on success (so they can be
            undone later), or None if the tuple does not match.
            """
            # LOG.debug("DBTuple matching %s against atom %s in %s",
            #     self, iterstr(atom.arguments), unifier)
            if len(self.tuple) != len(atom.arguments):
                return None
            changes = []
            for i in range(0, len(atom.arguments)):
                val, binding = unifier.apply_full(atom.arguments[i])
                # LOG.debug("val(%s)=%s at %s; comparing to object %s",
                #     atom.arguments[i], val, binding, self.tuple[i])
                if val.is_variable():
                    changes.append(binding.add(
                        val, compile.Term.create_from_python(self.tuple[i]),
                        None))
                else:
                    if val.name != self.tuple[i]:
                        # Mismatch: roll back any bindings made so far.
                        unify.undo_all(changes)
                        return None
            return changes
    def __init__(self, name=None, abbr=None, theories=None, schema=None,
                 desc=None, owner=None):
        super(Database, self).__init__(
            name=name, abbr=abbr, theories=theories, schema=schema,
            desc=desc, owner=owner)
        # Table name -> list of DBTuples.
        self.data = {}
        self.kind = base.DATABASE_POLICY_TYPE
    def str2(self):
        """Render the table map as a human-readable string."""
        # NOTE(review): hash2str is defined but never used (and never closes
        # its opening brace); only hashlist2str is called below.
        def hash2str(h):
            s = "{"
            s += ", ".join(["{} : {}".format(str(key), str(h[key]))
                            for key in h])
            return s
        def hashlist2str(h):
            strings = []
            for key in h:
                s = "{} : ".format(key)
                s += '['
                s += ', '.join([str(val) for val in h[key]])
                s += ']'
                strings.append(s)
            return '{' + ", ".join(strings) + '}'
        return hashlist2str(self.data)
    def __eq__(self, other):
        return self.data == other.data
    def __ne__(self, other):
        return not self.__eq__(other)
    def __sub__(self, other):
        """Return rows (as [table, arg1, ...] lists) present here but not in OTHER."""
        def add_tuple(table, dbtuple):
            new = [table]
            new.extend(dbtuple.tuple)
            results.append(new)
        results = []
        for table in self.data:
            if table not in other.data:
                for dbtuple in self.data[table]:
                    add_tuple(table, dbtuple)
            else:
                for dbtuple in self.data[table]:
                    if dbtuple not in other.data[table]:
                        add_tuple(table, dbtuple)
        return results
    def __or__(self, other):
        """Return a new Database containing the union of both fact sets."""
        def add_db(db):
            for table in db.data:
                for dbtuple in db.data[table]:
                    result.insert(compile.Literal.create_from_table_tuple(
                        table, dbtuple.tuple), proofs=dbtuple.proofs)
        result = Database()
        add_db(self)
        add_db(other)
        return result
    def __getitem__(self, key):
        # KEY must be a tablename
        return self.data[key]
    def content(self, tablenames=None):
        """Return a sequence of Literals representing all the table data."""
        results = []
        if tablenames is None:
            tablenames = self.data.keys()
        for table in tablenames:
            if table not in self.data:
                continue
            for dbtuple in self.data[table]:
                results.append(compile.Literal.create_from_table_tuple(
                    table, dbtuple.tuple))
        return results
    def is_noop(self, event):
        """Returns T if EVENT is a noop on the database."""
        # insert/delete same code but with flipped return values
        # Code below is written as insert, except noop initialization.
        if event.is_insert():
            noop = True
        else:
            noop = False
        if event.formula.table.table not in self.data:
            return not noop
        event_data = self.data[event.formula.table.table]
        raw_tuple = tuple(event.formula.argument_names())
        for dbtuple in event_data:
            if dbtuple.tuple == raw_tuple:
                # Only a noop if the event adds no new proofs (insert) /
                # its proofs are already absent (delete).
                if event.proofs <= dbtuple.proofs:
                    return noop
        return not noop
    def __contains__(self, formula):
        if not compile.is_atom(formula):
            return False
        if formula.table.table not in self.data:
            return False
        event_data = self.data[formula.table.table]
        raw_tuple = tuple(formula.argument_names())
        return any((dbtuple.tuple == raw_tuple for dbtuple in event_data))
    def explain(self, atom):
        """Return the ProofCollection justifying ground ATOM.

        NOTE(review): implicitly returns None when ATOM is ground, its table
        exists, but no matching tuple is found -- confirm callers handle None.
        """
        if atom.table.table not in self.data or not atom.is_ground():
            return self.ProofCollection([])
        args = tuple([x.name for x in atom.arguments])
        for dbtuple in self.data[atom.table.table]:
            if dbtuple.tuple == args:
                return dbtuple.proofs
    def tablenames(self, body_only=False, include_builtin=False,
                   include_modal=True):
        """Return all table names occurring in this theory."""
        # A database has no rules, hence no body-only tables.
        if body_only:
            return []
        return self.data.keys()
    # overloads for TopDownTheory so we can properly use the
    # top_down_evaluation routines
    def defined_tablenames(self):
        return self.data.keys()
    def head_index(self, table, match_literal=None):
        # For a database, the "rules" indexed by a head table are its tuples.
        if table not in self.data:
            return []
        return self.data[table]
    def head(self, thing):
        # A tuple is its own head (there is no rule body).
        return thing
    def body(self, thing):
        # Tuples are facts: the body is always empty.
        return []
    def bi_unify(self, dbtuple, unifier1, atom, unifier2, theoryname):
        """THING1 is always a ground DBTuple and THING2 is always an ATOM."""
        return dbtuple.match(atom, unifier2)
    def atom_to_internal(self, atom, proofs=None):
        """Convert ATOM to the internal (tablename, DBTuple) representation."""
        return atom.table.table, self.DBTuple(atom.argument_names(), proofs)
    def insert(self, atom, proofs=None):
        """Inserts ATOM into the DB. Returns changes."""
        return self.modify(compile.Event(formula=atom, insert=True,
                                         proofs=proofs))
    def delete(self, atom, proofs=None):
        """Deletes ATOM from the DB. Returns changes."""
        return self.modify(compile.Event(formula=atom, insert=False,
                                         proofs=proofs))
    def update(self, events):
        """Applies all of EVENTS to the DB.
        Each event is either an insert or a delete.
        """
        changes = []
        for event in events:
            changes.extend(self.modify(event))
        return changes
    def update_would_cause_errors(self, events):
        """Return a list of PolicyException.
        Return a list of PolicyException if we were
        to apply the events EVENTS to the current policy.
        """
        self.log(None, "update_would_cause_errors %s", utility.iterstr(events))
        errors = []
        for event in events:
            # Only ground atoms (facts) may be stored in a Database.
            if not compile.is_atom(event.formula):
                errors.append(exception.PolicyException(
                    "Non-atomic formula is not permitted: {}".format(
                        str(event.formula))))
            else:
                errors.extend(compile.fact_errors(
                    event.formula, self.theories, self.name))
        return errors
    def modify(self, event):
        """Insert/Delete atom.
        Inserts/deletes ATOM and returns a list of changes that
        were caused. That list contains either 0 or 1 Event.
        """
        assert compile.is_atom(event.formula), "Modify requires Atom"
        atom = event.formula
        self.log(atom.table.table, "Modify: %s", atom)
        if self.is_noop(event):
            self.log(atom.table.table, "Event %s is a noop", event)
            return []
        if event.insert:
            self.insert_actual(atom, proofs=event.proofs)
        else:
            self.delete_actual(atom, proofs=event.proofs)
        return [event]
    def insert_actual(self, atom, proofs=None):
        """Workhorse for inserting ATOM into the DB.
        Along with proofs explaining how ATOM was computed from other tables.
        """
        assert compile.is_atom(atom), "Insert requires Atom"
        table, dbtuple = self.atom_to_internal(atom, proofs)
        self.log(table, "Insert: %s", atom)
        if table not in self.data:
            self.data[table] = [dbtuple]
            self.log(atom.table.table, "First tuple in table %s", table)
            return
        else:
            # If the tuple already exists, merge in the new proofs instead
            # of storing a duplicate row.
            for existingtuple in self.data[table]:
                assert existingtuple.proofs is not None
                if existingtuple.tuple == dbtuple.tuple:
                    assert existingtuple.proofs is not None
                    existingtuple.proofs |= dbtuple.proofs
                    assert existingtuple.proofs is not None
                    return
            self.data[table].append(dbtuple)
    def delete_actual(self, atom, proofs=None):
        """Workhorse for deleting ATOM from the DB.
        Along with the proofs that are no longer true.
        """
        assert compile.is_atom(atom), "Delete requires Atom"
        self.log(atom.table.table, "Delete: %s", atom)
        table, dbtuple = self.atom_to_internal(atom, proofs)
        if table not in self.data:
            return
        for i in range(0, len(self.data[table])):
            existingtuple = self.data[table][i]
            if existingtuple.tuple == dbtuple.tuple:
                # Remove only the given proofs; drop the row when no
                # supporting proof remains.
                existingtuple.proofs -= dbtuple.proofs
                if len(existingtuple.proofs) == 0:
                    del self.data[table][i]
                return
    def policy(self):
        """Return the policy for this theory.
        No policy in this theory; only data.
        """
        return []
    def get_arity_self(self, tablename):
        """Return the arity of TABLENAME, or None if unknown/empty."""
        if tablename not in self.data:
            return None
        if len(self.data[tablename]) == 0:
            return None
        return len(self.data[tablename][0].tuple)
    def content_string(self):
        s = ""
        for lit in self.content():
            s += str(lit) + '\n'
        return s + '\n'
| 35.333333 | 79 | 0.551682 |
32480ac77fa5b988b34fbbe7b0f6eeeb86df5ac5 | 7,364 | py | Python | pypureclient/flasharray/FA_2_1/api/remote_pods_api.py | bdclark/py-pure-client | 18f4e953708f98ffe585ff79e4938c0156e32ded | [
"BSD-2-Clause"
] | null | null | null | pypureclient/flasharray/FA_2_1/api/remote_pods_api.py | bdclark/py-pure-client | 18f4e953708f98ffe585ff79e4938c0156e32ded | [
"BSD-2-Clause"
] | null | null | null | pypureclient/flasharray/FA_2_1/api/remote_pods_api.py | bdclark/py-pure-client | 18f4e953708f98ffe585ff79e4938c0156e32ded | [
"BSD-2-Clause"
] | null | null | null | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re
# python 2 and python 3 compatibility library
import six
from typing import List, Optional
from .. import models
class RemotePodsApi(object):
    """Generated client for the FlashArray ``remote-pods`` REST endpoints."""
    def __init__(self, api_client):
        # api_client performs the HTTP calls and (de)serialization.
        self.api_client = api_client
    def api21_remote_pods_get_with_http_info(
        self,
        authorization=None,  # type: str
        x_request_id=None,  # type: str
        filter=None,  # type: str
        ids=None,  # type: List[str]
        limit=None,  # type: int
        names=None,  # type: List[str]
        offset=None,  # type: int
        on=None,  # type: List[str]
        sort=None,  # type: List[str]
        total_item_count=None,  # type: bool
        async_req=False,  # type: bool
        _return_http_data_only=False,  # type: bool
        _preload_content=True,  # type: bool
        _request_timeout=None,  # type: Optional[int]
    ):
        # type: (...) -> models.RemotePodsResponse
        """List remote pods
        Returns a list of pods that that are on connected arrays but not stretched to this array.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.api21_remote_pods_get_with_http_info(async_req=True)
        >>> result = thread.get()
        :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
        :param str x_request_id: Supplied by client during request or generated by server.
        :param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
        :param list[str] ids: Performs the operation on the unique resource IDs specified. Enter multiple resource IDs in comma-separated format. The `ids` and `names` parameters cannot be provided together.
        :param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
        :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
        :param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
        :param list[str] on: Performs the operation on the target name specified. Enter multiple target names in comma-separated format. For example, `targetName01,targetName02`.
        :param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
        :param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
        :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
        :param bool _return_http_data_only: Returns only data field.
        :param bool _preload_content: Response is converted into objects.
        :param int _request_timeout: Total request timeout in seconds.
                                     It can also be a tuple of (connection time, read time) timeouts.
        :return: RemotePodsResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot all non-None keyword arguments for validation/marshaling.
        params = {k: v for k, v in six.iteritems(locals()) if v is not None}
        # Client-side range validation mirroring the API constraints.
        if 'limit' in params and params['limit'] < 1:
            raise ValueError("Invalid value for parameter `limit` when calling `api21_remote_pods_get`, must be a value greater than or equal to `1`")
        if 'offset' in params and params['offset'] < 0:
            raise ValueError("Invalid value for parameter `offset` when calling `api21_remote_pods_get`, must be a value greater than or equal to `0`")
        collection_formats = {}
        path_params = {}
        # Build the query string; list-valued parameters are CSV-encoded.
        query_params = []
        if 'filter' in params:
            query_params.append(('filter', params['filter']))
        if 'ids' in params:
            query_params.append(('ids', params['ids']))
            collection_formats['ids'] = 'csv'
        if 'limit' in params:
            query_params.append(('limit', params['limit']))
        if 'names' in params:
            query_params.append(('names', params['names']))
            collection_formats['names'] = 'csv'
        if 'offset' in params:
            query_params.append(('offset', params['offset']))
        if 'on' in params:
            query_params.append(('on', params['on']))
            collection_formats['on'] = 'csv'
        if 'sort' in params:
            query_params.append(('sort', params['sort']))
            collection_formats['sort'] = 'csv'
        if 'total_item_count' in params:
            query_params.append(('total_item_count', params['total_item_count']))
        header_params = {}
        if 'authorization' in params:
            header_params['Authorization'] = params['authorization']
        if 'x_request_id' in params:
            header_params['X-Request-ID'] = params['x_request_id']
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['application/json'])
        # Authentication setting
        auth_settings = []
        return self.api_client.call_api(
            '/api/2.1/remote-pods', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='RemotePodsResponse',
            auth_settings=auth_settings,
            async_req=async_req,
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats=collection_formats,
        )
| 51.138889 | 671 | 0.666893 |
63481a289260eeac9e60a41f0786d12db1e98581 | 10,421 | py | Python | conservation/alignment_based/multiple_sequence_alignment.py | siret/prankweb | e36f1ca5cfbce2f8aa8dc89c04add0b4c550c266 | [
"Apache-2.0"
] | 2 | 2019-10-15T11:09:30.000Z | 2019-10-15T20:31:52.000Z | conservation/alignment_based/multiple_sequence_alignment.py | siret/p2rank-web | e36f1ca5cfbce2f8aa8dc89c04add0b4c550c266 | [
"Apache-2.0"
] | 23 | 2019-09-25T10:25:16.000Z | 2020-10-06T12:49:25.000Z | conservation/alignment_based/multiple_sequence_alignment.py | siret/prankweb | e36f1ca5cfbce2f8aa8dc89c04add0b4c550c266 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Given a sequence in a FASTA file compute MSA.
#
# Workflow:
# input-sequence.fasta
# psiblast -> _execute_psiblast
# Query for similar proteins.
# psiblast-filtered -> _filter_psiblast_file
# Filter proteins by similarity.
# blastdb-output -> _execute_blastdbcmd
# Get sequences for the proteins.
# blast-output -> _execute_cdhit
# Cluster sequences and select
# representatives.
# muscle-input -> _merge_files([sequence_file, fasta_file], ..)
# Add the input sequence to the
# found sequences.
# muscle-output -> _compute_msa_for_sequences
# Apply muscle to get MSA.
# msa -> _order_muscle_result
# Reorder sequences and put the
# input sequence first.
#
import os
import typing
import logging
class MsaConfiguration:
    """Settings and external-tool callbacks for the MSA pipeline."""
    # Prefix used to identify the sequence.
    sequence_prefix: str = "query_sequence|"
    # Amount of sequences after filtering that need to be found, if not
    # enough sequences are found then try another database.
    minimum_sequence_count: int
    # Minimum required coverage for similar sequences in percents.
    minimum_coverage: int
    # Limit how many sequences are used to compute MSA. Use 0 for no limit.
    maximum_sequences_for_msa: int
    # List of databases used to search for multisequence alignment.
    # NOTE(review): class-level mutable default; all instances share this one
    # list unless each assigns its own -- confirm callers always set it.
    blast_databases: typing.List[str] = []
    # Path to a working directory.
    working_dir: str
    # Execute psiblast for given files.
    # Arguments: input file, output file, database
    execute_psiblast: typing.Callable[[str, str, str], None]
    # Execute psiblast for given files.
    # Arguments: input file, output file, database
    execute_blastdb: typing.Callable[[str, str, str], None]
    # Execute psiblast for given files.
    # Arguments: sequences files, output file, log file
    execute_cdhit: typing.Callable[[str, str, str], None]
    # Execute psiblast for given files.
    # Arguments: input file, output file
    execute_muscle: typing.Callable[[str, str], None]
def compute_msa(fasta_file: str, output_file: str, config: MsaConfiguration):
    """Compute a multiple sequence alignment for the single sequence in
    *fasta_file* and write the query-first MSA to *output_file*.

    Pipeline: tag the query sequence -> psiblast search with filtering and
    clustering -> muscle alignment -> reorder so the query comes first.

    :raises Exception: when the input holds more than one sequence or no
        database yields enough similar sequences.
    """
    blast_input = _prepare_blast_input(fasta_file, config)
    blast_output = os.path.join(config.working_dir, "blast-output")
    _find_similar_sequences(blast_input, blast_output, config)
    muscle_file = os.path.join(config.working_dir, "muscle-output")
    _compute_msa_for_sequences(blast_input, blast_output, muscle_file, config)
    _prepare_for_conservation(muscle_file, output_file, config)
    # Fix: removed a trailing `config.maximum_sequences_for_msa = 1` that
    # mutated the caller's config object after the run, silently capping any
    # subsequent compute_msa() call with the same config to one sequence.
# region Prepare Blast input
def _prepare_blast_input(fasta_file: str, config: MsaConfiguration) -> str:
    """Prepare the FASTA file for Blast.

    Validates that exactly one sequence is present, tags its header with the
    recognizable query prefix, and writes it into the working directory.
    Returns the path of the written file.
    """
    records = _read_fasta_file(fasta_file)
    if len(records) != 1:
        raise Exception(
            "The input file must contains only one sequence not {}".format(
                len(records)))
    header, sequence = records[0]
    output_path = os.path.join(config.working_dir, "input-sequence.fasta")
    _save_sequence_to_fasta(config.sequence_prefix + header, sequence, output_path)
    return output_path
def _read_fasta_file(input_file: str) -> typing.List[typing.Tuple[str, str]]:
header = None
result = []
sequence = ""
with open(input_file) as in_stream:
for line in in_stream:
line = line.rstrip()
if line.startswith(">"):
if header is None:
header = line[1:]
else:
result.append((header, sequence))
header = line[1:]
sequence = ""
else:
sequence += line
if header is not None:
result.append((header, sequence))
return result
def _save_sequence_to_fasta(header: str, sequence: str, output_file: str):
    """Write a single FASTA record to *output_file*."""
    content = _format_fasta_sequence(header, sequence)
    with open(output_file, "w") as out_stream:
        out_stream.write(content)
def _format_fasta_sequence(header: str, sequence: str, line_width: int = 80):
lines = "\n".join(
[
sequence[index: index + line_width]
for index in range(0, len(sequence), line_width)
]
)
return f">{header}\n{lines}\n"
# endregion
# region Find similar sequences
def _find_similar_sequences(
        input_file: str, output_file: str, config: MsaConfiguration
):
    """Search the configured databases, in order, until one yields enough
    similar sequences; raise when every database comes up short.
    """
    for database in config.blast_databases:
        if _find_similar_sequences_in_database(
                input_file, output_file, config, database):
            return
    raise Exception("Not enough similar sequences found!")
def _find_similar_sequences_in_database(
        input_file: str, output_file: str, config: MsaConfiguration,
        database: str
) -> bool:
    """Run the search pipeline against one database.

    psiblast search -> coverage/identity filter -> fetch sequence bodies ->
    cd-hit clustering -> cap the number of representatives.  Returns True
    when enough sequences survived, False otherwise.
    """
    logging.info(
        "Searching for similar sequences using psiblast on '%s' database ...",
        database
    )
    psiblast_file = os.path.join(config.working_dir, "psiblast")
    config.execute_psiblast(input_file, psiblast_file, database)

    logging.info("Filtering result to match required criteria...")
    filtered_file = os.path.join(config.working_dir, "psiblast-filtered")
    kept_count = _filter_psiblast_file(psiblast_file, filtered_file, config)
    if kept_count < config.minimum_sequence_count:
        logging.info("Not enough sequences.")
        return False

    logging.info("Retrieving content of sequences ...")
    sequence_bodies = os.path.join(config.working_dir, "blastdb-output")
    config.execute_blastdb(filtered_file, sequence_bodies, database)

    # Cluster and select representatives.
    logging.info("Selecting representative sequences ...")
    cdhit_log = os.path.join(config.working_dir, "cd-hit.log")
    cdhit_result = os.path.join(config.working_dir, "cd-hit")
    config.execute_cdhit(sequence_bodies, cdhit_result, cdhit_log)
    if not _found_enough_sequences(cdhit_result, config):
        return False

    _select_sequences(cdhit_result, output_file,
                      config.maximum_sequences_for_msa)
    return True
def _filter_psiblast_file(
input_file: str, output_file: str, config: MsaConfiguration
) -> int:
inputs_count = 0
results_count = 0
with open(input_file) as in_stream, open(output_file, "w") as out_stream:
for line in in_stream:
inputs_count += 1
identifier, coverage, identity = line.rstrip().split("\t")
if float(coverage) < config.minimum_coverage:
continue
if not (30 <= float(identity) <= 95):
continue
out_stream.write(identifier)
out_stream.write("\n")
results_count += 1
logging.info("Filtering results from %s to %s", inputs_count, results_count)
return results_count
def _found_enough_sequences(fasta_file: str, config: MsaConfiguration) -> bool:
    """Return True when *fasta_file* holds at least
    ``config.minimum_sequence_count`` sequences.

    Fix: the check previously used ``>`` (strictly more than the minimum),
    which is inconsistent with the pre-clustering gate in
    ``_find_similar_sequences_in_database`` (which rejects only counts
    strictly below the minimum) and with the config comment describing the
    value as the amount that "need to be found".
    """
    counter = 0
    for _, _ in _read_fasta_file(fasta_file):
        counter += 1
    logging.info("Number of sequences in %s is %s", fasta_file, counter)
    return counter >= config.minimum_sequence_count
def _select_sequences(input_file: str, output_file: str, count: int):
    """Copy sequences from *input_file* to *output_file*, keeping at most
    *count* of them (uniformly sampled); 0 or fewer means keep everything.
    """
    sequences = _read_fasta_file(input_file)
    if 0 < count < len(sequences):
        sampled = [
            sequences[i]
            for i in uniform_sample(0, len(sequences), count)
        ]
        logging.info(
            "Using %s from %s sequences", len(sampled), len(sequences)
        )
        sequences = sampled
    # Write only selected.
    with open(output_file, "w") as out_stream:
        for header, sequence in sequences:
            out_stream.write(_format_fasta_sequence(header, sequence))
def uniform_sample(start, end, total_count):
    """Yield up to *total_count* evenly spaced integer indices in [start, end).

    Fixes/generalizations over the original:
    - the step is now ``(end - start) / total_count`` instead of
      ``end / total_count``, which under-sampled whenever ``start > 0``
      (behavior for the existing ``start == 0`` callers is unchanged);
    - ``total_count <= 0`` yields nothing instead of raising
      ZeroDivisionError.
    """
    if total_count <= 0:
        return
    step = (end - start) / total_count
    count = 0
    index = start
    while index < end and count < total_count:
        yield int(index)
        index += step
        count += 1
# endregion
def _compute_msa_for_sequences(
        fasta_file: str, sequence_file: str, output_file: str,
        config: MsaConfiguration
):
    """Concatenate the found sequences with the query and run muscle."""
    combined_file = os.path.join(config.working_dir, "muscle-input")
    _merge_files([sequence_file, fasta_file], combined_file)
    config.execute_muscle(combined_file, output_file)
def _merge_files(input_files: typing.List[str], output_file: str):
with open(output_file, "w") as out_stream:
for input_file in input_files:
with open(input_file) as in_stream:
for line in in_stream:
out_stream.write(line)
def _prepare_for_conservation(
        input_file: str, output_file: str, config: MsaConfiguration
):
    """
    Write the marked sequence first (with its prefix stripped), followed
    by every other sequence, normalizing line endings along the way.
    """
    logging.info("Ordering muscle results ...")
    marked = None
    for header, sequence in _read_fasta_file(input_file):
        if header.startswith(config.sequence_prefix):
            # Strip the marker prefix from the header before writing.
            marked = (header[len(config.sequence_prefix):], sequence)
            break
    if marked is None:
        raise Exception(
            "Missing header '" + config.sequence_prefix + "' in " + input_file
        )
    with open(output_file, "w", newline="\n") as target:
        target.write(_format_fasta_sequence(marked[0], marked[1], 60))
        # Second pass: emit everything except the marked sequence.
        for header, sequence in _read_fasta_file(input_file):
            if not header.startswith(config.sequence_prefix):
                target.write(_format_fasta_sequence(header, sequence, 60))
| 35.566553 | 80 | 0.638518 |
2f70eccff16b746af932489e160e77cf949838fa | 9,037 | py | Python | Functions/Four/SplitET.py | trngb/watools | 57b9074d59d856886675aa26014bfd6673d5da76 | [
"Apache-2.0"
] | 11 | 2018-09-25T08:58:26.000Z | 2021-02-13T18:58:05.000Z | Functions/Four/SplitET.py | trngbich/watools | 57b9074d59d856886675aa26014bfd6673d5da76 | [
"Apache-2.0"
] | 1 | 2020-07-03T02:36:41.000Z | 2021-03-21T22:20:47.000Z | Functions/Four/SplitET.py | trngbich/watools | 57b9074d59d856886675aa26014bfd6673d5da76 | [
"Apache-2.0"
] | 16 | 2018-09-28T22:55:11.000Z | 2021-02-22T13:03:56.000Z | # -*- coding: utf-8 -*-
"""
Authors: Bert Coerver, Tim Hessels
UNESCO-IHE 2017
Contact: t.hessels@unesco-ihe.org
Repository: https://github.com/wateraccounting/watools
Module: Function/Four
"""
# import general python modules
import pandas as pd
import numpy as np
import os
def Blue_Green(Dir_Basin, nc_outname, ETref_Product, P_Product, Startdate, Enddate):
    """
    Split actual evapotranspiration into green and blue components using
    the Budyko framework.

    Parameters
    ----------
    Dir_Basin : str
        Basin directory used to locate local ETref/Precipitation data
    nc_outname : str
        Path to the .nc file containing data
    ETref_Product : str
        'WA_ETref' to use the local WA dataset, otherwise a data path
    P_Product : str
        'CHIRPS'/'RFE' to use the local dataset, otherwise a data path
    Startdate, Enddate : str
        Period of interest (YYYY-mm-dd)

    Returns
    -------
    ET_Blue : array
        Array[time, lat, lon] contains Blue Evapotranspiration
    ET_Green : array
        Array[time, lat, lon] contains Green Evapotranspiration
    """
    import watools.General.raster_conversions as RC
    import watools.Functions.Start.Get_Dictionaries as GD

    # Input Parameters functions
    scale = 1.1

    # Open LU map for example
    LU = RC.Open_nc_array(nc_outname, "Landuse")

    # Define monthly dates
    Dates = pd.date_range(Startdate, Enddate, freq = 'MS')

    # Get dictionaries and keys for the moving average
    ET_Blue_Green_Classes_dict, Moving_Window_Per_Class_dict = GD.get_bluegreen_classes(version = '1.0')
    Classes = ET_Blue_Green_Classes_dict.keys()
    Moving_Averages_Values_Array = np.ones(LU.shape) * np.nan

    # Create array based on the dictionary that gives the Moving average tail for every pixel
    for Class in Classes:
        Values_Moving_Window_Class = Moving_Window_Per_Class_dict[Class]
        for Values_Class in ET_Blue_Green_Classes_dict[Class]:
            Moving_Averages_Values_Array[LU == Values_Class] = Values_Moving_Window_Class

    Additional_Months_front = int(np.nanmax(Moving_Averages_Values_Array))
    Additional_Months_tail = 0
    Start_period = Additional_Months_front
    End_period = Additional_Months_tail * -1

    ########################### Extract ETref data #################################
    # NOTE: fixed `is 'WA_ETref'` (identity check on a string literal, which
    # relies on CPython interning) to an equality check, matching the
    # P_Product comparison below.
    if ETref_Product == 'WA_ETref':
        # Define data path
        Data_Path_ETref = os.path.join(Dir_Basin, 'ETref', 'Monthly')
    else:
        Data_Path_ETref = ETref_Product

    ETref = Complete_3D_Array(nc_outname, 'Reference_Evapotranspiration', Startdate, Enddate, Additional_Months_front, Additional_Months_tail, Data_Path_ETref)

    ######################## Extract Precipitation data ########################
    if (P_Product == "CHIRPS" or P_Product == "RFE"):
        # Define data path
        Data_Path_P = os.path.join(Dir_Basin, 'Precipitation', P_Product, 'Monthly')
    else:
        Data_Path_P = P_Product

    P = Complete_3D_Array(nc_outname, 'Precipitation', Startdate, Enddate, Additional_Months_front, Additional_Months_tail, Data_Path_P)

    ########################## Extract actET data ##############################
    ET = RC.Open_nc_array(nc_outname, "Actual_Evapotranspiration", Startdate, Enddate)

    ############ Create average ETref and P using moving window ################
    ETref_Ave = np.ones([len(Dates),int(LU.shape[0]),int(LU.shape[1])]) * np.nan
    P_Ave = np.ones([len(Dates),int(LU.shape[0]),int(LU.shape[1])]) * np.nan
    if End_period == 0:
        P_period = P[Start_period:,:,:]
        ETref_period = ETref[Start_period:,:,:]
    else:
        P_period = P[Start_period:End_period,:,:]
        ETref_period = ETref[Start_period:End_period,:,:]

    # Loop over the different moving average tails
    for One_Value in np.unique(list(Moving_Window_Per_Class_dict.values())):
        # If the moving average is 1 then use the value of the original ETref or P
        if One_Value == 1:
            Values_Ave_ETref = ETref[int(ETref.shape[0])-len(Dates):,:,:]
            Values_Ave_P = P[int(ETref.shape[0])-len(Dates):,:,:]
        # If there is a tail, apply the moving average over the whole datacube
        else:
            Values_Ave_ETref_tot = RC.Moving_average(ETref, One_Value - 1, 0)
            Values_Ave_P_tot = RC.Moving_average(P, One_Value - 1, 0)
            Values_Ave_ETref = Values_Ave_ETref_tot[int(Values_Ave_ETref_tot.shape[0])-len(Dates):,:,:]
            Values_Ave_P = Values_Ave_P_tot[int(Values_Ave_P_tot.shape[0])-len(Dates):,:,:]

        # Only add the data where the corresponding tail corresponds with the one_value
        ETref_Ave[:,Moving_Averages_Values_Array == One_Value] = Values_Ave_ETref[:,Moving_Averages_Values_Array == One_Value]
        P_Ave[:,Moving_Averages_Values_Array == One_Value] = Values_Ave_P[:,Moving_Averages_Values_Array == One_Value]

    ##################### Calculate ET blue and green ###########################
    # Mask out the nan values (if one of the parameters is nan, then they are all nan)
    mask = np.any([np.isnan(LU)*np.ones([len(Dates),int(LU.shape[0]),int(LU.shape[1])])==1, np.isnan(ET), np.isnan(ETref[int(ETref.shape[0])-len(Dates):,:,:]), np.isnan(P[int(ETref.shape[0])-len(Dates):,:,:]), np.isnan(P_Ave), np.isnan(ETref_Ave)],axis=0)
    ETref_period[mask] = ETref_Ave[mask] = ET[mask] = P_period[mask] = P_Ave[mask] = np.nan

    # Aridity index (averaged reference ET over averaged precipitation)
    phi = ETref_Ave / P_Ave

    # Calculate Budyko-index
    Budyko = scale * np.sqrt(phi*np.tanh(1/phi)*(1-np.exp(-phi)))

    # Calculate ET green (capped at the actual ET)
    ETgreen_DataCube = np.minimum(Budyko*P[int(ETref.shape[0])-len(Dates):,:,:],ET)

    # Calculate ET blue as the remainder
    ETblue_DataCube = ET - ETgreen_DataCube

    return(np.array(ETblue_DataCube), np.array(ETgreen_DataCube))
def Calc_budyko(phi):
    """
    Compute the Budyko curve value for the given aridity index.

    Parameters
    ----------
    phi : Array
        Array[time, lat, lon] containing the aridity index

    Returns
    -------
    Budyko : array
        Array[time, lat, lon] containing the Budyko number
    """
    budyko_number = np.sqrt(phi * np.tanh(1 / phi) * (1 - np.exp(-phi)))
    return budyko_number
def Complete_3D_Array(nc_outname, Var, Startdate, Enddate, Additional_Months_front, Additional_Months_tail, Data_Path):
    """
    Load a [time, lat, lon] datacube for *Var* covering the requested period
    plus optional extra months before/after (used for moving averages).

    The core period is read from *nc_outname*; the extra front/tail months
    are read from the neighbouring per-year .nc files when they contain
    *Var*, otherwise rebuilt from the raster time series in *Data_Path*.

    Returns the stacked array (front + main + tail along the time axis).
    """
    from netCDF4 import Dataset
    import watools.General.raster_conversions as RC
    # Define startdate and enddate with moving average
    Startdate_Moving_Average = pd.Timestamp(Startdate) - pd.DateOffset(months = Additional_Months_front)
    Enddate_Moving_Average = pd.Timestamp(Enddate) + pd.DateOffset(months = Additional_Months_tail)
    Startdate_Moving_Average_String = '%d-%02d-%02d' %(Startdate_Moving_Average.year, Startdate_Moving_Average.month, Startdate_Moving_Average.day)
    Enddate_Moving_Average_String = '%d-%02d-%02d' %(Enddate_Moving_Average.year, Enddate_Moving_Average.month, Enddate_Moving_Average.day)
    # The per-year .nc file that may hold the months before the start date
    Year_front = int(Startdate_Moving_Average.year)
    filename_front = os.path.join(os.path.dirname(nc_outname), "%d.nc" %Year_front)
    Enddate_Front = pd.Timestamp(Startdate) - pd.DateOffset(days = 1)
    # Extract inside start and enddate
    Array_main = RC.Open_nc_array(nc_outname, Var, Startdate, Enddate)
    if Additional_Months_front > 0:
        # Extract moving average period before
        if os.path.exists(filename_front):
            # Check whether the yearly netcdf already contains this variable
            fh = Dataset(filename_front)
            Variables_NC = [var for var in fh.variables]
            fh.close()
            if Var in Variables_NC:
                Array_front = RC.Open_nc_array(filename_front, Var, Startdate_Moving_Average_String, Enddate_Front)
            else:
                Array_front = RC.Get3Darray_time_series_monthly(Data_Path, Startdate_Moving_Average_String, Enddate_Front, nc_outname)
        else:
            # No yearly file: rebuild the months from the raster time series
            Array_front = RC.Get3Darray_time_series_monthly(Data_Path, Startdate_Moving_Average_String, Enddate_Front, nc_outname)
        # Prepend the extra months along the time axis
        Array_main = np.vstack([Array_front,Array_main])
    if Additional_Months_tail > 0:
        # Extract moving average period after
        Year_tail = int(Enddate_Moving_Average.year)
        filename_tail = os.path.join(os.path.dirname(nc_outname), "%d.nc" %Year_tail)
        Startdate_tail = pd.Timestamp(Enddate) + pd.DateOffset(days = 1)
        # Extract moving average period after
        if os.path.exists(filename_tail):
            # Check whether the yearly netcdf already contains this variable
            fh = Dataset(filename_tail)
            Variables_NC = [var for var in fh.variables]
            fh.close()
            if Var in Variables_NC:
                Array_tail = RC.Open_nc_array(filename_tail, Var, Startdate_tail, Enddate_Moving_Average_String)
            else:
                Array_tail = RC.Get3Darray_time_series_monthly(Data_Path, Startdate_tail, Enddate_Moving_Average_String, nc_outname)
        else:
            # No yearly file: rebuild the months from the raster time series
            Array_tail = RC.Get3Darray_time_series_monthly(Data_Path, Startdate_tail, Enddate_Moving_Average_String, nc_outname)
        # Append the extra months along the time axis
        Array_main = np.vstack([Array_main,Array_tail])
    return(Array_main)
| 40.891403 | 255 | 0.674781 |
173854c081f9143e3c09bdf01040e6da0dbafdfb | 186 | py | Python | utilities/csv_to_json.py | sachinio/redalert | b50d65757e80ec10c5fa220133c5206ea50a2c46 | [
"MIT"
] | 3 | 2015-08-03T22:26:36.000Z | 2020-03-30T04:19:26.000Z | utilities/csv_to_json.py | sachinio/redalert | b50d65757e80ec10c5fa220133c5206ea50a2c46 | [
"MIT"
] | 1 | 2015-10-19T06:39:21.000Z | 2015-10-19T06:39:21.000Z | utilities/csv_to_json.py | sachinio/redalert | b50d65757e80ec10c5fa220133c5206ea50a2c46 | [
"MIT"
] | 5 | 2015-10-08T22:48:45.000Z | 2020-03-30T04:19:27.000Z | __author__ = 'sachinpatney'
import json
import sys
sys.path.append('/var/www/git/redalert/tasks')
from common import read_csv_as_list
print(json.dumps(read_csv_as_list(sys.argv[1]))) | 18.6 | 48 | 0.784946 |
2ef0a9066f762c849cae39c9ff6147d74bab1e90 | 3,683 | py | Python | python/verkefni4/src/setup.py | asgeir/old-school-projects | 96a502589c627e4556f9ee14fc1dc21ed53ce28a | [
"MIT"
] | null | null | null | python/verkefni4/src/setup.py | asgeir/old-school-projects | 96a502589c627e4556f9ee14fc1dc21ed53ce28a | [
"MIT"
] | null | null | null | python/verkefni4/src/setup.py | asgeir/old-school-projects | 96a502589c627e4556f9ee14fc1dc21ed53ce28a | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
# Resolve paths relative to this setup.py so builds work from any CWD.
here = path.abspath(path.dirname(__file__))

# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
    long_description = f.read()

# Single-source the package metadata (title, version, author, ...) by
# executing healthybeard/about.py into a plain dict.
about = {}
with open("healthybeard/about.py") as fp:
    exec(fp.read(), about)

setup(
    name=about['title'],

    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/development.html#single-sourcing-the-version
    version=about['version'],

    description=about['description'],
    long_description=long_description,

    # The project's main homepage.
    url=about['url'],

    # Author details
    author=about['author'],
    author_email=about['email'],

    # Choose your license
    license=about['license'],

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',

        # Indicate who your project is intended for
        'Intended Audience :: Developers',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',

        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX :: Linux',
    ],

    # What does your project relate to?
    keywords='homework piracy',

    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),

    # List run-time dependencies here. These will be installed by pip when your
    # project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=[
        'PyYAML>=3,<4'
    ],

    # List additional groups of dependencies here (e.g. development dependencies).
    # You can install these using the following syntax, for example:
    # $ pip install -e .[dev,test]
    # extras_require = {
    #     'dev': ['check-manifest'],
    #     'test': ['coverage'],
    # },

    # If there are data files included in your packages that need to be
    # installed, specify them here. If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    # package_data={
    #     'sample': ['package_data.dat'],
    # },

    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages.
    # see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    # data_files=[('my_data', ['data/data_file'])],

    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'hbrd=healthybeard.tools.hbrd:main',
        ],
    },
)
041c0b54433e26b701db4ae385a86c9ba9abfc1b | 1,133 | py | Python | recpkg/preprocessing.py | mianuddin/csc492_recommender_pkg | 3c89bb1f4ef2a34ed4f9bb6a99ae623eaee1954b | [
"MIT"
] | null | null | null | recpkg/preprocessing.py | mianuddin/csc492_recommender_pkg | 3c89bb1f4ef2a34ed4f9bb6a99ae623eaee1954b | [
"MIT"
] | null | null | null | recpkg/preprocessing.py | mianuddin/csc492_recommender_pkg | 3c89bb1f4ef2a34ed4f9bb6a99ae623eaee1954b | [
"MIT"
] | null | null | null | import numpy as np
from tensorflow import keras
from tensorflow.keras.layers.experimental \
import preprocessing # pylint: disable=no-name-in-module
def get_standard_layers(values, name=None):
    """Build an input layer plus standard preprocessing for integer values.

    The preprocessing pipeline maps each raw integer through an
    `IntegerLookup` vocabulary index and then one-hot encodes the index
    with a binary `CategoryEncoding`.

    Args:
        values (ndarray): The integer values of the desired input.
        name (String): The name of the values.

    Returns:
        Tuple[Layer, Layer]: The input layer and the preprocessed output.
    """
    vocabulary = np.unique(values)

    inputs = keras.Input(shape=(1), name=name, dtype="int64")

    # Adapt the lookup on the distinct raw values so each gets an index.
    lookup = preprocessing.IntegerLookup(max_tokens=len(vocabulary))
    lookup.adapt(vocabulary)

    # One-hot encode the looked-up indices.
    one_hot = preprocessing.CategoryEncoding(
        num_tokens=len(lookup.get_vocabulary()),
        output_mode="binary"
    )
    one_hot.adapt(lookup(vocabulary))

    return inputs, one_hot(lookup(inputs))
8ad63e286943d9fd994f721ae82a76f3ecb4f51f | 2,474 | py | Python | djaio/core/logs.py | domclick/djaio | f69f70e575f72fd3679ecf7fdb53f4531d20a590 | [
"Apache-2.0"
] | 5 | 2019-02-19T10:36:52.000Z | 2021-02-19T22:58:21.000Z | djaio/core/logs.py | resslerruntime/djaio | f69f70e575f72fd3679ecf7fdb53f4531d20a590 | [
"Apache-2.0"
] | 2 | 2021-02-26T02:12:45.000Z | 2022-01-21T18:56:26.000Z | djaio/core/logs.py | resslerruntime/djaio | f69f70e575f72fd3679ecf7fdb53f4531d20a590 | [
"Apache-2.0"
] | 1 | 2021-12-11T03:26:30.000Z | 2021-12-11T03:26:30.000Z | # -*- coding: utf-8 -*-
import sys
import logging.config
from djaio.core.utils import deep_merge
class ColoredFormatter(logging.Formatter):
    """Formatter that can wrap records in ANSI color codes by level."""

    # ANSI escape sequences.
    RESET = '\x1B[0m'
    RED = '\x1B[31m'
    YELLOW = '\x1B[33m'
    BRGREEN = '\x1B[01;32m'  # grey in solarized for terminals

    def format(self, record, color=False):
        """Format *record*; colorize the result when *color* is true."""
        text = super().format(record)
        if not color:
            return text
        # Pick the color for the highest matching severity threshold,
        # checked from most to least severe.
        thresholds = (
            (logging.CRITICAL, self.RED),
            (logging.ERROR, self.RED),
            (logging.WARNING, self.YELLOW),
            (logging.INFO, self.RESET),
            (logging.DEBUG, self.BRGREEN),
        )
        chosen = self.RESET
        for level, escape in thresholds:
            if record.levelno >= level:
                chosen = escape
                break
        return chosen + text + self.RESET
class ColoredHandler(logging.StreamHandler):
    """StreamHandler that colorizes records when the stream is a TTY."""

    def __init__(self, stream=sys.stdout):
        super().__init__(stream)

    def format(self, record, colour=False):
        # Lazily install a ColoredFormatter so color-aware formatting works
        # regardless of how the handler was configured.
        if not isinstance(self.formatter, ColoredFormatter):
            self.formatter = ColoredFormatter('[%(asctime)s] [%(levelname)s][MODULE:"%(module)s"] - "%(message)s"')
        return self.formatter.format(record, colour)

    def emit(self, record):
        stream = self.stream
        try:
            rendered = self.format(record, stream.isatty())
            stream.write(rendered)
            stream.write(self.terminator)
            self.flush()
        except Exception:
            self.handleError(record)
# Baseline logging configuration in dictConfig schema; projects may override
# parts of it via their settings and the result is merged in setup().
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        # Timestamped format used by the console handler.
        'verbose': {
            'format': '[%(asctime)s] [%(levelname)s][MODULE:"%(module)s"] - "%(message)s"'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        },
    },
    'loggers': {
        'djaio_logger': {
            'handlers': ['console'],
            'level': 'DEBUG',
        },
    },
}
def setup(app):
    """Apply the merged logging config; colored console output in DEBUG."""
    overrides = getattr(app.settings, 'LOGGING', {})
    merged = deep_merge(LOGGING, overrides)
    if app.settings.DEBUG:
        # A TTY-aware handler makes local development output easier to scan.
        merged['handlers']['console']['class'] = 'djaio.core.logs.ColoredHandler'
    logging.config.dictConfig(merged)
| 27.488889 | 115 | 0.559418 |
6faaf910de85d0185f1f18874c7dfef1be0bac03 | 3,547 | py | Python | setup.py | MarcelWaldvogel/parsedmarc | 3f1e25e315ae4bf7b309a75f89bc6648cc0a81b4 | [
"Apache-2.0"
] | 1 | 2021-05-13T22:16:31.000Z | 2021-05-13T22:16:31.000Z | setup.py | MarcelWaldvogel/parsedmarc | 3f1e25e315ae4bf7b309a75f89bc6648cc0a81b4 | [
"Apache-2.0"
] | null | null | null | setup.py | MarcelWaldvogel/parsedmarc | 3f1e25e315ae4bf7b309a75f89bc6648cc0a81b4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
from parsedmarc import __version__
# Short summary shown on PyPI.
description = "A Python package and CLI for parsing aggregate and " \
              "forensic DMARC reports"

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='parsedmarc',

    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version=__version__,

    description=description,
    long_description=long_description,

    # The project's main homepage.
    url='https://domainaware.github.io/parsedmarc',

    # Author details
    author='Sean Whalen',
    author_email='whalenster@gmail.com',

    # Choose your license
    license='Apache 2.0',

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 5 - Production/Stable',

        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        "Intended Audience :: Information Technology",
        'Operating System :: OS Independent',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: Apache Software License',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],

    # What does your project relate to?
    keywords='DMARC, reporting, parser',

    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=["parsedmarc"],

    # Alternatively, if you want to distribute just a my_module.py, uncomment
    # this:
    #   py_modules=["parsedmarc"],

    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=['dnspython>=2.0.0', 'expiringdict>=1.1.4',
                      'publicsuffix2>=2.20190812', 'xmltodict>=0.12.0',
                      'geoip2>=3.0.0', 'urllib3>=1.25.7',
                      'requests>=2.22.0', 'imapclient>=2.1.0',
                      'dateparser>=0.7.2',
                      'mailsuite>=1.6.1',
                      'elasticsearch-dsl>=7.2.0,<8.0.0',
                      'kafka-python>=1.4.4',
                      'tqdm>=4.31.1',
                      'lxml>=4.4.0'
                      ],
    entry_points={
        'console_scripts': ['parsedmarc=parsedmarc.cli:_main'],
    }
)
| 32.842593 | 79 | 0.624753 |
91bce08192d2c7ea591aa55755397e94e8d83ad7 | 252 | py | Python | check_test_plan_gen.py | warmchang/sieve | 57c2fa7d773ee344d517794f53730548f574cd3f | [
"BSD-2-Clause"
] | null | null | null | check_test_plan_gen.py | warmchang/sieve | 57c2fa7d773ee344d517794f53730548f574cd3f | [
"BSD-2-Clause"
] | null | null | null | check_test_plan_gen.py | warmchang/sieve | 57c2fa7d773ee344d517794f53730548f574cd3f | [
"BSD-2-Clause"
] | null | null | null | from evaluation_sanity_check import check, generate
import sys
current_dir = "log"
previous_dir = sys.argv[1]
generate.generate_test_plan_stat()
check.check_massive_testing_results(current_dir, previous_dir)
check.check_bug_reproduction_test_plans()
| 25.2 | 62 | 0.853175 |
306d4f7d8a5e00da4952af0da2f3f6286145a106 | 3,553 | py | Python | deploy/cassandra_restore_v2.py | surabhi-mahawar/sunbird-devops | 1b0f4f5aaabd3f9b5a2c16da2a1df0df436be490 | [
"MIT"
] | 51 | 2017-07-05T12:52:17.000Z | 2021-12-16T11:35:59.000Z | deploy/cassandra_restore_v2.py | surabhi-mahawar/sunbird-devops | 1b0f4f5aaabd3f9b5a2c16da2a1df0df436be490 | [
"MIT"
] | 338 | 2017-09-21T10:18:19.000Z | 2022-03-31T11:26:13.000Z | deploy/cassandra_restore_v2.py | surabhi-mahawar/sunbird-devops | 1b0f4f5aaabd3f9b5a2c16da2a1df0df436be490 | [
"MIT"
] | 531 | 2017-08-10T10:47:41.000Z | 2022-03-31T06:43:32.000Z | #!/usr/bin/env python3
# This program will copy the snapshots to cassandra data directory
# This program will only work in linux, as it's utilizing 'cp' for copying as shutil and copy_tree
# are not handling hardlink, existing directory combo.
#
# Author Rajesh Rajendran <rjshrjndrn@gmail.com>
# Restore process is based on the following approach
# Ref: https://docs.datastax.com/en/cassandra-oss/2.1/cassandra/operations/ops_snapshot_restore_new_cluster.html
# Steps:
# Update token ring:
# sudo systemctl stop cassandra
# open /etc/cassadandra/cassandra.yaml and append
# initial_token: < contentents from tokenring.txt from the backup directory (only from that node). >
# rm -rf /var/lib/cassandra/*
# sudo systemctl start cassandra
# Restore the schema, from backup directory.
# cqlsh -f /path/to/backup-dir/db_schema.sql
# Restore data.
# sudo systemctl stop cassandra.
# sudo python3 cassandra_restore_copy.py --snapshotdir <path to backup dir>
# sudo chown -R cassandra:cassandra /var/lib/cassandra
# sudo systemctl start cassandra
# Important:
# Clean up initial_token:
# Once `nodetool status` is UN for all nodes
# open /etc/cassandra/cassandra.yaml and remove `initial_token`.
# Note: Don't have to restart cassandra after removing initial_token.
import os
import shutil
from argparse import ArgumentParser
from collections import defaultdict
from subprocess import STDOUT, call
import concurrent.futures
# Command-line interface: target data directory, worker count and the
# snapshot directory to restore from.
parser = ArgumentParser(description="Restore cassandra snapshot")
parser.add_argument("-d", "--datadirectory", metavar="datadir", default='/var/lib/cassandra/data',
                    help="Path to cassadandra keyspaces. Default /var/lib/cassadra/data")
parser.add_argument("-w", "--workers", metavar="workers",
                    default=os.cpu_count(), help="Number of workers to use. Default same as cpu cores {}".format(os.cpu_count()))
parser.add_argument("--snapshotdir", default="cassandra_backup", metavar="< Default: cassandra_backup >", help="snapshot directory name or path")
args = parser.parse_args()
# copy function
def customCopy(root, root_target_dir):
    """Hard-link copy `root` into `root_target_dir` via `cp -arl`.

    `root` typically ends in `/*`, so the command runs through a shell for
    glob expansion. NOTE(review): the command line is built by string
    concatenation — paths containing spaces or shell metacharacters will
    break or be shell-interpreted; only safe for trusted backup paths.
    """
    print("copying {} to {}".format(root, root_target_dir))
    # shutil and copy_tree are not good enough for nested hard links.
    call(["cp -arl " + root + " " + root_target_dir],shell=True, stderr=STDOUT)
# ks_tb_pair = {ks_name: ['table1', 'table2']}
ks_tb_pair = defaultdict(list)
def create_ks_tb_pair(path):
    # Records the keyspace/table pair found at `path` and copies its
    # snapshot contents into the live cassandra data directory.
    # `path` looks like backupdir/<keyspace>/<table>-<uuid>, e.g.
    # backupdir/sunbirdplugin/announcement-6bc5074070ef11e995fbf564f8591b58
    # path.split('/') -> [backupdir, <keyspace>, <table>-<uuid>]
    parsed_data = path.split('/')
    # splitting parsed_data[-1] with '-' because we don't need UUID
    ks_tb_pair[parsed_data[-2]].append(parsed_data[-1].split('-')[0])
    # print(ks_tb_pair)
    # Copy content to datadirectory; the live table directory carries a
    # different UUID suffix, hence the trailing "-*" glob on the target.
    customCopy(path+'/*', args.datadirectory+os.sep+parsed_data[-2]+os.sep+parsed_data[-1].split("-")[0]+"-*")
# Traverse through backup dir and create keyspace table pair
# Unix autocompletion for directory will append '/', which will break restore process
snap_dir = args.snapshotdir.rstrip('/')
root_levels = snap_dir.count(os.sep)
for root, dirs, files in os.walk(snap_dir):
    # Table directories sit exactly two levels below the snapshot root:
    # <snap_dir>/<keyspace>/<table>-<uuid>
    if root.count(os.sep) == root_levels + 2:
        # output will be like
        # backupdir/sunbirdplugin/announcement-6bc5074070ef11e995fbf564f8591b58
        # backupdir/sunbirdplugin/groupmember-8c7eb6c070ef11e9858587f867de3ce2
        # print(root)
        create_ks_tb_pair(root)
9ad4ff6187b1020ee6f82af7b5e6ab7983a21d38 | 3,502 | py | Python | easy_comment/views.py | r26zhao/django-easy-comment | f9ef4a2a8a567c7f08d0059c4b5550fa735de34d | [
"MIT"
] | 318 | 2017-07-16T04:01:14.000Z | 2021-10-08T06:16:16.000Z | easy_comment/views.py | r26zhao/django-easy-comment | f9ef4a2a8a567c7f08d0059c4b5550fa735de34d | [
"MIT"
] | 2 | 2018-05-17T15:27:13.000Z | 2019-11-26T09:04:32.000Z | easy_comment/views.py | r26zhao/django-easy-comment | f9ef4a2a8a567c7f08d0059c4b5550fa735de34d | [
"MIT"
] | 64 | 2017-08-05T13:31:33.000Z | 2020-05-30T08:07:04.000Z | from django.apps import apps
from django.conf import settings
from django.http import JsonResponse
from django.views.decorators.http import require_POST
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger, InvalidPage
from django.contrib.contenttypes.models import ContentType
import easy_comment.handlers
from easy_comment.forms import CommentForm
from easy_comment.templatetags.comment_tags import get_comments_user_count, get_comment_list
# Create your views here.
# This view only accepts POST requests and only allows logged-in users to comment.
@require_POST
@login_required
def submit_comment(request):
    """
    Create a comment from AJAX-posted form data and return the rendered
    comment-list HTML plus the number of participating users and comments.

    :param request: HttpRequest carrying the comment form, content_type id
        and object id of the commented object
    :return: JsonResponse
    """
    form = CommentForm(data=request.POST)
    if form.is_valid():
        new_comment = form.save(commit=False)
        new_comment.user = request.user
        type_id = request.POST.get('content_type')
        object_id = request.POST.get('object_id')
        # Resolve the generic foreign key target of the comment.
        if type_id and object_id:
            content_type = ContentType.objects.get_for_id(type_id)
            content_object = content_type.get_object_for_this_type(id=object_id)
        else:
            content_object = None
        new_comment.content_object = content_object
        new_comment.save()
        cmt_list = get_comment_list(content_object).order_by('-created')
        user_count = get_comments_user_count(entry=content_object)
        comment_count = cmt_list.count()
        # COMMENT_PAGINATE_BY = None disables pagination.
        paginate_by = getattr(settings, 'COMMENT_PAGINATE_BY', 10)
        if paginate_by:
            cmt_list = cmt_list[: paginate_by]
        comment_list_html = ''
        for comment in cmt_list:
            comment_list_html += comment.to_html()
        return JsonResponse({'msg': 'success!',
                             'html': comment_list_html,
                             'user_count': user_count,
                             'comment_count': comment_count})
    # On validation failure, report the content-field error so the frontend
    # can alert it. NOTE(review): this assumes the error is on 'content';
    # errors on other fields would raise KeyError here — confirm.
    if form.errors.as_data()['content']:
        msg = form.errors.as_data()['content'][0].message
    else:
        msg = '评论出错啦!'
    return JsonResponse({"msg": msg})
def comment_list(request, pk=None):
    """
    Return the comment list for an object as rendered HTML.

    Comments are paginated when settings.COMMENT_PAGINATE_BY is set
    (default 10); set COMMENT_PAGINATE_BY = None to disable pagination.
    The HTML of each comment on the requested page is concatenated and
    returned together with the comment count and participating-user count.

    :param request: HttpRequest with 'content_type' (and optional 'page')
        GET parameters
    :param pk: primary key of the commented object (e.g. an article)
    :return: JsonResponse
    """
    # Resolve the commented object; fall back to None on any lookup error.
    try:
        type_id = request.GET.get('content_type')
        content_type = ContentType.objects.get_for_id(type_id)
        entry = content_type.get_object_for_this_type(id=pk)
    except Exception as e:
        entry = None
    paginate_by = getattr(settings, 'COMMENT_PAGINATE_BY', 10)
    cmt_list = get_comment_list(entry=entry).order_by('-created')
    comment_count = cmt_list.count()
    user_count = get_comments_user_count(entry=entry)
    if paginate_by:
        paginator = Paginator(cmt_list, paginate_by)
        page = request.GET.get('page', 1)
        try:
            cmt_list = paginator.page(page)
        except PageNotAnInteger:
            # Non-numeric page: show the first page.
            cmt_list = paginator.page(1)
        except EmptyPage:
            # Page out of range: return no comments.
            cmt_list = []
    comment_list_html = ''
    for comment in cmt_list:
        comment_list_html += comment.to_html()
    return JsonResponse({'html': comment_list_html,
                         'user_count': user_count,
                         'comment_count': comment_count})
| 37.255319 | 92 | 0.674757 |
c8939a9b92e4c12fc4076e391c836240507aab50 | 45,430 | py | Python | tests/functional/samples/test_samples_linux_runtime.py | ravi9/docker_ci | 895aacc4eec6158526f63a4de25af0c0bc369c1f | [
"Apache-2.0"
] | null | null | null | tests/functional/samples/test_samples_linux_runtime.py | ravi9/docker_ci | 895aacc4eec6158526f63a4de25af0c0bc369c1f | [
"Apache-2.0"
] | null | null | null | tests/functional/samples/test_samples_linux_runtime.py | ravi9/docker_ci | 895aacc4eec6158526f63a4de25af0c0bc369c1f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (C) 2019-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import pytest
from utils.exceptions import FailedTestError
@pytest.mark.usefixtures('_is_image_os', '_is_distribution', '_is_package_url_specified')
@pytest.mark.parametrize('_is_image_os', [('ubuntu18', 'ubuntu20', 'centos7', 'rhel8')], indirect=True)
@pytest.mark.parametrize('_is_distribution', [('runtime', 'custom-no-omz')], indirect=True)
class TestSamplesLinuxRuntime:
    @pytest.mark.xfail_log(pattern='Error: Download',
                           reason='Network problems when downloading alexnet files')
    def test_hello_classification_cpp_cpu(self, tester, image, dev_root, install_openvino_dependencies):
        """Build the C++ samples inside the runtime image, download and
        convert the alexnet model, and run hello_classification on CPU.

        The dev package directories (samples, demo, OMZ, model optimizer)
        are bind-mounted into the runtime container since the runtime image
        does not ship them.
        """
        kwargs = {
            'mem_limit': '3g',
            'volumes': {
                dev_root / 'deployment_tools' / 'inference_engine' / 'samples' / 'cpp': {
                    'bind': '/opt/intel/openvino/inference_engine/samples/cpp',
                },
                dev_root / 'deployment_tools' / 'demo': {
                    'bind': '/opt/intel/openvino/deployment_tools/demo',
                },
                dev_root / 'deployment_tools' / 'open_model_zoo': {
                    'bind': '/opt/intel/openvino/deployment_tools/open_model_zoo',
                },
                dev_root / 'deployment_tools' / 'model_optimizer': {
                    'bind': '/opt/intel/openvino/deployment_tools/model_optimizer',
                },
            },
        }
        # Steps: build samples -> download alexnet (FP16) -> convert with MO
        # -> run hello_classification against the demo image on CPU.
        tester.test_docker_image(
            image,
            [install_openvino_dependencies,
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir cmake setuptools && '
             'cd /opt/intel/openvino/inference_engine/samples/cpp && '
             '/opt/intel/openvino/inference_engine/samples/cpp/build_samples.sh"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir -r '
             '/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/requirements.in && '
             'python3 /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/downloader.py '
             '--name alexnet --precisions FP16 -o /root/inference_engine_cpp_samples_build/intel64/Release/"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'cd /opt/intel/openvino/deployment_tools/model_optimizer && '
             'python3 -m pip install --no-cache-dir -r requirements_caffe.txt && '
             'python3 mo.py --output_dir /root/inference_engine_cpp_samples_build/intel64/Release/public '
             '--input_model /root/inference_engine_cpp_samples_build/intel64/Release/public/alexnet/'
             'alexnet.caffemodel"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             '/root/inference_engine_cpp_samples_build/intel64/Release/hello_classification '
             '/root/inference_engine_cpp_samples_build/intel64/Release/public/alexnet.xml '
             '/opt/intel/openvino/deployment_tools/demo/car.png CPU"',
             ], self.test_hello_classification_cpp_cpu.__name__, **kwargs,
        )
    @pytest.mark.gpu
    @pytest.mark.xfail_log(pattern='Error: Download',
                           reason='Network problems when downloading alexnet files')
    def test_hello_classification_cpp_gpu(self, tester, image, dev_root, install_openvino_dependencies):
        """Build the C++ samples, download and convert alexnet, then run the
        hello_classification sample on the GPU plugin.
        """
        kwargs = {
            # pass the host render node through for the GPU plugin
            'devices': ['/dev/dri:/dev/dri'],
            'mem_limit': '3g',
            'volumes': {
                dev_root / 'deployment_tools' / 'inference_engine' / 'samples' / 'cpp': {
                    'bind': '/opt/intel/openvino/inference_engine/samples/cpp',
                },
                dev_root / 'deployment_tools' / 'demo': {
                    'bind': '/opt/intel/openvino/deployment_tools/demo',
                },
                dev_root / 'deployment_tools' / 'open_model_zoo': {
                    'bind': '/opt/intel/openvino/deployment_tools/open_model_zoo',
                },
                dev_root / 'deployment_tools' / 'model_optimizer': {
                    'bind': '/opt/intel/openvino/deployment_tools/model_optimizer',
                },
            },
        }
        # build samples -> download alexnet -> convert with MO -> run on GPU
        tester.test_docker_image(
            image,
            [install_openvino_dependencies,
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir cmake setuptools && '
             'cd /opt/intel/openvino/inference_engine/samples/cpp && '
             '/opt/intel/openvino/inference_engine/samples/cpp/build_samples.sh"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir -r '
             '/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/requirements.in && '
             'python3 /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/downloader.py '
             '--name alexnet --precisions FP16 -o /root/inference_engine_cpp_samples_build/intel64/Release/"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'cd /opt/intel/openvino/deployment_tools/model_optimizer && '
             'python3 -m pip install --no-cache-dir -r requirements_caffe.txt && '
             'python3 mo.py --output_dir /root/inference_engine_cpp_samples_build/intel64/Release/public '
             '--input_model /root/inference_engine_cpp_samples_build/intel64/Release/public/alexnet/'
             'alexnet.caffemodel"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             '/root/inference_engine_cpp_samples_build/intel64/Release/hello_classification '
             '/root/inference_engine_cpp_samples_build/intel64/Release/public/alexnet.xml '
             '/opt/intel/openvino/deployment_tools/demo/car.png GPU"',
             ], self.test_hello_classification_cpp_gpu.__name__, **kwargs,
        )
    @pytest.mark.vpu
    @pytest.mark.usefixtures('_python_vpu_plugin_required')
    @pytest.mark.xfail_log(pattern='Error: Download',
                           reason='Network problems when downloading alexnet files')
    def test_hello_classification_cpp_vpu(self, tester, image, dev_root, install_openvino_dependencies):
        """Build the C++ samples, download and convert alexnet, then run the
        hello_classification sample on the MYRIAD (VPU) plugin.
        """
        kwargs = {
            # USB char-device access (major 189) for the MYRIAD stick
            'device_cgroup_rules': ['c 189:* rmw'],
            'mem_limit': '3g',
            'volumes': {
                '/dev/bus/usb': {
                    'bind': '/dev/bus/usb',
                },
                dev_root / 'deployment_tools' / 'inference_engine' / 'samples' / 'cpp': {
                    'bind': '/opt/intel/openvino/inference_engine/samples/cpp',
                },
                dev_root / 'deployment_tools' / 'demo': {
                    'bind': '/opt/intel/openvino/deployment_tools/demo',
                },
                dev_root / 'deployment_tools' / 'open_model_zoo': {
                    'bind': '/opt/intel/openvino/deployment_tools/open_model_zoo',
                },
                dev_root / 'deployment_tools' / 'model_optimizer': {
                    'bind': '/opt/intel/openvino/deployment_tools/model_optimizer',
                },
            },
        }
        tester.test_docker_image(
            image,
            [install_openvino_dependencies,
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir cmake setuptools && '
             'cd /opt/intel/openvino/inference_engine/samples/cpp && '
             '/opt/intel/openvino/inference_engine/samples/cpp/build_samples.sh"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir -r '
             '/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/requirements.in && '
             'python3 /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/downloader.py '
             '--name alexnet --precisions FP16 -o /root/inference_engine_cpp_samples_build/intel64/Release/"',
             # NOTE(review): unlike test_classification_async_cpp_vpu, mo.py is
             # run without --data_type FP16 here, so an FP32 IR is produced for
             # MYRIAD — confirm this is intentional.
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'cd /opt/intel/openvino/deployment_tools/model_optimizer && '
             'python3 -m pip install --no-cache-dir -r requirements_caffe.txt && '
             'python3 mo.py --output_dir /root/inference_engine_cpp_samples_build/intel64/Release/public '
             '--input_model /root/inference_engine_cpp_samples_build/intel64/Release/public/alexnet/'
             'alexnet.caffemodel"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             '/root/inference_engine_cpp_samples_build/intel64/Release/hello_classification '
             '/root/inference_engine_cpp_samples_build/intel64/Release/public/alexnet.xml '
             '/opt/intel/openvino/deployment_tools/demo/car.png MYRIAD"',
             ], self.test_hello_classification_cpp_vpu.__name__, **kwargs,
        )
    @pytest.mark.hddl
    @pytest.mark.usefixtures('_is_not_image_os')
    @pytest.mark.parametrize('_is_not_image_os', [('rhel8')], indirect=True)
    @pytest.mark.xfail_log(pattern='Error: Download',
                           reason='Network problems when downloading alexnet files')
    def test_hello_classification_cpp_hddl(self, tester, image, dev_root, install_openvino_dependencies):
        """Build the C++ samples, download and convert alexnet, then run the
        hello_classification sample on the HDDL plugin (not run on rhel8).
        """
        # NOTE(review): ('rhel8') in the parametrize above is a plain string,
        # not a 1-tuple; it works, but add a trailing comma if a tuple was meant.
        kwargs = {
            # /dev/ion plus shared /var/tmp and /dev/shm are presumably needed
            # to communicate with the host hddldaemon service — TODO confirm.
            'devices': ['/dev/ion:/dev/ion'],
            'mem_limit': '3g',
            'volumes': {
                '/var/tmp': {'bind': '/var/tmp'},  # nosec # noqa: S108
                '/dev/shm': {'bind': '/dev/shm'},  # nosec # noqa: S108
                dev_root / 'deployment_tools' / 'inference_engine' / 'samples' / 'cpp': {
                    'bind': '/opt/intel/openvino/inference_engine/samples/cpp',
                },
                dev_root / 'deployment_tools' / 'demo': {
                    'bind': '/opt/intel/openvino/deployment_tools/demo',
                },
                dev_root / 'deployment_tools' / 'open_model_zoo': {
                    'bind': '/opt/intel/openvino/deployment_tools/open_model_zoo',
                },
                dev_root / 'deployment_tools' / 'model_optimizer': {
                    'bind': '/opt/intel/openvino/deployment_tools/model_optimizer',
                },
            },
        }
        tester.test_docker_image(
            image,
            [install_openvino_dependencies,
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir cmake setuptools && '
             'cd /opt/intel/openvino/inference_engine/samples/cpp && '
             '/opt/intel/openvino/inference_engine/samples/cpp/build_samples.sh"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir -r '
             '/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/requirements.in && '
             'python3 /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/downloader.py '
             '--name alexnet --precisions FP16 -o /root/inference_engine_cpp_samples_build/intel64/Release/"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'cd /opt/intel/openvino/deployment_tools/model_optimizer && '
             'python3 -m pip install --no-cache-dir -r requirements_caffe.txt && '
             'python3 mo.py --output_dir /root/inference_engine_cpp_samples_build/intel64/Release/public '
             '--input_model /root/inference_engine_cpp_samples_build/intel64/Release/public/alexnet/'
             'alexnet.caffemodel"',
             # umask 0000 so files created under the shared /dev/shm are
             # accessible to the host daemon; the hddl_* files are cleaned up
             # after the run.
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && umask 0000 && '
             '/root/inference_engine_cpp_samples_build/intel64/Release/hello_classification '
             '/root/inference_engine_cpp_samples_build/intel64/Release/public/alexnet.xml '
             '/opt/intel/openvino/deployment_tools/demo/car.png HDDL && rm -f /dev/shm/hddl_*"',
             ], self.test_hello_classification_cpp_hddl.__name__, **kwargs,
        )
def test_hello_classification_cpp_fail(self, tester, image, dev_root, install_openvino_dependencies, caplog):
kwargs = {
'mem_limit': '3g',
'volumes': {
dev_root / 'deployment_tools' / 'inference_engine' / 'samples' / 'cpp': {
'bind': '/opt/intel/openvino/inference_engine/samples/cpp',
},
dev_root / 'deployment_tools' / 'demo': {
'bind': '/opt/intel/openvino/deployment_tools/demo',
},
dev_root / 'deployment_tools' / 'open_model_zoo': {
'bind': '/opt/intel/openvino/deployment_tools/open_model_zoo',
},
dev_root / 'deployment_tools' / 'model_optimizer': {
'bind': '/opt/intel/openvino/deployment_tools/model_optimizer',
},
},
}
with pytest.raises(FailedTestError):
tester.test_docker_image(
image,
[install_openvino_dependencies,
'/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
'python3 -m pip install --no-cache-dir cmake setuptools && '
'cd /opt/intel/openvino/inference_engine/samples/cpp && '
'/opt/intel/openvino/inference_engine/samples/cpp/build_samples.sh"',
'/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
'python3 -m pip install --no-cache-dir -r '
'/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/requirements.in && '
'python3 /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/downloader.py '
'--name vehicle-attributes-recognition-barrier-0039 --precisions FP32 '
'-o /root/inference_engine_cpp_samples_build/intel64/Release/"',
'/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
'/root/inference_engine_cpp_samples_build/intel64/Release/hello_classification '
'/root/inference_engine_cpp_samples_build/intel64/Release/intel/'
'vehicle-attributes-recognition-barrier-0039/FP32/'
'vehicle-attributes-recognition-barrier-0039.xml '
'/opt/intel/openvino/deployment_tools/demo/car.png CPU"',
], self.test_hello_classification_cpp_fail.__name__, **kwargs,
)
if 'Sample supports topologies with 1 output only' not in caplog.text:
pytest.fail('Sample supports topologies with 1 output only')
def test_hello_reshape_cpp_cpu(self, tester, image, dev_root, install_openvino_dependencies):
kwargs = {
'mem_limit': '3g',
'volumes': {
dev_root / 'deployment_tools' / 'inference_engine' / 'samples' / 'cpp': {
'bind': '/opt/intel/openvino/inference_engine/samples/cpp',
},
dev_root / 'deployment_tools' / 'demo': {
'bind': '/opt/intel/openvino/deployment_tools/demo',
},
dev_root / 'deployment_tools' / 'open_model_zoo': {
'bind': '/opt/intel/openvino/deployment_tools/open_model_zoo',
},
dev_root / 'deployment_tools' / 'model_optimizer': {
'bind': '/opt/intel/openvino/deployment_tools/model_optimizer',
},
},
}
tester.test_docker_image(
image,
[install_openvino_dependencies,
'/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
'python3 -m pip install --no-cache-dir cmake setuptools && '
'cd /opt/intel/openvino/inference_engine/samples/cpp && '
'/opt/intel/openvino/inference_engine/samples/cpp/build_samples.sh"',
'/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
'python3 -m pip install --no-cache-dir '
'-r /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/requirements.in && '
'python3 -B /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/downloader.py '
'--name vehicle-detection-adas-0002 --precisions FP16 '
'-o /root/inference_engine_cpp_samples_build/intel64/Release/"',
'/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
'/root/inference_engine_cpp_samples_build/intel64/Release/hello_reshape_ssd '
'/root/inference_engine_cpp_samples_build/intel64/Release/intel/vehicle-detection-adas-0002/FP16/'
'vehicle-detection-adas-0002.xml '
'/opt/intel/openvino/deployment_tools/demo/car_1.bmp CPU 1"',
], self.test_hello_reshape_cpp_cpu.__name__, **kwargs,
)
    @pytest.mark.gpu
    def test_hello_reshape_cpp_gpu(self, tester, image, dev_root, install_openvino_dependencies):
        """Build the C++ samples, download vehicle-detection-adas-0002 (FP16)
        and run the hello_reshape_ssd sample on the GPU plugin.
        """
        kwargs = {
            # pass the host render node through for the GPU plugin
            'devices': ['/dev/dri:/dev/dri'],
            'mem_limit': '3g',
            'volumes': {
                dev_root / 'deployment_tools' / 'inference_engine' / 'samples' / 'cpp': {
                    'bind': '/opt/intel/openvino/inference_engine/samples/cpp',
                },
                dev_root / 'deployment_tools' / 'demo': {
                    'bind': '/opt/intel/openvino/deployment_tools/demo',
                },
                dev_root / 'deployment_tools' / 'open_model_zoo': {
                    'bind': '/opt/intel/openvino/deployment_tools/open_model_zoo',
                },
                dev_root / 'deployment_tools' / 'model_optimizer': {
                    'bind': '/opt/intel/openvino/deployment_tools/model_optimizer',
                },
            },
        }
        tester.test_docker_image(
            image,
            [install_openvino_dependencies,
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir cmake setuptools && '
             'cd /opt/intel/openvino/inference_engine/samples/cpp && '
             '/opt/intel/openvino/inference_engine/samples/cpp/build_samples.sh"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir '
             '-r /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/requirements.in && '
             'python3 -B /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/downloader.py '
             '--name vehicle-detection-adas-0002 --precisions FP16 '
             '-o /root/inference_engine_cpp_samples_build/intel64/Release/"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             '/root/inference_engine_cpp_samples_build/intel64/Release/hello_reshape_ssd '
             '/root/inference_engine_cpp_samples_build/intel64/Release/intel/vehicle-detection-adas-0002/FP16/'
             'vehicle-detection-adas-0002.xml '
             '/opt/intel/openvino/deployment_tools/demo/car_1.bmp GPU 1"',
             ], self.test_hello_reshape_cpp_gpu.__name__, **kwargs,
        )
    @pytest.mark.vpu
    @pytest.mark.usefixtures('_python_vpu_plugin_required')
    def test_hello_reshape_cpp_vpu(self, tester, image, dev_root, install_openvino_dependencies):
        """Build the C++ samples, download vehicle-detection-adas-0002 (FP16)
        and run the hello_reshape_ssd sample on the MYRIAD (VPU) plugin.
        """
        kwargs = {
            # USB char-device access (major 189) for the MYRIAD stick
            'device_cgroup_rules': ['c 189:* rmw'],
            'mem_limit': '3g',
            'volumes': {
                '/dev/bus/usb': {
                    'bind': '/dev/bus/usb',
                },
                dev_root / 'deployment_tools' / 'inference_engine' / 'samples' / 'cpp': {
                    'bind': '/opt/intel/openvino/inference_engine/samples/cpp',
                },
                dev_root / 'deployment_tools' / 'demo': {
                    'bind': '/opt/intel/openvino/deployment_tools/demo',
                },
                dev_root / 'deployment_tools' / 'open_model_zoo': {
                    'bind': '/opt/intel/openvino/deployment_tools/open_model_zoo',
                },
                dev_root / 'deployment_tools' / 'model_optimizer': {
                    'bind': '/opt/intel/openvino/deployment_tools/model_optimizer',
                },
            },
        }
        tester.test_docker_image(
            image,
            [install_openvino_dependencies,
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir cmake setuptools && '
             'cd /opt/intel/openvino/inference_engine/samples/cpp && '
             '/opt/intel/openvino/inference_engine/samples/cpp/build_samples.sh"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir '
             '-r /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/requirements.in && '
             'python3 -B /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/downloader.py '
             '--name vehicle-detection-adas-0002 --precisions FP16 '
             '-o /root/inference_engine_cpp_samples_build/intel64/Release/"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             '/root/inference_engine_cpp_samples_build/intel64/Release/hello_reshape_ssd '
             '/root/inference_engine_cpp_samples_build/intel64/Release/intel/vehicle-detection-adas-0002/FP16/'
             'vehicle-detection-adas-0002.xml '
             '/opt/intel/openvino/deployment_tools/demo/car_1.bmp MYRIAD 1"',
             ], self.test_hello_reshape_cpp_vpu.__name__, **kwargs,
        )
    @pytest.mark.hddl
    @pytest.mark.usefixtures('_is_not_image_os')
    @pytest.mark.parametrize('_is_not_image_os', [('rhel8')], indirect=True)
    def test_hello_reshape_cpp_hddl(self, tester, image, dev_root, install_openvino_dependencies):
        """Build the C++ samples, download vehicle-detection-adas-0002 (FP16)
        and run the hello_reshape_ssd sample on the HDDL plugin (not on rhel8).
        """
        kwargs = {
            'devices': ['/dev/ion:/dev/ion'],
            'mem_limit': '3g',
            'volumes': {
                '/var/tmp': {'bind': '/var/tmp'},  # nosec # noqa: S108
                '/dev/shm': {'bind': '/dev/shm'},  # nosec # noqa: S108
                dev_root / 'deployment_tools' / 'inference_engine' / 'samples' / 'cpp': {
                    'bind': '/opt/intel/openvino/inference_engine/samples/cpp',
                },
                dev_root / 'deployment_tools' / 'demo': {
                    'bind': '/opt/intel/openvino/deployment_tools/demo',
                },
                dev_root / 'deployment_tools' / 'open_model_zoo': {
                    'bind': '/opt/intel/openvino/deployment_tools/open_model_zoo',
                },
                dev_root / 'deployment_tools' / 'model_optimizer': {
                    'bind': '/opt/intel/openvino/deployment_tools/model_optimizer',
                },
            },
        }
        tester.test_docker_image(
            image,
            [install_openvino_dependencies,
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir cmake setuptools && '
             'cd /opt/intel/openvino/inference_engine/samples/cpp && '
             '/opt/intel/openvino/inference_engine/samples/cpp/build_samples.sh"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir '
             '-r /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/requirements.in && '
             'python3 -B /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/downloader.py '
             '--name vehicle-detection-adas-0002 --precisions FP16 '
             '-o /root/inference_engine_cpp_samples_build/intel64/Release/"',
             # umask 0000 so files under the shared /dev/shm stay accessible to
             # the host daemon; hddl_* files are removed after the run.
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && umask 0000 && '
             '/root/inference_engine_cpp_samples_build/intel64/Release/hello_reshape_ssd '
             '/root/inference_engine_cpp_samples_build/intel64/Release/intel/vehicle-detection-adas-0002/FP16/'
             'vehicle-detection-adas-0002.xml '
             '/opt/intel/openvino/deployment_tools/demo/car_1.bmp HDDL 1 && rm -f /dev/shm/hddl_*"',
             ], self.test_hello_reshape_cpp_hddl.__name__, **kwargs,
        )
def test_object_detection_cpp_cpu(self, tester, image, dev_root, install_openvino_dependencies):
kwargs = {
'mem_limit': '3g',
'volumes': {
dev_root / 'deployment_tools' / 'inference_engine' / 'samples' / 'cpp': {
'bind': '/opt/intel/openvino/inference_engine/samples/cpp',
},
dev_root / 'deployment_tools' / 'demo': {
'bind': '/opt/intel/openvino/deployment_tools/demo',
},
dev_root / 'deployment_tools' / 'open_model_zoo': {
'bind': '/opt/intel/openvino/deployment_tools/open_model_zoo',
},
},
}
tester.test_docker_image(
image,
[install_openvino_dependencies,
'/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
'python3 -m pip install --no-cache-dir cmake setuptools && '
'cd /opt/intel/openvino/inference_engine/samples/cpp && '
'/opt/intel/openvino/inference_engine/samples/cpp/build_samples.sh"',
'/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
'python3 -m pip install --no-cache-dir '
'-r /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/requirements.in && '
'python3 -B /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/downloader.py '
'--name vehicle-detection-adas-0002 --precisions FP16 '
'-o /root/inference_engine_cpp_samples_build/intel64/Release/"',
'/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
'/root/inference_engine_cpp_samples_build/intel64/Release/object_detection_sample_ssd '
'-m /root/inference_engine_cpp_samples_build/intel64/Release/intel/vehicle-detection-adas-0002/FP16/'
'vehicle-detection-adas-0002.xml '
'-i /opt/intel/openvino/deployment_tools/demo/car_1.bmp -d CPU"',
], self.test_object_detection_cpp_cpu.__name__, **kwargs,
)
    @pytest.mark.gpu
    def test_object_detection_cpp_gpu(self, tester, image, dev_root, install_openvino_dependencies):
        """Build the C++ samples, download vehicle-detection-adas-0002 (FP16)
        and run object_detection_sample_ssd on the GPU plugin.
        """
        kwargs = {
            # pass the host render node through for the GPU plugin
            'devices': ['/dev/dri:/dev/dri'],
            'mem_limit': '3g',
            'volumes': {
                dev_root / 'deployment_tools' / 'inference_engine' / 'samples' / 'cpp': {
                    'bind': '/opt/intel/openvino/inference_engine/samples/cpp',
                },
                dev_root / 'deployment_tools' / 'demo': {
                    'bind': '/opt/intel/openvino/deployment_tools/demo',
                },
                dev_root / 'deployment_tools' / 'open_model_zoo': {
                    'bind': '/opt/intel/openvino/deployment_tools/open_model_zoo',
                },
            },
        }
        tester.test_docker_image(
            image,
            [install_openvino_dependencies,
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir cmake setuptools && '
             'cd /opt/intel/openvino/inference_engine/samples/cpp && '
             '/opt/intel/openvino/inference_engine/samples/cpp/build_samples.sh"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir '
             '-r /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/requirements.in && '
             'python3 -B /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/downloader.py '
             '--name vehicle-detection-adas-0002 --precisions FP16 '
             '-o /root/inference_engine_cpp_samples_build/intel64/Release/"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             '/root/inference_engine_cpp_samples_build/intel64/Release/object_detection_sample_ssd '
             '-m /root/inference_engine_cpp_samples_build/intel64/Release/intel/vehicle-detection-adas-0002/FP16/'
             'vehicle-detection-adas-0002.xml '
             '-i /opt/intel/openvino/deployment_tools/demo/car_1.bmp -d GPU"',
             ], self.test_object_detection_cpp_gpu.__name__, **kwargs,
        )
    @pytest.mark.vpu
    @pytest.mark.usefixtures('_python_vpu_plugin_required')
    def test_object_detection_cpp_vpu(self, tester, image, dev_root, install_openvino_dependencies):
        """Build the C++ samples, download vehicle-detection-adas-0002 (FP16)
        and run object_detection_sample_ssd on the MYRIAD (VPU) plugin.
        """
        kwargs = {
            # USB char-device access (major 189) for the MYRIAD stick
            'device_cgroup_rules': ['c 189:* rmw'],
            'mem_limit': '3g',
            'volumes': {
                '/dev/bus/usb': {
                    'bind': '/dev/bus/usb',
                },
                dev_root / 'deployment_tools' / 'inference_engine' / 'samples' / 'cpp': {
                    'bind': '/opt/intel/openvino/inference_engine/samples/cpp',
                },
                dev_root / 'deployment_tools' / 'demo': {
                    'bind': '/opt/intel/openvino/deployment_tools/demo',
                },
                dev_root / 'deployment_tools' / 'open_model_zoo': {
                    'bind': '/opt/intel/openvino/deployment_tools/open_model_zoo',
                },
            },
        }
        tester.test_docker_image(
            image,
            [install_openvino_dependencies,
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir cmake setuptools && '
             'cd /opt/intel/openvino/inference_engine/samples/cpp && '
             '/opt/intel/openvino/inference_engine/samples/cpp/build_samples.sh"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir '
             '-r /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/requirements.in && '
             'python3 -B /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/downloader.py '
             '--name vehicle-detection-adas-0002 --precisions FP16 '
             '-o /root/inference_engine_cpp_samples_build/intel64/Release/"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             '/root/inference_engine_cpp_samples_build/intel64/Release/object_detection_sample_ssd '
             '-m /root/inference_engine_cpp_samples_build/intel64/Release/intel/vehicle-detection-adas-0002/FP16/'
             'vehicle-detection-adas-0002.xml '
             '-i /opt/intel/openvino/deployment_tools/demo/car_1.bmp -d MYRIAD"',
             ], self.test_object_detection_cpp_vpu.__name__, **kwargs,
        )
    @pytest.mark.hddl
    @pytest.mark.usefixtures('_is_not_image_os')
    @pytest.mark.parametrize('_is_not_image_os', [('rhel8')], indirect=True)
    def test_object_detection_cpp_hddl(self, tester, image, dev_root, install_openvino_dependencies):
        """Build the C++ samples, download vehicle-detection-adas-0002 (FP16)
        and run object_detection_sample_ssd on the HDDL plugin (not on rhel8).
        """
        kwargs = {
            'devices': ['/dev/ion:/dev/ion'],
            'mem_limit': '3g',
            'volumes': {
                '/var/tmp': {'bind': '/var/tmp'},  # nosec # noqa: S108
                '/dev/shm': {'bind': '/dev/shm'},  # nosec # noqa: S108
                dev_root / 'deployment_tools' / 'inference_engine' / 'samples' / 'cpp': {
                    'bind': '/opt/intel/openvino/inference_engine/samples/cpp',
                },
                dev_root / 'deployment_tools' / 'demo': {
                    'bind': '/opt/intel/openvino/deployment_tools/demo',
                },
                dev_root / 'deployment_tools' / 'open_model_zoo': {
                    'bind': '/opt/intel/openvino/deployment_tools/open_model_zoo',
                },
            },
        }
        tester.test_docker_image(
            image,
            [install_openvino_dependencies,
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir cmake setuptools && '
             'cd /opt/intel/openvino/inference_engine/samples/cpp && '
             '/opt/intel/openvino/inference_engine/samples/cpp/build_samples.sh"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir '
             '-r /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/requirements.in && '
             'python3 -B /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/downloader.py '
             '--name vehicle-detection-adas-0002 --precisions FP16 '
             '-o /root/inference_engine_cpp_samples_build/intel64/Release/"',
             # umask 0000 so files under the shared /dev/shm stay accessible to
             # the host daemon; hddl_* files are removed after the run.
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && umask 0000 && '
             '/root/inference_engine_cpp_samples_build/intel64/Release/object_detection_sample_ssd '
             '-m /root/inference_engine_cpp_samples_build/intel64/Release/intel/vehicle-detection-adas-0002/FP16/'
             'vehicle-detection-adas-0002.xml '
             '-i /opt/intel/openvino/deployment_tools/demo/car_1.bmp -d HDDL && rm -f /dev/shm/hddl_*"',
             ], self.test_object_detection_cpp_hddl.__name__, **kwargs,
        )
    @pytest.mark.xfail_log(pattern='Error: Download',
                           reason='Network problems when downloading alexnet files')
    def test_classification_async_cpp_cpu(self, tester, image, dev_root, install_openvino_dependencies):
        """Build the C++ samples, download and convert alexnet, then run the
        classification_sample_async sample on the CPU plugin.
        """
        kwargs = {
            'mem_limit': '3g',
            'volumes': {
                dev_root / 'deployment_tools' / 'inference_engine' / 'samples' / 'cpp': {
                    'bind': '/opt/intel/openvino/inference_engine/samples/cpp',
                },
                dev_root / 'deployment_tools' / 'demo': {
                    'bind': '/opt/intel/openvino/deployment_tools/demo',
                },
                dev_root / 'deployment_tools' / 'open_model_zoo': {
                    'bind': '/opt/intel/openvino/deployment_tools/open_model_zoo',
                },
                dev_root / 'deployment_tools' / 'model_optimizer': {
                    'bind': '/opt/intel/openvino/deployment_tools/model_optimizer',
                },
            },
        }
        # build samples -> download alexnet -> convert with MO -> run async on CPU
        tester.test_docker_image(
            image,
            [install_openvino_dependencies,
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir cmake setuptools && '
             'cd /opt/intel/openvino/inference_engine/samples/cpp && '
             '/opt/intel/openvino/inference_engine/samples/cpp/build_samples.sh"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir '
             '-r /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/requirements.in && '
             'python3 -B /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/downloader.py '
             '--name alexnet --precisions FP16 -o /root/inference_engine_cpp_samples_build/intel64/Release/"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'cd /opt/intel/openvino/deployment_tools/model_optimizer && '
             'python3 -m pip install --no-cache-dir -r requirements_caffe.txt && '
             'python3 -B mo.py --output_dir /root/inference_engine_cpp_samples_build/intel64/Release/public '
             '--input_model /root/inference_engine_cpp_samples_build/intel64/Release/public/alexnet/'
             'alexnet.caffemodel"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             '/root/inference_engine_cpp_samples_build/intel64/Release/classification_sample_async '
             '-m /root/inference_engine_cpp_samples_build/intel64/Release/public/alexnet.xml '
             '-i /opt/intel/openvino/deployment_tools/demo/car_1.bmp -d CPU"',
             ], self.test_classification_async_cpp_cpu.__name__, **kwargs,
        )
    @pytest.mark.gpu
    @pytest.mark.xfail_log(pattern='Error: Download',
                           reason='Network problems when downloading alexnet files')
    def test_classification_async_cpp_gpu(self, tester, image, dev_root, install_openvino_dependencies):
        """Build the C++ samples, download and convert alexnet, then run the
        classification_sample_async sample on the GPU plugin.
        """
        kwargs = {
            # pass the host render node through for the GPU plugin
            'devices': ['/dev/dri:/dev/dri'],
            'mem_limit': '3g',
            'volumes': {
                dev_root / 'deployment_tools' / 'inference_engine' / 'samples' / 'cpp': {
                    'bind': '/opt/intel/openvino/inference_engine/samples/cpp',
                },
                dev_root / 'deployment_tools' / 'demo': {
                    'bind': '/opt/intel/openvino/deployment_tools/demo',
                },
                dev_root / 'deployment_tools' / 'open_model_zoo': {
                    'bind': '/opt/intel/openvino/deployment_tools/open_model_zoo',
                },
                dev_root / 'deployment_tools' / 'model_optimizer': {
                    'bind': '/opt/intel/openvino/deployment_tools/model_optimizer',
                },
            },
        }
        tester.test_docker_image(
            image,
            [install_openvino_dependencies,
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir cmake setuptools && '
             'cd /opt/intel/openvino/inference_engine/samples/cpp && '
             '/opt/intel/openvino/inference_engine/samples/cpp/build_samples.sh"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir '
             '-r /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/requirements.in && '
             'python3 -B /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/downloader.py '
             '--name alexnet --precisions FP16 -o /root/inference_engine_cpp_samples_build/intel64/Release/"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'cd /opt/intel/openvino/deployment_tools/model_optimizer && '
             'python3 -m pip install --no-cache-dir -r requirements_caffe.txt && '
             'python3 -B mo.py --output_dir /root/inference_engine_cpp_samples_build/intel64/Release/public '
             '--input_model /root/inference_engine_cpp_samples_build/intel64/Release/public/alexnet/'
             'alexnet.caffemodel"',
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             '/root/inference_engine_cpp_samples_build/intel64/Release/classification_sample_async '
             '-m /root/inference_engine_cpp_samples_build/intel64/Release/public/alexnet.xml '
             '-i /opt/intel/openvino/deployment_tools/demo/car_1.bmp -d GPU"',
             ], self.test_classification_async_cpp_gpu.__name__, **kwargs,
        )
@pytest.mark.vpu
@pytest.mark.usefixtures('_python_vpu_plugin_required')
@pytest.mark.xfail_log(pattern='Error: Download',
reason='Network problems when downloading alexnet files')
def test_classification_async_cpp_vpu(self, tester, image, dev_root, install_openvino_dependencies):
kwargs = {
'devices': ['/dev/dri:/dev/dri'],
'mem_limit': '3g',
'volumes': {
'/dev/bus/usb': {
'bind': '/dev/bus/usb',
},
dev_root / 'deployment_tools' / 'inference_engine' / 'samples' / 'cpp': {
'bind': '/opt/intel/openvino/inference_engine/samples/cpp',
},
dev_root / 'deployment_tools' / 'demo': {
'bind': '/opt/intel/openvino/deployment_tools/demo',
},
dev_root / 'deployment_tools' / 'open_model_zoo': {
'bind': '/opt/intel/openvino/deployment_tools/open_model_zoo',
},
dev_root / 'deployment_tools' / 'model_optimizer': {
'bind': '/opt/intel/openvino/deployment_tools/model_optimizer',
},
},
}
tester.test_docker_image(
image,
[install_openvino_dependencies,
'/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
'python3 -m pip install --no-cache-dir cmake setuptools && '
'cd /opt/intel/openvino/inference_engine/samples/cpp && '
'/opt/intel/openvino/inference_engine/samples/cpp/build_samples.sh"',
'/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
'python3 -m pip install --no-cache-dir -r '
'/opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/requirements.in && '
'python3 -B /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/downloader.py '
'--name alexnet --precisions FP16 -o /root/inference_engine_cpp_samples_build/intel64/Release/"',
'/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
'cd /opt/intel/openvino/deployment_tools/model_optimizer && '
'python3 -m pip install --no-cache-dir -r requirements_caffe.txt && '
'python3 -B mo.py --output_dir /root/inference_engine_cpp_samples_build/intel64/Release/public '
'--input_model /root/inference_engine_cpp_samples_build/intel64/Release/public/alexnet/'
'alexnet.caffemodel --data_type FP16"',
'/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
'/root/inference_engine_cpp_samples_build/intel64/Release/classification_sample_async '
'-m /root/inference_engine_cpp_samples_build/intel64/Release/public/alexnet.xml '
'-i /opt/intel/openvino/deployment_tools/demo/car_1.bmp -d MYRIAD"',
], self.test_classification_async_cpp_vpu.__name__, **kwargs,
)
    @pytest.mark.hddl
    @pytest.mark.usefixtures('_is_not_image_os')
    @pytest.mark.parametrize('_is_not_image_os', [('rhel8')], indirect=True)
    @pytest.mark.xfail_log(pattern='Error: Download',
                           reason='Network problems when downloading alexnet files')
    def test_classification_async_cpp_hddl(self, tester, image, dev_root, install_openvino_dependencies):
        """Run the C++ classification_sample_async demo on an HDDL device.

        Inside the container under test: build the C++ samples, download the
        public ``alexnet`` model, convert it with the Model Optimizer, then
        execute the sample with ``-d HDDL`` against the demo car image.
        Skipped on RHEL 8 images; expected to fail (xfail) when the model
        download hits network problems.
        """
        # Container options: the ion device plus shared /var/tmp and /dev/shm
        # are needed to talk to the host-side HDDL daemon; the dev_root bind
        # mounts expose developer-package tools absent from the runtime image.
        kwargs = {
            'devices': ['/dev/ion:/dev/ion'],
            'mem_limit': '3g',
            'volumes': {
                '/var/tmp': {'bind': '/var/tmp'},  # nosec # noqa: S108
                '/dev/shm': {'bind': '/dev/shm'},  # nosec # noqa: S108
                dev_root / 'deployment_tools' / 'inference_engine' / 'samples' / 'cpp': {
                    'bind': '/opt/intel/openvino/inference_engine/samples/cpp',
                },
                dev_root / 'deployment_tools' / 'demo': {
                    'bind': '/opt/intel/openvino/deployment_tools/demo',
                },
                dev_root / 'deployment_tools' / 'open_model_zoo': {
                    'bind': '/opt/intel/openvino/deployment_tools/open_model_zoo',
                },
                dev_root / 'deployment_tools' / 'model_optimizer': {
                    'bind': '/opt/intel/openvino/deployment_tools/model_optimizer',
                },
            },
        }
        tester.test_docker_image(
            image,
            [install_openvino_dependencies,
             # Step 1: build the C++ samples inside the container.
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir cmake setuptools && '
             'cd /opt/intel/openvino/inference_engine/samples/cpp && '
             '/opt/intel/openvino/inference_engine/samples/cpp/build_samples.sh"',
             # Step 2: download the public alexnet model (FP16) with the
             # open_model_zoo downloader.
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'python3 -m pip install --no-cache-dir '
             '-r /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/requirements.in && '
             'python3 -B /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/downloader.py '
             '--name alexnet --precisions FP16 -o /root/inference_engine_cpp_samples_build/intel64/Release/"',
             # Step 3: convert the Caffe model to IR with the Model Optimizer.
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && '
             'cd /opt/intel/openvino/deployment_tools/model_optimizer && '
             'python3 -m pip install --no-cache-dir -r requirements_caffe.txt && '
             'python3 -B mo.py --output_dir /root/inference_engine_cpp_samples_build/intel64/Release/public '
             '--input_model /root/inference_engine_cpp_samples_build/intel64/Release/public/alexnet/'
             'alexnet.caffemodel"',
             # Step 4: run the sample on HDDL; the trailing rm cleans up the
             # HDDL shared-memory files the run leaves in /dev/shm.
             '/bin/bash -ac ". /opt/intel/openvino/bin/setupvars.sh && umask 0000 && '
             '/root/inference_engine_cpp_samples_build/intel64/Release/classification_sample_async '
             '-m /root/inference_engine_cpp_samples_build/intel64/Release/public/alexnet.xml '
             '-i /opt/intel/openvino/deployment_tools/demo/car_1.bmp -d HDDL && rm -f /dev/shm/hddl_*"',
            ], self.test_classification_async_cpp_hddl.__name__, **kwargs,
        )
| 58.923476 | 114 | 0.593198 |
637dfcdef64e1cea794885acf560cb22322023d1 | 28,997 | py | Python | pyofx/ofx_property_defs.py | davebarkeruk/pyofx | 1f5b637a4036a476d29c4ef05a14902ad2b6aa58 | [
"MIT"
] | 5 | 2020-09-09T07:05:24.000Z | 2020-12-28T08:34:56.000Z | pyofx/ofx_property_defs.py | davebarkeruk/pyofx | 1f5b637a4036a476d29c4ef05a14902ad2b6aa58 | [
"MIT"
] | 12 | 2020-08-26T23:35:11.000Z | 2020-09-02T15:55:42.000Z | pyofx/ofx_property_defs.py | davebarkeruk/pyofx | 1f5b637a4036a476d29c4ef05a14902ad2b6aa58 | [
"MIT"
] | null | null | null | #! /usr/bin/python3
#
# Copyright 2020 by David Barker.
# All rights reserved.
# This file is part of pyofx the Python3 based OpenFX plugin render host,
# and is released under the "MIT License Agreement". Please see the LICENSE
# file that should have been included as part of this package.
OFX_PROPERTY_DEFS = {
'OfxImageClipPropConnected': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': [0, 1]
},
'OfxImageClipPropContinuousSamples': {
'param_type': 'int',
'dimensions': 1,
'default': 0,
'valid_values': [0, 1]
},
'OfxImageClipPropFieldExtraction': {
'param_type': 'str',
'dimensions': 1,
'default': 'OfxImageFieldDoubled',
'valid_values': [
'OfxImageFieldBoth',
'OfxImageFieldSingle',
'OfxImageFieldDoubled'
]
},
'OfxImageClipPropFieldOrder': {
'param_type': 'str',
'dimensions': 1,
'default': 'OfxImageFieldNone',
'valid_values': [
'OfxImageFieldNone',
'OfxImageFieldLower',
'OfxImageFieldUpper'
]
},
'OfxImageClipPropIsMask': {
'param_type': 'int',
'dimensions': 1,
'default': 0,
'valid_values': [0, 1]
},
'OfxImageClipPropOptional': {
'param_type': 'int',
'dimensions': 1,
'default': 0,
'valid_values': [0, 1]
},
'OfxImageClipPropUnmappedComponents': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': [
'OfxImageComponentNone',
'OfxImageComponentRGBA',
'OfxImageComponentRGB',
'OfxImageComponentAlpha'
]
},
'OfxImageClipPropUnmappedPixelDepth': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': [
'OfxBitDepthNone',
'OfxBitDepthByte',
'OfxBitDepthShort',
'OfxBitDepthHalf',
'OfxBitDepthFloat'
]
},
'OfxImageEffectFrameVarying': {
'param_type': 'int',
'dimensions': 1,
'default': 0,
'valid_values': [0, 1]
},
'OfxImageEffectHostPropIsBackground': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': [0, 1]
},
'OfxImageEffectHostPropNativeOrigin': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': [
'OfxImageEffectHostPropNativeOriginBottomLeft',
'OfxImageEffectHostPropNativeOriginTopLeft',
'OfxImageEffectHostPropNativeOriginCenter'
]
},
'OfxImageEffectInstancePropEffectDuration': {
'param_type': 'dbl',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxImageEffectInstancePropSequentialRender': {
'param_type': 'int',
'dimensions': 1,
'default': 0,
'valid_values': [0, 1, 2]
},
'OfxImageEffectPluginPropFieldRenderTwiceAlways': {
'param_type': 'int',
'dimensions': 1,
'default': 1,
'valid_values': [0, 1]
},
'OfxImageEffectPluginPropGrouping': {
'param_type': 'str',
'dimensions': 1,
'default': '',
'valid_values': None
},
'OfxImageEffectPluginPropHostFrameThreading': {
'param_type': 'int',
'dimensions': 1,
'default': 0,
'valid_values': [0, 1]
},
'OfxImageEffectPluginPropOverlayInteractV1': {
'param_type': 'ptr',
'dimensions': 1,
'default': 0,
'valid_values': None
},
'OfxImageEffectPluginPropSingleInstance': {
'param_type': 'int',
'dimensions': 1,
'default': 0,
'valid_values': [0, 1]
},
'OfxImageEffectPluginRenderThreadSafety': {
'param_type': 'str',
'dimensions': 1,
'default': 'OfxImageEffectRenderInstanceSafe',
'valid_values': [
'OfxImageEffectRenderUnsafe',
'OfxImageEffectRenderInstanceSafe',
'OfxImageEffectRenderFullySafe'
]
},
'OfxImageEffectPropClipPreferencesSlaveParam': {
'param_type': 'str',
'dimensions': 0,
'default': None,
'valid_values': None
},
'OfxImageEffectPropComponents': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': [
'OfxImageComponentNone',
'OfxImageComponentRGBA',
'OfxImageComponentRGB',
'OfxImageComponentAlpha'
]
},
'OfxImageEffectPropContext': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': [
'OfxImageEffectContextGenerator',
'OfxImageEffectContextFilter',
'OfxImageEffectContextTransition',
'OfxImageEffectContextPaint',
'OfxImageEffectContextGeneral',
'OfxImageEffectContextRetimer'
]
},
'OfxImageEffectPropFieldToRender': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': [
'OfxImageFieldNone',
'OfxImageFieldBoth',
'OfxImageFieldLower',
'OfxImageFieldUpper'
]
},
'OfxImageEffectPropFrameRange': {
'param_type': 'dbl',
'dimensions': 2,
'default': None,
'valid_values': None
},
'OfxImageEffectPropFrameRate': {
'param_type': 'dbl',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxImageEffectPropFrameStep': {
'param_type': 'dbl',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxImageEffectPropInteractiveRenderStatus': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': [0, 1]
},
'OfxImageEffectPropOpenGLEnabled': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': [0, 1]
},
'OfxImageEffectPropOpenGLRenderSupported': {
'param_type': 'str',
'dimensions': 1,
'default': 'false',
'valid_values': [
'false',
'true',
'needed'
]
},
'OfxImageEffectPropOpenGLTextureIndex': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxImageEffectPropOpenGLTextureTarget': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxImageEffectPropPixelDepth': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': [
'OfxBitDepthNone',
'OfxBitDepthByte',
'OfxBitDepthShort',
'OfxBitDepthHalf',
'OfxBitDepthFloat'
]
},
'OfxImageEffectPropPluginHandle': {
'param_type': 'ptr',
'dimensions': 1,
'default': 0,
'valid_values': None
},
'OfxImageEffectPropPreMultiplication': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': [
'OfxImageOpaque',
'OfxImagePreMultiplied',
'OfxImageUnPreMultiplied'
]
},
'OfxImageEffectPropProjectExtent': {
'param_type': 'dbl',
'dimensions': 2,
'default': None,
'valid_values': None
},
'OfxImageEffectPropProjectOffset': {
'param_type': 'dbl',
'dimensions': 2,
'default': None,
'valid_values': None
},
'OfxImageEffectPropPixelAspectRatio': {
'param_type': 'dbl',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxImageEffectPropProjectSize': {
'param_type': 'dbl',
'dimensions': 2,
'default': None,
'valid_values': None
},
'OfxImageEffectPropRegionOfDefinition': {
'param_type': 'dbl',
'dimensions': 4,
'default': None,
'valid_values': None
},
'OfxImageEffectPropRegionOfInterest': {
'param_type': 'dbl',
'dimensions': 4,
'default': None,
'valid_values': None
},
'OfxImageEffectPropRenderQualityDraft': {
'param_type': 'int',
'dimensions': 1,
'default': 0,
'valid_values': [0, 1]
},
'OfxImageEffectPropRenderScale': {
'param_type': 'dbl',
'dimensions': 2,
'default': None,
'valid_values': None
},
'OfxImageEffectPropRenderWindow': {
'param_type': 'dbl',
'dimensions': 4,
'default': None,
'valid_values': None
},
'OfxImageEffectPropSequentialRenderStatus': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': [0, 1]
},
'OfxImageEffectPropSetableFielding': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': [0, 1]
},
'OfxImageEffectPropSetableFrameRate': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': [0, 1]
},
'OfxImageEffectPropSupportedComponents': {
'param_type': 'str',
'dimensions': 0,
'default': None,
'valid_values': [
'OfxImageComponentNone',
'OfxImageComponentRGBA',
'OfxImageComponentRGB',
'OfxImageComponentAlpha'
]
},
'OfxImageEffectPropSupportedContexts': {
'param_type': 'str',
'dimensions': 0,
'default': None,
'valid_values': [
'OfxImageEffectContextGenerator',
'OfxImageEffectContextFilter',
'OfxImageEffectContextTransition',
'OfxImageEffectContextPaint',
'OfxImageEffectContextGeneral',
'OfxImageEffectContextRetimer'
]
},
'OfxImageEffectPropSupportedPixelDepths': {
'param_type': 'str',
'dimensions': 0,
'default': None,
'valid_values': [
'OfxBitDepthNone',
'OfxBitDepthByte',
'OfxBitDepthShort',
'OfxBitDepthHalf',
'OfxBitDepthFloat'
]
},
'OfxImageEffectPropSupportsMultiResolution': {
'param_type': 'int',
'dimensions': 1,
'default': 1,
'valid_values': [0, 1]
},
'OfxImageEffectPropMultipleClipDepths': {
'param_type': 'int',
'dimensions': 1,
'default': 0,
'valid_values': [0, 1]
},
'OfxImageEffectPropSupportsMultipleClipPARs': {
'param_type': 'int',
'dimensions': 1,
'default': 0,
'valid_values': [0, 1]
},
'OfxImageEffectPropSupportsOverlays': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': [0, 1]
},
'OfxImageEffectPropSupportsTiles': {
'param_type': 'int',
'dimensions': 1,
'default': 1,
'valid_values': [0, 1]
},
'OfxImageEffectPropTemporalClipAccess': {
'param_type': 'int',
'dimensions': 1,
'default': 0,
'valid_values': [0, 1]
},
'OfxImageEffectPropUnmappedFrameRange': {
'param_type': 'dbl',
'dimensions': 2,
'default': None,
'valid_values': None
},
'OfxImageEffectPropUnmappedFrameRate': {
'param_type': 'dbl',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxImagePropBounds': {
'param_type': 'int',
'dimensions': 4,
'default': None,
'valid_values': None
},
'OfxImagePropData': {
'param_type': 'ptr',
'dimensions': 1,
'default': 0,
'valid_values': None
},
'OfxImagePropField': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': [
'OfxImageFieldNone',
'OfxImageFieldBoth',
'OfxImageFieldLower',
'OfxImageFieldUpper'
]
},
'OfxImagePropPixelAspectRatio': {
'param_type': 'dbl',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxImagePropRegionOfDefinition': {
'param_type': 'int',
'dimensions': 4,
'default': None,
'valid_values': None
},
'OfxImagePropRowBytes': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxImagePropUniqueIdentifier': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxInteractPropBackgroundColour': {
'param_type': 'dbl',
'dimensions': 3,
'default': None,
'valid_values': None
},
'OfxInteractPropBitDepth': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxInteractPropHasAlpha': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': [0, 1]
},
'OfxInteractPropPenPosition': {
'param_type': 'dbl',
'dimensions': 2,
'default': None,
'valid_values': None
},
'OfxInteractPropPenPressure': {
'param_type': 'dbl',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxInteractPropPenViewportPosition': {
'param_type': 'int',
'dimensions': 2,
'default': None,
'valid_values': None
},
'OfxInteractPropPixelScale': {
'param_type': 'dbl',
'dimensions': 2,
'default': None,
'valid_values': None
},
'OfxInteractPropSlaveToParam': {
'param_type': 'str',
'dimensions': 0,
'default': None,
'valid_values': None
},
'OfxInteractPropSuggestedColour': {
'param_type': 'dbl',
'dimensions': 3,
'default': [1.0, 1.0, 1.0],
'valid_values': None
},
'OfxOpenGLPropPixelDepth': {
'param_type': 'str',
'dimensions': 0,
'default': None,
'valid_values': [
'OfxBitDepthNone',
'OfxBitDepthByte',
'OfxBitDepthShort',
'OfxBitDepthHalf',
'OfxBitDepthFloat'
]
},
'OfxParamHostPropMaxPages': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxParamHostPropMaxParameters': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxParamHostPropPageRowColumnCount': {
'param_type': 'int',
'dimensions': 2,
'default': None,
'valid_values': None
},
'OfxParamHostPropSupportsBooleanAnimation': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': [0, 1]
},
'OfxParamHostPropSupportsChoiceAnimation': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': [0, 1]
},
'OfxParamHostPropSupportsCustomAnimation': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': [0, 1]
},
'OfxParamHostPropSupportsCustomInteract': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': [0, 1]
},
'OfxParamHostPropSupportsParametricAnimation': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': [0, 1]
},
'OfxParamHostPropSupportsStringAnimation': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': [0, 1]
},
'OfxParamPropAnimates': {
'param_type': 'int',
'dimensions': 1,
'default': 1,
'valid_values': [0, 1]
},
'OfxParamPropCacheInvalidation': {
'param_type': 'str',
'dimensions': 1,
'default': 'OfxParamInvalidateValueChange',
'valid_values': [
'OfxParamInvalidateValueChange',
'OfxParamInvalidateValueChangeToEnd',
'OfxParamInvalidateAll'
]
},
'OfxParamPropCanUndo': {
'param_type': 'int',
'dimensions': 1,
'default': 1,
'valid_values': [0, 1]
},
'OfxParamPropChoiceOption': {
'param_type': 'str',
'dimensions': 0,
'default': None,
'valid_values': None
},
'OfxParamPropCustomCallbackV1': {
'param_type': 'ptr',
'dimensions': 1,
'default': 0,
'valid_values': None
},
'OfxParamPropCustomValue': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxParamPropDataPtr': {
'param_type': 'ptr',
'dimensions': 1,
'default': 0,
'valid_values': None
},
'OfxParamPropDefault': {
'param_type': ['int', 'dbl', 'str'],
'dimensions': 0,
'default': None,
'valid_values': None
},
'OfxParamPropDefaultCoordinateSystem': {
'param_type': 'str',
'dimensions': 1,
'default': 'OfxParamCoordinatesCanonical',
'valid_values': [
'OfxParamCoordinatesCanonical',
'OfxParamCoordinatesNormalised'
]
},
'OfxParamPropDigits': {
'param_type': 'int',
'dimensions': 1,
'default': 2,
'valid_values': None
},
'OfxParamPropDimensionLabel': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxParamPropDisplayMax': {
'param_type': ['int', 'dbl'],
'dimensions': 0,
'default': [9999999999],
'valid_values': None
},
'OfxParamPropDisplayMin': {
'param_type': ['int', 'dbl'],
'dimensions': 0,
'default': [-9999999999],
'valid_values': None
},
'OfxParamPropDoubleType': {
'param_type': 'str',
'dimensions': 1,
'default': 'OfxParamDoubleTypePlain',
'valid_values': [
'OfxParamDoubleTypePlain',
'OfxParamDoubleTypeAngle',
'OfxParamDoubleTypeScale',
'OfxParamDoubleTypeTime',
'OfxParamDoubleTypeAbsoluteTime'
]
},
'OfxParamPropEnabled': {
'param_type': 'int',
'dimensions': 1,
'default': 1,
'valid_values': [0, 1]
},
'OfxParamPropEvaluateOnChange': {
'param_type': 'int',
'dimensions': 1,
'default': 1,
'valid_values': [0, 1]
},
'OfxParamPropGroupOpen': {
'param_type': 'int',
'dimensions': 1,
'default': 1,
'valid_values': [0, 1]
},
'OfxParamPropHasHostOverlayHandle': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': [0, 1]
},
'OfxParamPropHint': {
'param_type': 'str',
'dimensions': 1,
'default': '',
'valid_values': None
},
'OfxParamPropIncrement': {
'param_type': 'dbl',
'dimensions': 1,
'default': 1,
'valid_values': None
},
'OfxParamPropInteractMinimumSize': {
'param_type': 'dbl',
'dimensions': 2,
'default': [10, 10],
'valid_values': None
},
'OfxParamPropInteractPreferedSize': {
'param_type': 'int',
'dimensions': 2,
'default': [10, 10],
'valid_values': None
},
'OfxParamPropInteractSize': {
'param_type': 'dbl',
'dimensions': 2,
'default': None,
'valid_values': None
},
'OfxParamPropInteractSizeAspect': {
'param_type': 'dbl',
'dimensions': 1,
'default': 1,
'valid_values': None
},
'OfxParamPropInteractV1': {
'param_type': 'ptr',
'dimensions': 1,
'default': 0,
'valid_values': None
},
'OfxParamPropInterpolationAmount': {
'param_type': 'dbl',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxParamPropInterpolationTime': {
'param_type': 'dbl',
'dimensions': 2,
'default': None,
'valid_values': None
},
'OfxParamPropIsAnimating': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': [0, 1]
},
'OfxParamPropIsAutoKeying': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': [0, 1]
},
'OfxParamPropMax': {
'param_type': ['int', 'dbl'],
'dimensions': 0,
'default': [9999999999],
'valid_values': None
},
'OfxParamPropMin': {
'param_type': ['int', 'dbl'],
'dimensions': 0,
'default': [-9999999999],
'valid_values': None
},
'OfxParamPropPageChild': {
'param_type': 'str',
'dimensions': 0,
'default': [''],
'valid_values': None
},
'OfxParamPropParametricDimension': {
'param_type': 'int',
'dimensions': 1,
'default': 1,
'valid_values': None
},
'OfxParamPropParametricInteractBackground': {
'param_type': 'ptr',
'dimensions': 1,
'default': 0,
'valid_values': None
},
'OfxParamPropParametricRange': {
'param_type': 'dbl',
'dimensions': 2,
'default': None,
'valid_values': None
},
'OfxParamPropParametricUIColour': {
'param_type': 'dbl',
'dimensions': 3,
'default': None,
'valid_values': None
},
'OfxParamPropParent': {
'param_type': 'str',
'dimensions': 1,
'default': '',
'valid_values': None
},
'OfxParamPropPersistant': {
'param_type': 'int',
'dimensions': 1,
'default': 1,
'valid_values': [0, 1]
},
'OfxParamPropPluginMayWrite': {
'param_type': 'int',
'dimensions': 1,
'default': 0,
'valid_values': [0, 1]
},
'OfxParamPropScriptName': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxParamPropSecret': {
'param_type': 'int',
'dimensions': 1,
'default': 0,
'valid_values': [0, 1]
},
'OfxParamPropShowTimeMarker': {
'param_type': 'int',
'dimensions': 1,
'default': 0,
'valid_values': [0, 1]
},
'OfxParamPropStringFilePathExists': {
'param_type': 'int',
'dimensions': 1,
'default': 1,
'valid_values': [0, 1]
},
'OfxParamPropStringMode': {
'param_type': 'str',
'dimensions': 1,
'default': 'OfxParamStringIsSingleLine',
'valid_values': [
'OfxParamStringIsSingleLine',
'OfxParamStringIsMultiLine',
'OfxParamStringIsFilePath',
'OfxParamStringIsDirectoryPath',
'OfxParamStringIsLabel',
'OfxParamStringIsRichTextFormat'
]
},
'OfxParamPropType': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': [
'OfxParamTypeInteger',
'OfxParamTypeDouble',
'OfxParamTypeBoolean',
'OfxParamTypeChoice',
'OfxParamTypeRGBA',
'OfxParamTypeRGB',
'OfxParamTypeDouble2D',
'OfxParamTypeInteger2D',
'OfxParamTypeDouble3D',
'OfxParamTypeInteger3D',
'OfxParamTypeString',
'OfxParamTypeCustom',
'OfxParamTypeGroup',
'OfxParamTypePage',
'OfxParamTypePushButton'
]
},
'kOfxParamPropUseHostOverlayHandle': {
'param_type': 'int',
'dimensions': 1,
'default': 0,
'valid_values': [0, 1]
},
'OfxPluginPropFilePath': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxPluginPropParamPageOrder': {
'param_type': 'str',
'dimensions': 0,
'default': [''],
'valid_values': None
},
'OfxPropAPIVersion': {
'param_type': 'int',
'dimensions': 0,
'default': None,
'valid_values': None
},
'OfxPropChangeReason': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': [
'OfxChangeUserEdited',
'OfxChangePluginEdited',
'OfxChangeTime'
]
},
'OfxPropEffectInstance': {
'param_type': 'ptr',
'dimensions': 1,
'default': 0,
'valid_values': None
},
'OfxPropHostOSHandle': {
'param_type': 'ptr',
'dimensions': 1,
'default': 0,
'valid_values': None
},
'OfxPropIcon': {
'param_type': 'str',
'dimensions': 2,
'default': ['', ''],
'valid_values': None
},
'OfxPropInstanceData': {
'param_type': 'ptr',
'dimensions': 1,
'default': 0,
'valid_values': None
},
'OfxPropIsInteractive': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': [0, 1]
},
'kOfxPropKeyString': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': None
},
'kOfxPropKeySym': {
'param_type': 'int',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxPropLabel': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxPropLongLabel': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxPropName': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxPropParamSetNeedsSyncing': {
'param_type': 'int',
'dimensions': 1,
'default': 0,
'valid_values': [0, 1]
},
'OfxPropPluginDescription': {
'param_type': 'str',
'dimensions': 1,
'default': '',
'valid_values': None
},
'OfxPropShortLabel': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': None
},
'OfxPropTime': {
'param_type': 'dbl',
'dimensions': 1,
'default': 0,
'valid_values': None
},
'OfxPropType': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': [
'OfxTypeImageEffectHost',
'OfxTypeImageEffect',
'OfxTypeImageEffectInstance',
'OfxTypeParameter',
'OfxTypeParameterInstance',
'OfxTypeClip',
'OfxTypeImage'
]
},
'OfxPropVersion': {
'param_type': 'int',
'dimensions': 0,
'default': [0],
'valid_values': None
},
'OfxPropVersionLabel': {
'param_type': 'str',
'dimensions': 1,
'default': None,
'valid_values': None
}
}
| 27.668893 | 75 | 0.490361 |
d69439129e4a86cac2b1b658429e6bd692fa9a98 | 1,045 | py | Python | autoPoster.py | Xlarchs/facebook-auto-post-automation | 8dc2e59e2a8e4ae6c3d1318b5b114430bffe5fd0 | [
"Apache-2.0"
] | null | null | null | autoPoster.py | Xlarchs/facebook-auto-post-automation | 8dc2e59e2a8e4ae6c3d1318b5b114430bffe5fd0 | [
"Apache-2.0"
] | null | null | null | autoPoster.py | Xlarchs/facebook-auto-post-automation | 8dc2e59e2a8e4ae6c3d1318b5b114430bffe5fd0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pyperclip
import pyautogui
import time
import string
string="your text" #string text
f = open('groups.txt','r') #Group id must be in text file example:121545789562
groups=[]
for line in f:
groups.append(line.strip())
time.sleep(5)
pyautogui.keyDown('ctrl')
pyautogui.keyDown('t')
pyautogui.keyUp('t')
pyautogui.keyUp('ctrl')
for i in range(len(groups)):
link = 'https://facebook.com/groups/'+groups[i]
pyautogui.typewrite(link)
pyautogui.typewrite('\n')
print("Waiting for 12 seconds\n")
time.sleep(12)
pyautogui.typewrite('p')
time.sleep(2)
print("Writing post\n")
for char in string:#for Unique Characters
pyperclip.copy(char)
pyautogui.hotkey('ctrl','v')
print("done")
time.sleep(2)
pyautogui.keyDown('ctrl')
pyautogui.keyDown('enter')
pyautogui.keyUp('enter')
pyautogui.keyUp('ctrl')
time.sleep(3)
pyautogui.write(['f6'])
time.sleep(1) | 21.326531 | 79 | 0.626794 |
311f39eade6c3b4e1231940c7175f8f454a62484 | 3,511 | py | Python | shopyo/api/tests/test_models.py | xdshivani/shopyo | 11a4a95968c50feb90bcb396c33b0e41c20025a3 | [
"MIT"
] | 235 | 2019-06-30T22:21:29.000Z | 2022-03-31T06:12:12.000Z | shopyo/api/tests/test_models.py | xdshivani/shopyo | 11a4a95968c50feb90bcb396c33b0e41c20025a3 | [
"MIT"
] | 441 | 2019-06-26T20:07:58.000Z | 2021-05-05T17:44:23.000Z | shopyo/api/tests/test_models.py | rehmanis/shopyo2 | 3e26602149f09aa4c13a1b4b6fba296bd82af99f | [
"MIT"
] | 157 | 2019-06-26T22:30:39.000Z | 2022-03-22T09:06:24.000Z | """
Tests all db utilities such as CRUDMixin defined under shopapi/models.py
Most of the test cases taken from:
https://github.com/cookiecutter-flask/cookiecutter-flask
"""
import pytest
from flask_login import UserMixin
from init import db
from shopyo.api.models import PkModel
class ExampleUserModel(PkModel, UserMixin):
    """Minimal user model used to exercise PkModel / CRUDMixin in the tests.

    Inherits the integer primary key (and CRUD helpers) from ``PkModel``
    and the Flask-Login interface from ``UserMixin``.
    """
    __tablename__ = "testusers"
    # Unique login name; NOT NULL so inserts without it fail loudly.
    username = db.Column(db.String(100), unique=True, nullable=False)
    # Unique contact address; NOT NULL, same rationale as above.
    email = db.Column(db.String(120), unique=True, nullable=False)
class TestPKModel:
    """Exercise the primary-key lookup helpers provided by ``PkModel``."""

    def test_get_by_id(self):
        # A non-numeric / unknown id must yield a falsy result, not raise.
        assert not ExampleUserModel.get_by_id("myId")

    def test_get_by_id_valid(self, db_session):
        # Persist two rows so the lookup has to pick the right one.
        first = ExampleUserModel(username="bar", email="bar@domain.com")
        second = ExampleUserModel(username="foo", email="foo@domain.com")
        for record in (first, second):
            record.save()

        # Reference row fetched independently through a plain ORM query.
        via_query = (
            db_session.query(ExampleUserModel)
            .filter(ExampleUserModel.username == "bar")
            .scalar()
        )
        assert via_query

        # get_by_id accepts the id both as an int and as a numeric string.
        for key in (first.id, str(first.id)):
            found = ExampleUserModel.get_by_id(key)
            assert found
            assert found.username == via_query.username
class TestCRUDMixin:
    """Test class for testing all CRUD functions"""

    def test_create(self, db_session):
        # create() should both instantiate and persist the row.
        user = ExampleUserModel.create(username="bar", email="bar@domain.com")
        # Verify via raw SQL ...
        result_raw = db_session.execute(
            """select * from testusers"""
        ).fetchone()
        # ... and via the ORM, filtered on the primary key create() assigned.
        result_orm = (
            db_session.query(ExampleUserModel)
            .filter(ExampleUserModel.id == user.id)
            .scalar()
        )
        assert result_orm
        assert result_raw
        assert result_raw.username == "bar"
        assert result_orm.username == "bar"

    # Expected values encode the commit semantics: with commit=True the raw
    # SELECT sees the new username ("foo"); with commit=False it still reads
    # the old one ("bar").
    @pytest.mark.parametrize(
        "commit,expected", [(True, "foo"), (False, "bar")]
    )
    def test_update_single(self, db_session, commit, expected):
        user = ExampleUserModel(username="bar", email="bar@domain.com")
        user.save()
        # Update a single column; persistence depends on the commit flag.
        user.update(commit=commit, username="foo")
        result = db_session.execute("""select * from testusers""").fetchone()
        assert result
        assert result.username == expected

    # Same commit semantics as above, but updating two columns at once.
    @pytest.mark.parametrize(
        "commit,expected",
        [
            (True, {"username": "foo", "email": "foo@domain.com"}),
            (False, {"username": "bar", "email": "bar@domain.com"}),
        ],
    )
    def test_update_multiple(self, db_session, commit, expected):
        user = ExampleUserModel(username="bar", email="bar@domain.com")
        user.save()
        user.update(commit=commit, username="foo", email="foo@domain.com")
        result = db_session.execute("""select * from testusers""").fetchone()
        assert result
        assert result.username == expected["username"]
        assert result.email == expected["email"]

    # commit=True actually removes the row (get_by_id returns None);
    # commit=False leaves it readable, so the username is still "bar".
    @pytest.mark.parametrize("commit,expected", [(True, None), (False, "bar")])
    def test_delete(self, commit, expected):
        user = ExampleUserModel(username="bar", email="bar@domain.com")
        user.save()
        user.delete(commit=commit)
        result = ExampleUserModel.get_by_id(user.id)
        if result:
            # Row still exists: compare by username below.
            result = result.username
        assert result == expected
| 32.813084 | 79 | 0.636856 |
eed23a572e7f04803af648f771d88f80bb64fb0a | 1,517 | py | Python | qiskit/extensions/standard/iden.py | rickyHong/qiskit-terra-repl | ded4f8f578d67ca5b26dda74cacfa7553f2c0299 | [
"Apache-2.0"
] | null | null | null | qiskit/extensions/standard/iden.py | rickyHong/qiskit-terra-repl | ded4f8f578d67ca5b26dda74cacfa7553f2c0299 | [
"Apache-2.0"
] | null | null | null | qiskit/extensions/standard/iden.py | rickyHong/qiskit-terra-repl | ded4f8f578d67ca5b26dda74cacfa7553f2c0299 | [
"Apache-2.0"
] | 1 | 2019-06-13T08:07:26.000Z | 2019-06-13T08:07:26.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Identity gate.
"""
import numpy
from qiskit.circuit import Gate
from qiskit.circuit import QuantumCircuit
from qiskit.circuit import QuantumRegister
from qiskit.extensions.standard.u3 import U3Gate
class IdGate(Gate):
    """Single-qubit identity (no-op) gate."""

    def __init__(self, label=None):
        """Create a new Identity gate.

        Args:
            label: optional label for the gate.
        """
        super().__init__("id", 1, [], label=label)

    def _define(self):
        # Decompose as a single U3(0, 0, 0), i.e. the identity rotation.
        qreg = QuantumRegister(1, "q")
        self.definition = [(U3Gate(0, 0, 0), [qreg[0]], [])]

    def inverse(self):
        """Return the inverse gate; the identity is its own inverse."""
        return IdGate()

    def to_matrix(self):
        """Return the 2x2 identity matrix as a complex Numpy array."""
        return numpy.eye(2, dtype=complex)
def iden(self, q):
    """Apply the Identity gate to qubit ``q`` of this circuit.

    Args:
        q: the qubit to apply the (no-op) gate to.

    Returns:
        The result of ``QuantumCircuit.append`` for the appended gate.
    """
    return self.append(IdGate(), [q], [])


# Monkey-patch the helper onto QuantumCircuit so circuits expose an
# ``iden()`` method alongside the other standard-gate shortcuts.
QuantumCircuit.iden = iden
| 26.155172 | 77 | 0.627554 |
164e7dba62f7fd692eab20116274bbd815b67bcd | 15,572 | py | Python | tests/test_new_datastructures.py | jjlawren/pysonos | fc8f37036d207523bf7843b7063608373c3f1e27 | [
"MIT"
] | 8 | 2019-02-17T11:54:56.000Z | 2021-07-13T06:26:25.000Z | tests/test_new_datastructures.py | jjlawren/pysonos | fc8f37036d207523bf7843b7063608373c3f1e27 | [
"MIT"
] | 28 | 2018-09-22T10:47:11.000Z | 2021-06-14T20:42:31.000Z | tests/test_new_datastructures.py | jjlawren/pysonos | fc8f37036d207523bf7843b7063608373c3f1e27 | [
"MIT"
] | 13 | 2019-04-04T08:13:00.000Z | 2021-05-17T07:22:23.000Z | """Module to test the data structure classes with pytest."""
import pytest
from pysonos import data_structures
from pysonos.exceptions import DIDLMetadataError
from pysonos.xml import XML
def assert_xml_equal(left, right, explain=None):
    """Helper function for comparing XML elements.

    Recursively compares tags, attributes, text and tails, collecting every
    difference so useful information is output under pytest as to the
    differences between elements.

    Args
        left (Element): an Elementtree.Element to compare
        right (Element): an Element to compare it with
        explain (list, optional): a list that difference descriptions are
            appended to; a new list is created when omitted. (Previously this
            parameter was silently overwritten and never used.)

    Raises
        AssertionError: if the Elements do not match
    """

    def _build_explanation(left, right, explain):
        # Append a human-readable description of every difference between
        # the two elements (recursively) to `explain`.
        if left.tag != right.tag:
            explain.append(
                "tag <{}> does not match tag <{}>".format(left.tag, right.tag)
            )
        for name, value in left.attrib.items():
            if right.get(name) != value:
                explain.append(
                    "%s attribute of element <%s> does not match: %s=%r, %s=%r"
                    % (name, left.tag, name, value, name, right.get(name))
                )
        for name in right.attrib:
            if name not in left.attrib:
                explain.append(
                    "right element <%s> has attribute %s but left does not"
                    % (left.tag, name)
                )
        if left.text != right.text:
            explain.append(
                "text for element <{}>: {!r} != {!r}".format(
                    left.tag, left.text, right.text
                )
            )
        if left.tail != right.tail:
            # Bug fix: this message previously printed .text instead of .tail.
            explain.append(
                "tail for element <{}>: {!r} != {!r}".format(
                    left.tag, left.tail, right.tail
                )
            )
        # Bug fix: zip() used to silently ignore surplus children on the
        # longer side; report a child-count mismatch explicitly.
        if len(left) != len(right):
            explain.append(
                "element <{}> has {} children but <{}> has {}".format(
                    left.tag, len(left), right.tag, len(right)
                )
            )
        for i1, i2 in zip(left, right):
            _build_explanation(i1, i2, explain)

    if explain is None:
        explain = []
    _build_explanation(left, right, explain)
    if explain:
        header = "Comparing XML elements {} and {}".format(left, right)
        assert False, "\n".join([header] + explain)
class TestResource:
    """Testing the Resource class."""
    def test_create_didl_resource_with_no_params(self):
        # uri and protocol_info are required positional arguments
        with pytest.raises(TypeError):
            res = data_structures.DidlResource()
    def test_create_didl_resource(self):
        res = data_structures.DidlResource("a%20uri", "a:protocol:info:xx")
        assert res.uri == "a%20uri"
        assert res.protocol_info == "a:protocol:info:xx"
    def test_create_didl_resource_to_from_element(self, helpers):
        # Round-trip: DidlResource -> XML element -> DidlResource
        res = data_structures.DidlResource("a%20uri", "a:protocol:info:xx", bitrate=3)
        elt = res.to_element()
        assert helpers.compare_xml(
            elt,
            XML.fromstring(
                b'<res bitrate="3" ' b'protocolInfo="a:protocol:info:xx">a%20uri</res>'
            ),
        )
        assert data_structures.DidlResource.from_element(elt) == res
    def test_didl_resource_to_dict(self):
        res = data_structures.DidlResource("a%20uri", "a:protocol:info:xx")
        rez = res.to_dict()
        assert rez["uri"] == "a%20uri"
        assert rez["protocol_info"] == "a:protocol:info:xx"
        # all 12 resource attributes are present, unset ones as None
        assert len(rez) == 12
    def test_didl_resource_to_dict_remove_nones(self):
        res = data_structures.DidlResource("a%20uri", "a:protocol:info:xx")
        rez = res.to_dict(remove_nones=True)
        assert rez["uri"] == "a%20uri"
        assert rez["protocol_info"] == "a:protocol:info:xx"
        # only the two attributes actually set survive remove_nones=True
        assert len(rez) == 2
    def test_didl_resource_from_dict(self):
        # to_dict()/from_dict() must round-trip to an equal resource
        res = data_structures.DidlResource("a%20uri", "a:protocol:info:xx")
        rez = data_structures.DidlResource.from_dict(res.to_dict())
        assert res == rez
    def test_didl_resource_from_dict_remove_nones(self):
        # round-trip must also work when None entries were stripped
        res = data_structures.DidlResource("a%20uri", "a:protocol:info:xx")
        rez = data_structures.DidlResource.from_dict(res.to_dict(remove_nones=True))
        assert res == rez
    def test_didl_resource_eq(self):
        res = data_structures.DidlResource("a%20uri", "a:protocol:info:xx")
        # not equal to an unrelated DIDL object ...
        assert res != data_structures.DidlObject(
            title="a_title", parent_id="pid", item_id="iid"
        )
        # ... nor None; but equal to itself
        assert res is not None
        assert res == res
class TestDidlObject:
    """Testing the DidlObject base class."""
    # Sample DIDL-Lite metadata used by the from_element tests below.
    didl_xml = """
    <item xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"
      xmlns:dc="http://purl.org/dc/elements/1.1/"
      xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
      id="iid" parentID="pid" restricted="true">
         <dc:title>the_title</dc:title>
         <upnp:class>object</upnp:class>
         <dc:creator>a_creator</dc:creator>
         <desc id="cdudn"
           nameSpace="urn:schemas-rinconnetworks-com:metadata-1-0/">DUMMY</desc>
    </item>
    """
    def test_create_didl_object_with_no_params(self):
        # title, parent_id and item_id are required
        with pytest.raises(TypeError):
            didl_object = data_structures.DidlObject()
    def test_create_didl_object_with_disallowed_params(self):
        # unknown keyword arguments must be rejected
        with pytest.raises(ValueError) as excinfo:
            didl_object = data_structures.DidlObject(
                title="a_title", parent_id="pid", item_id="iid", bad_args="other"
            )
        assert "not allowed" in str(excinfo.value)
    def test_create_didl_object_with_good_params(self):
        didl_object = data_structures.DidlObject(
            title="a_title",
            parent_id="pid",
            item_id="iid",
            creator="a_creator",
            desc="dummy",
        )
        assert didl_object is not None
        assert didl_object.title == "a_title"
        assert didl_object.parent_id == "pid"
        assert didl_object.item_id == "iid"
        assert didl_object.creator == "a_creator"
        assert didl_object.resources == []
        assert didl_object.desc == "dummy"
    def test_didl_object_from_wrong_element(self):
        # Using the wrong element
        elt = XML.fromstring("""<res>URI</res>""")
        with pytest.raises(DIDLMetadataError) as excinfo:
            didl_object = data_structures.DidlObject.from_element(elt)
        # NOTE(review): this "assert" only checks a bare (always truthy)
        # string literal; the following line is a no-op expression, so the
        # message is never actually verified -- TODO fix the assertion.
        assert "Wrong element. Expected <item> or <container>, "
        "got <res> for class object" in str(excinfo.value)
    def test_didl_object_from_element(self):
        elt = XML.fromstring(self.didl_xml)
        didl_object = data_structures.DidlObject.from_element(elt)
        assert didl_object.title == "the_title"
        assert didl_object.parent_id == "pid"
        assert didl_object.item_id == "iid"
        assert didl_object.creator == "a_creator"
        assert didl_object.desc == "DUMMY"
        assert didl_object.item_class == "object"
    def test_didl_object_from_element_unoff_subelement(self):
        """Test that for a DidlObject created from an element with an
        unofficial .# specified sub class, that the sub class is
        completely ignored
        """
        elt = XML.fromstring(self.didl_xml.replace("object", "object.#SubClass"))
        didl_object = data_structures.DidlObject.from_element(elt)
        assert didl_object.item_class == "object"
    def test_didl_object_from_wrong_class(self):
        # mismatched upnp class
        bad_elt1 = XML.fromstring(
            """<item xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"
             xmlns:dc="http://purl.org/dc/elements/1.1/"
             xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
             id="iid" parentID="pid" restricted="true">
               <dc:title>the_title</dc:title>
               <upnp:class>object.item</upnp:class>
               <dc:creator>a_creator</dc:creator>
               <desc id="cdudn"
                 nameSpace="urn:schemas-rinconnetworks-com:metadata-1-0/">
                 RINCON_AssociatedZPUDN
               </desc>
             </item>
        """
        )
        with pytest.raises(DIDLMetadataError) as excinfo:
            didl_object = data_structures.DidlObject.from_element(bad_elt1)
        assert ("UPnP class is incorrect. Expected 'object', got 'object.item'") in str(
            excinfo.value
        )
    def test_didl_object_from_dict(self):
        didl_object = data_structures.DidlObject(
            title="a_title",
            parent_id="pid",
            item_id="iid",
            creator="a_creator",
            desc="dummy",
        )
        the_dict = {
            "title": "a_title",
            "parent_id": "pid",
            "item_id": "iid",
            "creator": "a_creator",
            "restricted": True,
            "desc": "dummy",
        }
        assert data_structures.DidlObject.from_dict(the_dict) == didl_object
        # adding in an attribute not in _translation should make no difference
        the_dict["creator"] = "another_creator"
        assert data_structures.DidlObject.from_dict(the_dict) != didl_object
        # round trip
        assert data_structures.DidlObject.from_dict(the_dict).to_dict() == the_dict
    def test_didl_object_from_dict_resources(self):
        # from_dict must also reconstruct nested resource dicts
        resources_list = [data_structures.DidlResource("a%20uri", "a:protocol:info:xx")]
        didl_object = data_structures.DidlObject(
            title="a_title",
            parent_id="pid",
            item_id="iid",
            creator="a_creator",
            desc="dummy",
            resources=resources_list,
        )
        the_dict = {
            "title": "a_title",
            "parent_id": "pid",
            "item_id": "iid",
            "creator": "a_creator",
            "restricted": True,
            "desc": "dummy",
            "resources": [resource.to_dict() for resource in resources_list],
        }
        assert data_structures.DidlObject.from_dict(the_dict) == didl_object
    def test_didl_object_from_dict_resources_remove_nones(self):
        # same as above, but with None entries stripped from the resources
        resources_list = [data_structures.DidlResource("a%20uri", "a:protocol:info:xx")]
        didl_object = data_structures.DidlObject(
            title="a_title",
            parent_id="pid",
            item_id="iid",
            creator="a_creator",
            desc="dummy",
            resources=resources_list,
        )
        the_dict = {
            "title": "a_title",
            "parent_id": "pid",
            "item_id": "iid",
            "creator": "a_creator",
            "restricted": True,
            "desc": "dummy",
            "resources": [
                resource.to_dict(remove_nones=True) for resource in resources_list
            ],
        }
        assert data_structures.DidlObject.from_dict(the_dict) == didl_object
    def test_didl_comparisons(self):
        didl_object_1 = data_structures.DidlObject(
            title="a_title", parent_id="pid", item_id="iid", creator="a_creator"
        )
        didl_object_2 = data_structures.DidlObject(
            title="a_title", parent_id="pid", item_id="iid", creator="a_creator"
        )
        # should be not the same, but equal!
        assert didl_object_1 is not didl_object_2
        assert didl_object_1 == didl_object_2
        didl_object_3 = data_structures.DidlObject(
            title="a_title",
            parent_id="pid",
            item_id="iid",
            creator="a__different_creator",
        )
        assert didl_object_3 != didl_object_1
    def test_didl_object_to_dict(self):
        didl_object = data_structures.DidlObject(
            title="a_title", parent_id="pid", item_id="iid", creator="a_creator"
        )
        the_dict = {
            "title": "a_title",
            "parent_id": "pid",
            "item_id": "iid",
            "creator": "a_creator",
            "restricted": True,
            "desc": "RINCON_AssociatedZPUDN",
        }
        assert didl_object.to_dict() == the_dict
        # adding in an attribute not in _translation should make no difference
        didl_object.other = "other"
        assert didl_object.to_dict() == the_dict
        # but changing on the other should
        didl_object.creator = "another"
        assert didl_object.to_dict() != the_dict
    def test_didl_object_to_dict_resources(self):
        # resources must be serialised as nested dicts
        resources_list = [data_structures.DidlResource("a%20uri", "a:protocol:info:xx")]
        didl_object = data_structures.DidlObject(
            title="a_title",
            parent_id="pid",
            item_id="iid",
            creator="a_creator",
            resources=resources_list,
        )
        the_dict = {
            "title": "a_title",
            "parent_id": "pid",
            "item_id": "iid",
            "creator": "a_creator",
            "restricted": True,
            "desc": "RINCON_AssociatedZPUDN",
            "resources": [resource.to_dict() for resource in resources_list],
        }
        assert didl_object.to_dict() == the_dict
    def test_didl_object_to_dict_resources_remove_nones(self):
        # remove_nones=True must propagate down into the resource dicts
        resources_list = [data_structures.DidlResource("a%20uri", "a:protocol:info:xx")]
        didl_object = data_structures.DidlObject(
            title="a_title",
            parent_id="pid",
            item_id="iid",
            creator="a_creator",
            resources=resources_list,
        )
        the_dict = {
            "title": "a_title",
            "parent_id": "pid",
            "item_id": "iid",
            "creator": "a_creator",
            "restricted": True,
            "desc": "RINCON_AssociatedZPUDN",
            "resources": [
                resource.to_dict(remove_nones=True) for resource in resources_list
            ],
        }
        assert didl_object.to_dict(remove_nones=True) == the_dict
    def test_didl_object_to_element(self):
        didl_object = data_structures.DidlObject(
            title="a_title", parent_id="pid", item_id="iid", creator="a_creator"
        )
        # we seem to have to go through this to get ElementTree to deal
        # with namespaces properly!
        elt = XML.fromstring(XML.tostring(didl_object.to_element(True)))
        elt2 = XML.fromstring(
            '<dummy xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/" '
            + 'xmlns:dc="http://purl.org/dc/elements/1.1/" '
            + 'xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/">'
            + '<item id="iid" parentID="pid" restricted="true">'
            + "<dc:title>a_title</dc:title>"
            + "<dc:creator>a_creator</dc:creator>"
            + '<upnp:class>object</upnp:class><desc id="cdudn" '
            + 'nameSpace="urn:schemas-rinconnetworks-com:metadata-1-0/">'
            + "RINCON_AssociatedZPUDN</desc></item></dummy>"
        )[0]
        assert_xml_equal(elt2, elt)
# There is an overview of all observed classes, whether they are in or
# out of base DIDL in official_and_extended_didl_classes.txt
def test_didl_object_inheritance():
    """Test that DIDL object inheritance is as indicated by the didl class.

    Each registered class's Python base class must be the class registered
    for its parent DIDL class (the DIDL class string minus the last
    dot-separated component).
    """
    # Augment the registry with the root "object" class so that base-class
    # lookups for direct children of "object" succeed.
    class_dict = data_structures._DIDL_CLASS_TO_CLASS.copy()
    class_dict["object"] = data_structures.DidlObject
    for didl_class, soco_class in data_structures._DIDL_CLASS_TO_CLASS.items():
        # Skip this one, because its DIDL class is expected to be an error
        if didl_class == "object.itemobject.item.sonos-favorite":
            continue
        # object does not inherit
        if didl_class == "object":
            continue
        # First make sure it is registered with the correct DIDL class
        assert didl_class == soco_class.item_class
        base_didl_class = ".".join(didl_class.split(".")[:-1])
        # BUG FIX: look the base class up in the augmented class_dict (which
        # was previously built but never used) so that classes whose parent
        # DIDL class is "object" resolve correctly.
        base_class = class_dict[base_didl_class]
        assert base_class == soco_class.__bases__[0]
| 38.449383 | 88 | 0.596006 |
90bd31761472accd9683628487079048ee343a72 | 5,163 | py | Python | forest/navigate.py | SGallagherMet/forest | e3fa392b3f874e3439539007275ab6a1770ef53f | [
"BSD-3-Clause"
] | null | null | null | forest/navigate.py | SGallagherMet/forest | e3fa392b3f874e3439539007275ab6a1770ef53f | [
"BSD-3-Clause"
] | null | null | null | forest/navigate.py | SGallagherMet/forest | e3fa392b3f874e3439539007275ab6a1770ef53f | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import fnmatch
import glob
import os
from .exceptions import (
InitialTimeNotFound,
ValidTimesNotFound,
PressuresNotFound)
from forest import (
db,
gridded_forecast,
ghrsstl4,
unified_model,
eida50,
rdt,
intake_loader,
saf,
)
class Navigator:
    """Routes navigation queries to one sub-navigator per file group.

    Sub-navigators are keyed by each group's glob pattern.
    """
    # TODO: once a "Group" abstraction exists, each group could build its
    # own navigator (keyed by label rather than pattern), which would remove
    # the need for _from_group() and the file-type dispatch in
    # FileSystemNavigator.from_file_type().

    def __init__(self, config):
        self._navigators = {}
        for group in config.file_groups:
            self._navigators[group.pattern] = self._from_group(group)

    @classmethod
    def _from_group(cls, group):
        """Build the appropriate sub-navigator for a single file group."""
        if group.locator == 'database':
            return db.get_database(group.database_path)
        expanded = cls._expand_paths(group.pattern)
        return FileSystemNavigator.from_file_type(expanded, group.file_type)

    @classmethod
    def _expand_paths(cls, pattern):
        """Expand ``~`` and glob wildcards into a list of concrete paths."""
        return glob.glob(os.path.expanduser(pattern))

    def variables(self, pattern):
        """List variable names available for the given pattern."""
        return self._navigators[pattern].variables(pattern)

    def initial_times(self, pattern, variable=None):
        """List initial (model run) times for the given pattern."""
        return self._navigators[pattern].initial_times(pattern, variable=variable)

    def valid_times(self, pattern, variable, initial_time):
        """List valid times for a variable at a given initial time."""
        return self._navigators[pattern].valid_times(pattern, variable, initial_time)

    def pressures(self, pattern, variable, initial_time):
        """List pressure levels for a variable at a given initial time."""
        return self._navigators[pattern].pressures(pattern, variable, initial_time)
class FileSystemNavigator:
    """Navigates collections of file(s).

    Searches a fixed list of paths with :mod:`fnmatch` and delegates
    per-file metadata extraction to a file-type-specific ``Coordinates``
    object.

    .. note:: This is a naive implementation designed
              to support basic command line file usage
    """
    def __init__(self, paths, coordinates=None):
        # paths: list of file paths matched against query patterns
        self.paths = paths
        # default to unified_model coordinates when none supplied
        if coordinates is None:
            coordinates = unified_model.Coordinates()
        self.coordinates = coordinates
    @classmethod
    def from_file_type(cls, paths, file_type):
        # Dispatch on file type (case-insensitive).  Some types return a
        # dedicated Navigator implementation directly; the rest construct a
        # FileSystemNavigator with type-specific Coordinates.
        if file_type.lower() == "rdt":
            coordinates = rdt.Coordinates()
        elif file_type.lower() == "eida50":
            coordinates = eida50.Coordinates()
        elif file_type.lower() == 'griddedforecast':
            # XXX This needs a "Group" object ... not "paths"
            return gridded_forecast.Navigator(paths)
        elif file_type.lower() == 'intake':
            return intake_loader.Navigator()
        elif file_type.lower() == 'ghrsstl4':
            return ghrsstl4.Navigator(paths)
        elif file_type.lower() == "unified_model":
            coordinates = unified_model.Coordinates()
        elif file_type.lower() == "saf":
            coordinates = saf.Coordinates()
        else:
            raise Exception("Unrecognised file type: '{}'".format(file_type))
        return cls(paths, coordinates)
    def variables(self, pattern):
        """Return sorted unique variable names across matching files."""
        paths = fnmatch.filter(self.paths, pattern)
        names = []
        for path in paths:
            names += self.coordinates.variables(path)
        return list(sorted(set(names)))
    def initial_times(self, pattern, variable=None):
        """Return sorted unique initial times across matching files.

        Files without an initial time (None or raising
        InitialTimeNotFound) are silently skipped.
        """
        paths = fnmatch.filter(self.paths, pattern)
        times = []
        for path in paths:
            try:
                time = self.coordinates.initial_time(path)
                if time is None:
                    continue
                times.append(time)
            except InitialTimeNotFound:
                pass
        return list(sorted(set(times)))
    def valid_times(self, pattern, variable, initial_time):
        """Return unique valid times for `variable` across matching files.

        Files where the variable has no valid times are skipped; an empty
        list is returned when nothing matches.
        """
        paths = fnmatch.filter(self.paths, pattern)
        arrays = []
        for path in paths:
            try:
                array = self.coordinates.valid_times(path, variable)
                if array is None:
                    continue
                arrays.append(array)
            except ValidTimesNotFound:
                pass
        if len(arrays) == 0:
            return []
        return np.unique(np.concatenate(arrays))
    def pressures(self, pattern, variable, initial_time):
        """Return unique pressure levels for `variable` across matching files.

        Mirrors valid_times(): files without pressures are skipped.
        """
        paths = fnmatch.filter(self.paths, pattern)
        arrays = []
        for path in paths:
            try:
                array = self.coordinates.pressures(path, variable)
                if array is None:
                    continue
                arrays.append(array)
            except PressuresNotFound:
                pass
        if len(arrays) == 0:
            return []
        return np.unique(np.concatenate(arrays))
| 34.885135 | 77 | 0.595197 |
8e84157197a7f125b9787673196a4202b93b1de3 | 516 | py | Python | 0-notes/job-search/Cracking_the_Coding_Interview/C06MathLogicPuzzles/questions/6.7-question.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | 0-notes/job-search/Cracking_the_Coding_Interview/C06MathLogicPuzzles/questions/6.7-question.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | 0-notes/job-search/Cracking_the_Coding_Interview/C06MathLogicPuzzles/questions/6.7-question.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | # 6.7 The Apocalypse
# In the new post-apocalyptic world, the world queen is desperately concerned about the birth rate.
# Therefore, she decrees that all families abide by this policy.
# That is, they have to continue to have children until they have 1 girl, at which point they immediately stop.
# What will the gender ratio of the new generation be?
# Assume that the odds of someone having a boy or girl on any given pregnancy are equal.
# Solve this out logically and then write a computer simulation of it.
| 46.909091 | 111 | 0.775194 |
17722e3bc1c08c98b0dac62383cb7a940508bd43 | 13,499 | py | Python | scrapeNHL.py | dradeng/DefaceTheHouse | ae95c82f692eb1e68a015610439c5edf610b37ae | [
"MIT"
] | null | null | null | scrapeNHL.py | dradeng/DefaceTheHouse | ae95c82f692eb1e68a015610439c5edf610b37ae | [
"MIT"
] | null | null | null | scrapeNHL.py | dradeng/DefaceTheHouse | ae95c82f692eb1e68a015610439c5edf610b37ae | [
"MIT"
] | null | null | null | import requests
import json
import pymongo
from bs4 import BeautifulSoup
''' Oddsshark SCRAPE SECTION '''
URL = 'https://www.oddsshark.com/ncaab/middle-tennessee-north-texas-odds-march-10-2021-1389706'
espnURL = 'https://www.espn.com/mens-college-basketball/game?gameId=401301264'
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
results = soup.find(id='gc-data').get_text(strip=True)
parsed = json.loads(results)
print(json.dumps(parsed, indent=4, sort_keys=True))
print(parsed['oddsshark_gamecenter']['gameCenterData'])
date = parsed['oddsshark_gamecenter']['matchup']['header_date']
location = parsed['oddsshark_gamecenter']['matchup']['stadium']
home_abbreviation = parsed['oddsshark_gamecenter']['matchup']['home_abbreviation']
home_name = parsed['oddsshark_gamecenter']['matchup']['home_name']
home_primary_color = parsed['oddsshark_gamecenter']['matchup']['home_primary_color']
home_predicted_score = parsed['oddsshark_gamecenter']['gameCenterData']['home_predicted_score']
home_team_subtitle = parsed['oddsshark_gamecenter']['gameCenterData']['home_team_subtitle']
home_value = parsed['oddsshark_gamecenter']['consensus']['home_value']
home_votes = parsed['oddsshark_gamecenter']['consensus']['home_votes']
under_votes = parsed['oddsshark_gamecenter']['consensus']['under_votes']
over_votes = parsed['oddsshark_gamecenter']['consensus']['over_votes']
if over_votes == 0:
over_votes = 50
under_votes = 50
if home_votes == 0:
away_votes = 50
home_votes = 50
away_abbreviation = parsed['oddsshark_gamecenter']['matchup']['away_abbreviation']
away_name = parsed['oddsshark_gamecenter']['matchup']['away_name']
away_primary_color = parsed['oddsshark_gamecenter']['matchup']['away_primary_color']
away_predicted_score = parsed['oddsshark_gamecenter']['gameCenterData']['away_predicted_score']
away_team_subtitle = parsed['oddsshark_gamecenter']['gameCenterData']['away_team_subtitle']
away_value = parsed['oddsshark_gamecenter']['consensus']['away_value']
away_votes = parsed['oddsshark_gamecenter']['consensus']['away_votes']
best_home_spread_number = parsed['oddsshark_gamecenter']['bestOdds']['home']['spread'];
best_home_spread_price = parsed['oddsshark_gamecenter']['bestOdds']['home']['spread_price'];
best_home_spread_image = parsed['oddsshark_gamecenter']['bestOdds']['home']['spread_book_image'];
best_home_spread_link = parsed['oddsshark_gamecenter']['bestOdds']['home']['spread_book_name'];
best_home_moneyline_price = parsed['oddsshark_gamecenter']['bestOdds']['home']['money_line'];
best_home_moneyline_image = parsed['oddsshark_gamecenter']['bestOdds']['home']['money_line_book_image'];
best_home_moneyline_link = parsed['oddsshark_gamecenter']['bestOdds']['home']['money_line_book_name'];
best_away_spread_number = parsed['oddsshark_gamecenter']['bestOdds']['away']['spread'];
best_away_spread_price = parsed['oddsshark_gamecenter']['bestOdds']['away']['spread_price'];
best_away_spread_image = parsed['oddsshark_gamecenter']['bestOdds']['away']['spread_book_image'];
best_away_spread_link = parsed['oddsshark_gamecenter']['bestOdds']['away']['spread_book_name'];
best_away_moneyline_price = parsed['oddsshark_gamecenter']['bestOdds']['away']['money_line'];
best_away_moneyline_image = parsed['oddsshark_gamecenter']['bestOdds']['away']['money_line_book_image'];
best_away_moneyline_link = parsed['oddsshark_gamecenter']['bestOdds']['away']['money_line_book_name'];
best_over_price = parsed['oddsshark_gamecenter']['bestOdds']['total']['over_price'];
best_over_number = parsed['oddsshark_gamecenter']['bestOdds']['total']['over_total'];
best_over_link = parsed['oddsshark_gamecenter']['bestOdds']['total']['over_book_name'];
best_over_image = parsed['oddsshark_gamecenter']['bestOdds']['total']['over_book_image'];
best_under_price = parsed['oddsshark_gamecenter']['bestOdds']['total']['under_price'];
best_under_number = parsed['oddsshark_gamecenter']['bestOdds']['total']['under_total'];
best_under_link = parsed['oddsshark_gamecenter']['bestOdds']['total']['under_book_name'];
best_under_image = parsed['oddsshark_gamecenter']['bestOdds']['total']['under_book_image'];
home_trends = parsed['oddsshark_gamecenter']['trends']['home'];
away_trends = parsed['oddsshark_gamecenter']['trends']['away'];
arr_home_trends = []
arr_away_trends = []
home_injuries = parsed['oddsshark_gamecenter']['injuries']['home'];
away_injuries = parsed['oddsshark_gamecenter']['injuries']['away'];
arr_home_injuries = []
arr_away_injuries = []
# Map the sportsbook names scraped from Oddsshark onto our affiliate URLs.
# This replaces six copy-pasted if/elif chains which contained real bugs:
#  - the away-moneyline and away-spread 'Intertops' branches assigned
#    best_over_link instead of their own variable, and
#  - the away-moneyline 'BetNow' branch was a bare string expression (no-op).
def _affiliate_link(book_name):
    """Return the affiliate URL for a sportsbook name.

    Unknown or missing book names fall back to the Intertops affiliate
    link (same default as the original chains).
    """
    affiliate_links = {
        '5Dimes': 'https://www.5dimes.eu/',
        'BOVADA.LV': 'https://www.bovada.lv/',
        'BetOnline': 'https://betonline.ag/',
        'Intertops': 'https://link.intertops.eu/c/408817/',
        'BetNow': 'https://www.betnow.eu/wc/clicks.php?aff=DRADENGAFFNEY@GMAIL.COM_715_881_32740',
        'SportsBetting': 'https://sportsbetting.ag/',  # commission kings
    }
    return affiliate_links.get(book_name, 'https://link.intertops.eu/c/408817/')

# Replace the scraped book names with clickable affiliate URLs.
best_home_moneyline_link = _affiliate_link(best_home_moneyline_link)
best_away_moneyline_link = _affiliate_link(best_away_moneyline_link)
best_home_spread_link = _affiliate_link(best_home_spread_link)
best_away_spread_link = _affiliate_link(best_away_spread_link)
best_over_link = _affiliate_link(best_over_link)
best_under_link = _affiliate_link(best_under_link)
# Flatten the trend entries down to their display strings.
arr_home_trends = [trend['value'] for trend in home_trends]
arr_away_trends = [trend['value'] for trend in away_trends]
# Render each injury as "Name: description, message".
arr_home_injuries = [
    injury['name'] + ': ' + injury['description'] + ', ' + injury['message']
    for injury in home_injuries
]
arr_away_injuries = [
    injury['name'] + ': ' + injury['description'] + ', ' + injury['message']
    for injury in away_injuries
]
''' ESPN SCRAPE SECTION '''
page = requests.get(espnURL)
soup = BeautifulSoup(page.content, 'html.parser')
# All betting numbers live in td.score cells; positions within that list
# identify the market (see the counter loop below).
espnStats = soup.find_all('td', class_='score')
# Defaults in case the expected cells are missing from the page.
over_under = 0
home_spread = 0
away_spread = 0
home_moneyline = 0
away_moneyline = 0
# ESPN's matchup-predictor win percentages, e.g. "63.2%".
home_win_percentage = soup.find('span', class_='value-home').get_text(strip=True)
away_win_percentage = soup.find('span', class_='value-away').get_text(strip=True)
# Pick specific cells out of the score table by 1-based position.
# NOTE(review): these fixed indices (4,5,6,9,10) assume ESPN's table layout
# never changes -- fragile; verify against the live page if values look off.
count = 0
for stat in espnStats:
    count = count + 1
    if count == 4:
        away_spread = stat.get_text(strip=True)
    elif count == 5:
        away_moneyline = stat.get_text(strip=True)
    elif count == 6:
        over_under = stat.get_text(strip=True)
    elif count == 9:
        home_spread = stat.get_text(strip=True)
    elif count == 10:
        home_moneyline = stat.get_text(strip=True)
# Blend the Oddsshark predicted scores with ESPN's win probabilities.
home_predicted_score = float(home_predicted_score)
away_predicted_score = float(away_predicted_score)
predicted_diff = abs(home_predicted_score - away_predicted_score)
# Convert the win-probability gap into an approximate point margin.
# NOTE(review): the /6 scaling looks like an empirical percent-to-points
# heuristic -- confirm with whoever tuned it.
espn_diff = abs((float(home_win_percentage[:-1]) - float(away_win_percentage[:-1]))/6)
changed = False
print(espn_diff)
print(predicted_diff)
# Nudge the predicted scores (0.3 pts per step) towards the ESPN favourite
# until the predicted margin is within 0.5 pts of the ESPN-implied margin.
while (espn_diff > predicted_diff+.5):
    if (float(home_win_percentage[:-1]) >= float(away_win_percentage[:-1])):
        home_predicted_score += .3
        away_predicted_score -= .3
    else:
        home_predicted_score -= .3
        away_predicted_score += .3
    predicted_diff = abs(home_predicted_score - away_predicted_score)
    changed = True
# If no adjustment happened, give the home side a tiny bump.
# NOTE(review): always favours home regardless of the ESPN favourite --
# presumably a home-court nudge; confirm intent.
if not changed:
    home_predicted_score += .1
    away_predicted_score -= .1
# Extra widening for blow-out predictions (>~15% win-prob-derived margin).
if (espn_diff > 15):
    home_predicted_score += 1
    away_predicted_score -= .5
# Global downward shift of half a point on both sides.
home_predicted_score -= .5
away_predicted_score -= .5
# Stored as strings for the document insert below.
home_predicted_score = str(home_predicted_score)
away_predicted_score = str(away_predicted_score)
# Persist everything scraped above as one game document.
# SECURITY: database credentials are hard-coded in the connection string;
# move them into environment variables or a config file and rotate them.
client = pymongo.MongoClient("mongodb://databaseUser:password11@cluster0-shard-00-00.9fxpc.mongodb.net:27017,cluster0-shard-00-01.9fxpc.mongodb.net:27017,cluster0-shard-00-02.9fxpc.mongodb.net:27017/myFirstDatabase?ssl=true&replicaSet=atlas-unth7l-shard-0&authSource=admin&retryWrites=true&w=majority")
mydb = client["DefaceTheHouse"]
# NOTE(review): collection is named "nhlgames" although the URLs above are
# NCAAB pages -- presumably a copy-paste leftover; confirm intended target.
mycol = mydb["nhlgames"]
mydict = {
    "date": date,
    "location": location,
    "home_abbreviation": home_abbreviation,
    "home_name": home_name,
    "home_primary_color": home_primary_color,
    "home_predicted_score": home_predicted_score,
    "home_team_subtitle": home_team_subtitle,
    "home_value": home_value,
    "home_votes": home_votes,
    "away_abbreviation": away_abbreviation,
    "away_name": away_name,
    "away_primary_color": away_primary_color,
    "away_predicted_score": away_predicted_score,
    "away_team_subtitle": away_team_subtitle,
    "away_value": away_value,
    "away_votes": away_votes,
    "over_under": over_under,
    "home_spread": home_spread,
    "away_spread": away_spread,
    "home_moneyline": home_moneyline,
    "away_moneyline": away_moneyline,
    "home_win_percentage": home_win_percentage,
    "away_win_percentage": away_win_percentage,
    "over_votes": over_votes,
    "under_votes": under_votes,
    'best_home_spread_number': best_home_spread_number,
    'best_home_spread_price': best_home_spread_price,
    'best_home_spread_image': best_home_spread_image,
    'best_home_spread_link': best_home_spread_link,
    'best_home_moneyline_price': best_home_moneyline_price,
    'best_home_moneyline_image': best_home_moneyline_image,
    'best_home_moneyline_link': best_home_moneyline_link,
    'best_away_spread_number': best_away_spread_number,
    'best_away_spread_price': best_away_spread_price,
    'best_away_spread_image': best_away_spread_image,
    'best_away_spread_link': best_away_spread_link,
    'best_away_moneyline_price': best_away_moneyline_price,
    'best_away_moneyline_image': best_away_moneyline_image,
    'best_away_moneyline_link': best_away_moneyline_link,
    'best_over_price': best_over_price,
    'best_over_number': best_over_number,
    'best_over_link': best_over_link,
    'best_over_image': best_over_image,
    'best_under_price': best_under_price,
    'best_under_number': best_under_number,
    'best_under_link': best_under_link,
    'best_under_image': best_under_image,
    'home_trends': arr_home_trends,
    'away_trends': arr_away_trends,
    'home_injuries': arr_home_injuries,
    'away_injuries': arr_away_injuries,
    # Records start at 0-0 and are presumably updated elsewhere.
    'model_spread_record': '0-0',
    'model_over_under_record': '0-0',
}
x = mycol.insert_one(mydict)
a06105bee26990547cedf749ae0dd05a53aa6b96 | 4,963 | py | Python | Schweinchenstempel.py | polarwinkel/Schweinchenstempel | b3acc351fce90186f2264325e56ff9edcb0f21c5 | [
"WTFPL"
] | null | null | null | Schweinchenstempel.py | polarwinkel/Schweinchenstempel | b3acc351fce90186f2264325e56ff9edcb0f21c5 | [
"WTFPL"
] | null | null | null | Schweinchenstempel.py | polarwinkel/Schweinchenstempel | b3acc351fce90186f2264325e56ff9edcb0f21c5 | [
"WTFPL"
] | null | null | null | #!/bin/python3
# This file is part of the free Software Schweinchenstempel.
# It is (c) 2018 under the WTFPL Version 2.0:
# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
# Version 2, December 2004
#
#Copyright (C) 2018 Dirk Winkel <it@polarwinkel.de>
#
#Everyone is permitted to copy and distribute verbatim or modified
#copies of this license document, and changing it is allowed as long
#as the name is changed.
#
# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
# TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
#
# 0. You just DO WHAT THE FUCK YOU WANT TO.
import http.server
import socketserver
from http import HTTPStatus
from collections import OrderedDict
import yaml
import os
import datetime
# --- configuration ---
namen = 'Ole,Erik,Lasse'  # comma-separated names of the children
#anzahl = '5'
port = 2121  # TCP port the HTTP server listens on
schweinchenfile = 'schweinchen.yml'  # YAML file persisting the stamps
kinder = ['']  # filled by parseNamen() with one entry per child
def parseNamen():
    """Parse the comma-separated ``namen`` config string into the
    module-level ``kinder`` list (one entry per child).

    Mutates ``kinder`` in place so existing references stay valid.  Unlike
    the previous character-by-character parser, this is also safe to call
    more than once (the old version appended onto whatever was already in
    ``kinder`` and would corrupt it on a second call).
    """
    kinder[:] = namen.split(',')
def reset():
    """Overwrite the YAML store with an empty stamp record per child.

    Derives the record keys from the configured children instead of the
    previous hard-coded name list (falling back to splitting ``namen``
    when parseNamen() has not run yet), and writes via a context manager
    so the file handle is always closed.
    """
    names = kinder if kinder != [''] else namen.split(',')
    data = {name: {'Schweinchen': [], 'Sternchen': []} for name in names}
    with open(schweinchenfile, 'w') as f:
        yaml.safe_dump(data, f)
def createPage():
    """Render the HTML overview page from the saved stamp data.

    Returns the page encoded as UTF-8 bytes, ready for ``wfile.write``.
    """
    page = '<!doctype html><html><head><title>Schweinchen- und Sternchenstempel</title><style>body{font-family: Arial, Helvetica, sans-serif;}</style></head>\n<body>\n'
    # yaml.load() without an explicit Loader is deprecated and unsafe; the
    # file only holds plain lists/strings, so safe_load suffices.  The
    # context manager also closes the handle the original open() leaked.
    with open(schweinchenfile, 'r') as f:
        data = yaml.safe_load(f)
    for kind in kinder:
        page = page+'<h1 style="font-size:50px; margin:0; padding-bottom:0;">'+kind+'</h1>\n'
        page = page+getSchweinchen(kind, data[kind])
        page = page+'<form method="post" enctype="text/plain">\n'
        page = page+'<button name="Schweinchen" value="%s"><p style="font-size:50px; margin:0;">+1<img src="Schweinchen.svg" height="60px" /></p></button>\n'% kind
        page = page+'<button name="Sternchen" value="%s"><p style="font-size:50px; margin:0;">+1<img src="Sternchen.svg" height="60px" /></p></button><br />\n'% kind
        page = page+'</form><br />\n'
        page = page+'<hr>'
    page = page+'<form method="post" enctype="text/plain"><button name="reset" value="true"><p style="font-size:50px; margin:0;">Reset</p></button></form>\n'
    page = page+'</body></html>'
    return bytes(page, 'utf8')
def getSchweinchen(name, data):
    """Build the HTML stamp gallery fragment for one child.

    ``name`` is accepted for interface compatibility but is not used here;
    ``data`` is the child's dict with ``'Schweinchen'`` and ``'Sternchen'``
    lists of timestamp strings.  Returns a plain HTML string.
    """
    parts = []
    for stamp in data['Schweinchen']:
        parts.append('<figure style="float:left;"><img src="Schweinchen.svg" height="120px" alt="%s" />' % stamp)
        parts.append('<figcaption>%s</figcaption></figure>\n' % stamp)
    parts.append('<br style="clear:both" />\n')
    for stamp in data['Sternchen']:
        parts.append('<figure style="float:left;"><img src="Sternchen.svg" height="120px" alt="%s"/>' % stamp)
        parts.append('<figcaption>%s</figcaption></figure>\n' % stamp)
    parts.append('<br style="clear:both" />')
    return ''.join(parts)
def changeSchweinchen(post_data):
    """Update the stamp YAML file according to the posted form data.

    ``post_data`` is the decoded text/plain request body.  Recognised
    payloads: ``reset=true`` (wipe everything), ``Schweinchen=<name>`` and
    ``Sternchen=<name>`` (append one timestamped stamp for that child).
    """
    print(post_data)  # debug trace of the raw form payload
    if 'reset=true' in post_data:
        reset()
        return
    # safe_load: file holds plain lists/strings only; context managers close
    # the handles that the bare open() calls previously leaked.
    with open(schweinchenfile, 'r') as f:
        data = yaml.safe_load(f)
    for kind in kinder:
        if 'Schweinchen='+kind in post_data:
            data[kind]['Schweinchen'].append('{0:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()))
            with open(schweinchenfile, 'w') as f:
                yaml.safe_dump(data, f)
        elif 'Sternchen='+kind in post_data:
            data[kind]['Sternchen'].append('{0:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()))
            with open(schweinchenfile, 'w') as f:
                yaml.safe_dump(data, f)
class Handler(http.server.SimpleHTTPRequestHandler):
    ''' Handler to serve the Website and get the POST-data '''
    def do_GET(self):
        ''' return page on GET-request '''
        self.send_response(HTTPStatus.OK)
        self.end_headers()
        if self.path.endswith(".svg"):
            # Serve the stamp images; basename() confines lookups to the
            # working directory (chdir'd to the script directory at startup).
            # NOTE(review): responds 200 before checking the file exists --
            # a missing SVG raises after the status line was already sent.
            f=open(os.path.basename(self.path), 'rb')
            self.wfile.write(f.read())
            f.close()
        else:
            # Every other path gets the dynamically rendered overview page.
            self.wfile.write(createPage())
    def do_POST(self):
        ''' get the POST-Data and give feedback if it is fine to work with '''
        self.send_response(HTTPStatus.OK)
        self.end_headers()
        # Read exactly the announced body length (forms post as text/plain).
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length)
        changeSchweinchen(str(post_data, 'utf8'))
        # Reply with the refreshed page so the new stamp shows immediately.
        self.wfile.write(createPage())
# --- startup -----------------------------------------------------------------
parseNamen()
# Run relative to the script's own directory so the YAML database and the
# SVG assets resolve regardless of the caller's working directory.
if os.path.dirname(__file__) != '':
    os.chdir(os.path.dirname(__file__))
# First start: create an empty stamp database.
if not os.path.isfile(schweinchenfile):
    reset()
# Allow quick restarts without waiting for sockets in TIME_WAIT.
socketserver.TCPServer.allow_reuse_address = True
httpd = socketserver.TCPServer(('', port), Handler)
httpd.serve_forever()
| 39.704 | 168 | 0.632279 |
c5022dd1bf26d72429d2e84f3c7e062e3a26beff | 139 | py | Python | seven_eleven.py | lxgreen/seven_eleven | 4562aac7565ac642157ec979d9ca3b8bb876693f | [
"MIT"
] | null | null | null | seven_eleven.py | lxgreen/seven_eleven | 4562aac7565ac642157ec979d9ca3b8bb876693f | [
"MIT"
] | null | null | null | seven_eleven.py | lxgreen/seven_eleven | 4562aac7565ac642157ec979d9ca3b8bb876693f | [
"MIT"
] | null | null | null | from app.app import SevenElevenApp
def main():
    """Instantiate SevenElevenApp with no parent and run its event loop.

    NOTE(review): ``mainloop()`` suggests a Tkinter-style GUI application --
    confirm in ``app/app.py``.
    """
    app = SevenElevenApp(None)
    app.mainloop()
if __name__ == '__main__':
    main()
| 12.636364 | 34 | 0.654676 |
bf868a4a98850e25063bdb4f5985799eb08b693d | 511 | py | Python | blood/migrations/0005_subs.py | nikhilxifer/bloodbankmanagement | a31ce090c33fa2499f26ec685220d207d9f08394 | [
"MIT"
] | null | null | null | blood/migrations/0005_subs.py | nikhilxifer/bloodbankmanagement | a31ce090c33fa2499f26ec685220d207d9f08394 | [
"MIT"
] | null | null | null | blood/migrations/0005_subs.py | nikhilxifer/bloodbankmanagement | a31ce090c33fa2499f26ec685220d207d9f08394 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.4 on 2021-06-16 12:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: creates the ``Subs`` table holding a single
    e-mail field (presumably site subscribers -- confirm against app usage)."""
    # Must be applied after the bloodrequest date-field migration.
    dependencies = [
        ('blood', '0004_bloodrequest_date'),
    ]
    operations = [
        migrations.CreateModel(
            name='Subs',
            fields=[
                # Standard auto-incrementing integer primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Subscriber address; 254 chars is Django's EmailField default cap.
                ('email', models.EmailField(max_length=254)),
            ],
        ),
    ]
| 24.333333 | 114 | 0.577299 |
46f4ab2fceaa2382d421f8743b76c520a16d3cdc | 2,806 | py | Python | macdaily/cls/logging/brew.py | JarryShaw/MacDaily | 853b841dd1f1f7e6aae7bf2c305ff008bc76055c | [
"BSD-3-Clause"
] | 10 | 2018-09-20T19:57:56.000Z | 2021-11-14T18:28:10.000Z | macdaily/cls/logging/brew.py | JarryShaw/jsdaily | 3ca7aa7c75a12dc08ab44f78af2b089e1ed41d3d | [
"BSD-3-Clause"
] | 2 | 2020-05-31T08:49:47.000Z | 2021-12-28T16:57:42.000Z | macdaily/cls/logging/brew.py | JarryShaw/jsdaily | 3ca7aa7c75a12dc08ab44f78af2b089e1ed41d3d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import shutil
import sys
import tempfile
import traceback
from macdaily.cmd.logging import LoggingCommand
from macdaily.core.brew import BrewCommand
from macdaily.util.compat import subprocess
from macdaily.util.const.term import bold, flash, purple_bg, red, red_bg, reset, under
from macdaily.util.tools.get import get_logfile
from macdaily.util.tools.make import make_stderr
from macdaily.util.tools.print import print_info, print_scpt, print_term, print_text
from macdaily.util.tools.script import script
class BrewLogging(BrewCommand, LoggingCommand):
    """Logging command that records installed Homebrew packages as a Brewfile.

    Combines the Homebrew specifics from ``BrewCommand`` with the generic
    logging workflow from ``LoggingCommand``; the actual dump is delegated
    to the external ``brew bundle`` subcommand.
    """
    @property
    def log(self):
        # Base name of the produced log file.
        return 'Brewfile'
    @property
    def ext(self):
        # A Brewfile carries no file extension.
        return ''
    def _check_exec(self):
        """Verify that ``brew`` and its ``bundle`` subcommand are available.

        Returns True (and caches the brew executable path) on success;
        prints installation hints and returns False otherwise.
        """
        try:
            # `brew command bundle` exits non-zero when the bundle tap is missing.
            subprocess.check_call(['brew', 'command', 'bundle'],
                                  stdout=subprocess.DEVNULL, stderr=make_stderr(self._vflag))
        except subprocess.CalledProcessError:
            print_text(traceback.format_exc(), self._file, redirect=self._vflag)
            print(f'macdaily-{self.cmd}: {red_bg}{flash}brew{reset}: command not found', file=sys.stderr)
            text = (f'macdaily-{self.cmd}: {red}brew{reset}: you may find Bundler on '
                    f'{purple_bg}{under}https://github.com/Homebrew/homebrew-bundle{reset}, '
                    f'or install Bundler through following command -- '
                    f"`{bold}brew tap homebrew/bundle{reset}'")
            print_term(text, self._file, redirect=self._qflag)
            return False
        self._var__exec_path = shutil.which('brew')
        return True
    def _parse_args(self, namespace):
        # Pull the relevant CLI flags out of the parsed-argument mapping.
        self._quiet = namespace.get('quiet', False)  # pylint: disable=attribute-defined-outside-init
        self._verbose = namespace.get('verbose', False)  # pylint: disable=attribute-defined-outside-init
    def _proc_logging(self, path):
        """Dump a Brewfile for the brew executable at *path* into the log root.

        The dump goes to a temporary file first; only lines starting with
        'brew' are copied into the final log file.
        """
        text = f'Listing installed {self.desc[1]}'
        print_info(text, self._file, redirect=self._qflag)
        # Encode the executable path into the log file name ('/' -> ':').
        suffix = path.replace('/', ':')
        with tempfile.NamedTemporaryFile() as _temp_file:
            logfile = os.path.join(self._logroot, f'{self.log}-{suffix}{self.ext}')
            argv = [path, 'bundle', 'dump', '--force', f'--file={_temp_file.name}']
            print_scpt(argv, self._file, redirect=self._qflag)
            script(argv, self._file, shell=True,
                   timeout=self._timeout, redirect=self._vflag)
            with open(_temp_file.name, 'r') as file:
                context = file.read()
            print_text(context, get_logfile(), redirect=self._vflag)
            # Keep only the lines starting with 'brew' (formula entries).
            with open(logfile, 'w') as file:
                file.writelines(filter(lambda s: s.startswith('brew'), context.strip().splitlines(True)))  # pylint: disable=filter-builtin-not-iterating
| 41.264706 | 149 | 0.652174 |
cf8bf36873c0c524cc8498b0a8262ac0246c8a72 | 424 | py | Python | app/src/test.py | mikkohei13/Loxpyt | 78aaeb5fcd637a063e8f183b97b82150f4cbc376 | [
"MIT"
] | 1 | 2022-02-24T09:49:39.000Z | 2022-02-24T09:49:39.000Z | app/src/test.py | mikkohei13/Loxpyt | 78aaeb5fcd637a063e8f183b97b82150f4cbc376 | [
"MIT"
] | 8 | 2021-03-31T19:30:40.000Z | 2022-03-12T00:04:55.000Z | app/src/test.py | mikkohei13/Loxpyt | 78aaeb5fcd637a063e8f183b97b82150f4cbc376 | [
"MIT"
] | null | null | null |
import split_and_spectro
import profile
monoFilePath = "/_source_audio/noordwijk/Data/5DB0D594.WAV"
exportDir = "_exports/test"
directory = "noordwijk"
tempFileName = "5DB0D594.WAV"
segments = 1;
segmentMetaGenerator = split_and_spectro.parseFile(monoFilePath, exportDir, directory, tempFileName, segments, 10)
# profile.run('split_and_spectro.parseFile(monoFilePath, exportDir, directory, tempFileName, segments, 10)')
| 30.285714 | 114 | 0.806604 |
cd7031326e48283b4f9f8bb9ba3c9e3a11e5b82f | 35,664 | py | Python | Python2.7-IDCard_Detect_by_AI/tencentcloud/cam/v20190116/cam_client.py | tencentyun/scf-demo-repo | c75f9b9e8be8d079f0510b14fb0c7098d0b6bb7e | [
"Python-2.0",
"OLDAP-2.7"
] | 46 | 2019-03-01T02:19:18.000Z | 2021-12-18T12:37:02.000Z | tencentcloud/cam/v20190116/cam_client.py | RedheatWei/tencentcloud-sdk-python | 140d4e60e8bdd89f3e5ae1d8aef0bfe4fa999521 | [
"Apache-2.0"
] | 8 | 2019-01-22T10:46:43.000Z | 2020-12-30T13:03:04.000Z | tencentcloud/cam/v20190116/cam_client.py | RedheatWei/tencentcloud-sdk-python | 140d4e60e8bdd89f3e5ae1d8aef0bfe4fa999521 | [
"Apache-2.0"
] | 67 | 2018-10-29T09:50:49.000Z | 2022-01-06T07:35:56.000Z | # -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.cam.v20190116 import models
class CamClient(AbstractClient):
_apiVersion = '2019-01-16'
_endpoint = 'cam.tencentcloudapi.com'
def AddUser(self, request):
"""添加子用户
:param request: 调用AddUser所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.AddUserRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.AddUserResponse`
"""
try:
params = request._serialize()
body = self.call("AddUser", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.AddUserResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def AddUserToGroup(self, request):
"""用户加入到用户组
:param request: 调用AddUserToGroup所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.AddUserToGroupRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.AddUserToGroupResponse`
"""
try:
params = request._serialize()
body = self.call("AddUserToGroup", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.AddUserToGroupResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def AttachGroupPolicy(self, request):
"""本接口(AttachGroupPolicy)可用于绑定策略到用户组。
:param request: 调用AttachGroupPolicy所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.AttachGroupPolicyRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.AttachGroupPolicyResponse`
"""
try:
params = request._serialize()
body = self.call("AttachGroupPolicy", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.AttachGroupPolicyResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def AttachUserPolicy(self, request):
"""本接口(AttachUserPolicy)可用于绑定到用户的策略。
:param request: 调用AttachUserPolicy所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.AttachUserPolicyRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.AttachUserPolicyResponse`
"""
try:
params = request._serialize()
body = self.call("AttachUserPolicy", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.AttachUserPolicyResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def CreateGroup(self, request):
"""创建用户组
:param request: 调用CreateGroup所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.CreateGroupRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.CreateGroupResponse`
"""
try:
params = request._serialize()
body = self.call("CreateGroup", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CreateGroupResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def CreatePolicy(self, request):
"""本接口(CreatePolicy)可用于创建策略。
:param request: 调用CreatePolicy所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.CreatePolicyRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.CreatePolicyResponse`
"""
try:
params = request._serialize()
body = self.call("CreatePolicy", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CreatePolicyResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def CreateSAMLProvider(self, request):
"""创建SAML身份提供商
:param request: 调用CreateSAMLProvider所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.CreateSAMLProviderRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.CreateSAMLProviderResponse`
"""
try:
params = request._serialize()
body = self.call("CreateSAMLProvider", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CreateSAMLProviderResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DeleteGroup(self, request):
"""删除用户组
:param request: 调用DeleteGroup所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.DeleteGroupRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.DeleteGroupResponse`
"""
try:
params = request._serialize()
body = self.call("DeleteGroup", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DeleteGroupResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DeletePolicy(self, request):
"""本接口(DeletePolicy)可用于删除策略。
:param request: 调用DeletePolicy所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.DeletePolicyRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.DeletePolicyResponse`
"""
try:
params = request._serialize()
body = self.call("DeletePolicy", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DeletePolicyResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DeleteSAMLProvider(self, request):
"""删除SAML身份提供商
:param request: 调用DeleteSAMLProvider所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.DeleteSAMLProviderRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.DeleteSAMLProviderResponse`
"""
try:
params = request._serialize()
body = self.call("DeleteSAMLProvider", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DeleteSAMLProviderResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DeleteUser(self, request):
"""删除子用户
:param request: 调用DeleteUser所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.DeleteUserRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.DeleteUserResponse`
"""
try:
params = request._serialize()
body = self.call("DeleteUser", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DeleteUserResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DetachGroupPolicy(self, request):
"""本接口(DetachGroupPolicy)可用于解除绑定到用户组的策略。
:param request: 调用DetachGroupPolicy所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.DetachGroupPolicyRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.DetachGroupPolicyResponse`
"""
try:
params = request._serialize()
body = self.call("DetachGroupPolicy", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DetachGroupPolicyResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DetachUserPolicy(self, request):
"""本接口(DetachUserPolicy)可用于解除绑定到用户的策略。
:param request: 调用DetachUserPolicy所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.DetachUserPolicyRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.DetachUserPolicyResponse`
"""
try:
params = request._serialize()
body = self.call("DetachUserPolicy", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DetachUserPolicyResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def GetGroup(self, request):
"""查询用户组详情
:param request: 调用GetGroup所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.GetGroupRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.GetGroupResponse`
"""
try:
params = request._serialize()
body = self.call("GetGroup", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.GetGroupResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def GetPolicy(self, request):
"""本接口(GetPolicy)可用于查询查看策略详情。
:param request: 调用GetPolicy所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.GetPolicyRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.GetPolicyResponse`
"""
try:
params = request._serialize()
body = self.call("GetPolicy", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.GetPolicyResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def GetSAMLProvider(self, request):
"""查询SAML身份提供商详情
:param request: 调用GetSAMLProvider所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.GetSAMLProviderRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.GetSAMLProviderResponse`
"""
try:
params = request._serialize()
body = self.call("GetSAMLProvider", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.GetSAMLProviderResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def GetUser(self, request):
"""查询子用户
:param request: 调用GetUser所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.GetUserRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.GetUserResponse`
"""
try:
params = request._serialize()
body = self.call("GetUser", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.GetUserResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ListAttachedGroupPolicies(self, request):
"""本接口(ListAttachedGroupPolicies)可用于查询用户组关联的策略列表。
:param request: 调用ListAttachedGroupPolicies所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.ListAttachedGroupPoliciesRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.ListAttachedGroupPoliciesResponse`
"""
try:
params = request._serialize()
body = self.call("ListAttachedGroupPolicies", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ListAttachedGroupPoliciesResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ListAttachedUserPolicies(self, request):
"""本接口(ListAttachedUserPolicies)可用于查询子账号关联的策略列表。
:param request: 调用ListAttachedUserPolicies所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.ListAttachedUserPoliciesRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.ListAttachedUserPoliciesResponse`
"""
try:
params = request._serialize()
body = self.call("ListAttachedUserPolicies", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ListAttachedUserPoliciesResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ListEntitiesForPolicy(self, request):
"""本接口(ListEntitiesForPolicy)可用于查询策略关联的实体列表。
:param request: 调用ListEntitiesForPolicy所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.ListEntitiesForPolicyRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.ListEntitiesForPolicyResponse`
"""
try:
params = request._serialize()
body = self.call("ListEntitiesForPolicy", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ListEntitiesForPolicyResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ListGroups(self, request):
"""查询用户组列表
:param request: 调用ListGroups所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.ListGroupsRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.ListGroupsResponse`
"""
try:
params = request._serialize()
body = self.call("ListGroups", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ListGroupsResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ListGroupsForUser(self, request):
"""列出用户关联的用户组
:param request: 调用ListGroupsForUser所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.ListGroupsForUserRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.ListGroupsForUserResponse`
"""
try:
params = request._serialize()
body = self.call("ListGroupsForUser", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ListGroupsForUserResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ListPolicies(self, request):
"""本接口(ListPolicies)可用于查询策略列表
:param request: 调用ListPolicies所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.ListPoliciesRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.ListPoliciesResponse`
"""
try:
params = request._serialize()
body = self.call("ListPolicies", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ListPoliciesResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ListSAMLProviders(self, request):
"""查询SAML身份提供商列表
:param request: 调用ListSAMLProviders所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.ListSAMLProvidersRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.ListSAMLProvidersResponse`
"""
try:
params = request._serialize()
body = self.call("ListSAMLProviders", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ListSAMLProvidersResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ListUsers(self, request):
"""拉取子用户
:param request: 调用ListUsers所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.ListUsersRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.ListUsersResponse`
"""
try:
params = request._serialize()
body = self.call("ListUsers", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ListUsersResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ListUsersForGroup(self, request):
"""查询用户组关联的用户列表
:param request: 调用ListUsersForGroup所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.ListUsersForGroupRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.ListUsersForGroupResponse`
"""
try:
params = request._serialize()
body = self.call("ListUsersForGroup", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ListUsersForGroupResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def RemoveUserFromGroup(self, request):
"""从用户组删除用户
:param request: 调用RemoveUserFromGroup所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.RemoveUserFromGroupRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.RemoveUserFromGroupResponse`
"""
try:
params = request._serialize()
body = self.call("RemoveUserFromGroup", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.RemoveUserFromGroupResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def UpdateGroup(self, request):
"""更新用户组
:param request: 调用UpdateGroup所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.UpdateGroupRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.UpdateGroupResponse`
"""
try:
params = request._serialize()
body = self.call("UpdateGroup", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.UpdateGroupResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def UpdatePolicy(self, request):
"""本接口(UpdatePolicy )可用于更新策略。
:param request: 调用UpdatePolicy所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.UpdatePolicyRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.UpdatePolicyResponse`
"""
try:
params = request._serialize()
body = self.call("UpdatePolicy", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.UpdatePolicyResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def UpdateSAMLProvider(self, request):
"""更新SAML身份提供商信息
:param request: 调用UpdateSAMLProvider所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.UpdateSAMLProviderRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.UpdateSAMLProviderResponse`
"""
try:
params = request._serialize()
body = self.call("UpdateSAMLProvider", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.UpdateSAMLProviderResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def UpdateUser(self, request):
"""更新子用户
:param request: 调用UpdateUser所需参数的结构体。
:type request: :class:`tencentcloud.cam.v20190116.models.UpdateUserRequest`
:rtype: :class:`tencentcloud.cam.v20190116.models.UpdateUserResponse`
"""
try:
params = request._serialize()
body = self.call("UpdateUser", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.UpdateUserResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message) | 39.93729 | 98 | 0.583558 |
acde83f1e89313ed5d465cb1b6da92d21caa47e1 | 1,200 | py | Python | tumor-prediction.py | mlatmd/intro-to-ml | 515e8fc8955b7d5289064c4d7b9e21d3e0a16351 | [
"MIT"
] | 2 | 2017-10-26T23:12:46.000Z | 2017-10-27T02:43:48.000Z | tumor-prediction.py | mlatmd/intro-to-ml | 515e8fc8955b7d5289064c4d7b9e21d3e0a16351 | [
"MIT"
] | null | null | null | tumor-prediction.py | mlatmd/intro-to-ml | 515e8fc8955b7d5289064c4d7b9e21d3e0a16351 | [
"MIT"
] | 1 | 2018-03-13T04:20:32.000Z | 2018-03-13T04:20:32.000Z | import pandas
import numpy
from sklearn import ensemble
from sklearn.model_selection import train_test_split
# Read in the breast cancer data.csv
data = pandas.read_csv("data.csv", header=0)
# Take a look at pandas dataframe format
# print(data.head())
# Data cleaning
# Map the diagnosis label to integers: 'M' -> 0, 'B' -> 1
# (presumably malignant/benign -- confirm against the dataset's docs).
mapping = {'M' : 0, 'B' : 1}
data['diagnosis'] = data['diagnosis'].map(mapping)
features = list(data.columns[1:31]) # Appending all the columns in feature vector
# Hold out 20% of rows for testing; random_state fixed for reproducibility.
train_features, test_features, train_labels, test_labels = train_test_split(data[features], data['diagnosis'].values, test_size=0.20, random_state=10)
# Get the random forest classifier from the scikit library we imported
classifier = ensemble.RandomForestClassifier()
# Train your classifier with our training data split
trained_classifier = classifier.fit(train_features.values, train_labels)
# Let's try out our trained classifier
y_prediction = trained_classifier.predict(test_features.values)
# Print out the predictions vs the actual values
print(y_prediction)
print(test_labels)
# Accuracy = fraction of test predictions that match the true labels.
num_correct_predictions = numpy.sum(y_prediction == test_labels)
num_test_samples = float(len(test_labels))
print ("ML Accuracy", num_correct_predictions / num_test_samples) | 34.285714 | 150 | 0.7925 |
a0955de6a28b5bd8490a15a8f210f1106d98fe19 | 326 | py | Python | source/extraits/devine-nombre.py | gruiick/cours-python | 0b845093e0029dc116c62b110586001d3fab39ea | [
"CC-BY-4.0"
] | null | null | null | source/extraits/devine-nombre.py | gruiick/cours-python | 0b845093e0029dc116c62b110586001d3fab39ea | [
"CC-BY-4.0"
] | 4 | 2021-05-05T17:22:20.000Z | 2021-05-10T15:47:26.000Z | source/extraits/devine-nombre.py | gruiick/cours-python | 0b845093e0029dc116c62b110586001d3fab39ea | [
"CC-BY-4.0"
] | 1 | 2021-05-05T16:32:47.000Z | 2021-05-05T16:32:47.000Z | import random
# Number-guessing game: pick a secret number in [0, 100] and loop until
# the user guesses it (prompts and messages are in French by design).
nombre_secret = random.randint(0, 100)
print("devine le nombre auquel je pense entre 0 et 100")
entrée_utilisateur = int(input())
while True:
    if entrée_utilisateur == nombre_secret:
        # Correct guess: congratulate and leave the loop.
        print("bravo")
        break
    else:
        # Wrong guess: say so and read the next attempt.
        print("mauvaise réponse")
        entrée_utilisateur = int(input())
| 21.733333 | 56 | 0.668712 |
1424ffbed2799c1242dc81bc6dc72710c8143ea2 | 3,029 | py | Python | svg_ultralight/constructors/new_element.py | ShayHill/svg_ultralight | 27e42350bd5304dabb0dd65e991e9cd3f8f1a3ae | [
"MIT"
] | 1 | 2019-10-26T01:49:58.000Z | 2019-10-26T01:49:58.000Z | svg_ultralight/constructors/new_element.py | ShayHill/svg_writer | 27e42350bd5304dabb0dd65e991e9cd3f8f1a3ae | [
"MIT"
] | 1 | 2020-01-29T13:15:58.000Z | 2020-02-03T14:30:13.000Z | svg_ultralight/constructors/new_element.py | ShayHill/svg_ultralight | 27e42350bd5304dabb0dd65e991e9cd3f8f1a3ae | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# _*_ coding: utf-8 _*_
"""SVG Element constructors. Create an svg element from a dictionary.
:author: Shay Hill
:created: 1/31/2020
This is principally to allow passing values, rather than strings, as svg element
parameters.
Will translate ``stroke_width=10`` to ``stroke-width="10"``
"""
import copy
from typing import Union
from lxml import etree # type: ignore
from ..string_conversion import set_attributes
def new_element(tag: str, **attributes: Union[str, float]) -> etree.Element:
    # noinspection PyShadowingNames
    """
    Create an etree.Element, make every kwarg value a string.
    :param tag: element tag
    :param attributes: element attribute names and values
    :returns: new ``tag`` element
    >>> elem = new_element('line', x1=0, y1=0, x2=5, y2=5)
    >>> etree.tostring(elem)
    b'<line x1="0" y1="0" x2="5" y2="5"/>'
    Strips trailing underscores
    >>> elem = new_element('line', in_="SourceAlpha")
    >>> etree.tostring(elem)
    b'<line in="SourceAlpha"/>'
    Translates other underscores to hyphens
    >>> elem = new_element('line', stroke_width=1)
    >>> etree.tostring(elem)
    b'<line stroke-width="1"/>'
    Special handling for a 'text' argument. Places value between element tags.
    >>> elem = new_element('text', text='please star my project')
    >>> etree.tostring(elem)
    b'<text>please star my project</text>'
    """
    elem = etree.Element(tag)
    # All kwarg translation (stringify, underscore handling, 'text' special
    # case) is delegated to set_attributes.
    set_attributes(elem, **attributes)
    return elem
def new_sub_element(
    parent: etree.Element, tag: str, **attributes: Union[str, float]
) -> etree.Element:
    # noinspection PyShadowingNames
    """
    Create an etree.SubElement, make every kwarg value a string.
    :param parent: parent element
    :param tag: element tag
    :param attributes: element attribute names and values
    :returns: new ``tag`` element
    >>> parent = etree.Element('g')
    >>> _ = new_sub_element(parent, 'rect')
    >>> etree.tostring(parent)
    b'<g><rect/></g>'
    """
    elem = etree.SubElement(parent, tag)
    set_attributes(elem, **attributes)
    return elem
def update_element(
    elem: etree.Element, **attributes: Union[str, float]
) -> etree.Element:
    """
    Update an existing etree.Element with additional params.
    :param elem: an etree element
    :param attributes: element attribute names and values
    :returns: the same element, with ``attributes`` applied in place
    """
    set_attributes(elem, **attributes)
    return elem
def deepcopy_element(
    elem: etree.Element, **attributes: Union[str, float]
) -> etree.Element:
    """
    Create a deepcopy of an element. Optionally pass additional params.
    :param elem: an etree element, or a list of such elements
    :param attributes: element attribute names and values applied to the copy
    :returns: a deep copy of ``elem`` with ``attributes`` applied; when
        ``elem`` is a list, a list of copies (one per entry) is returned
    """
    # A list argument is handled by copying each entry recursively.
    if isinstance(elem, list):
        return [deepcopy_element(x, **attributes) for x in elem]
    elem = copy.deepcopy(elem)
    update_element(elem, **attributes)
    return elem
# Run the module's embedded doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 26.570175 | 80 | 0.654011 |
5a0497f22755078e8c7a7af4dce638fbb2267d17 | 2,355 | py | Python | assets/utils.py | andremargarin/digital-asset-management-api | 6ff6735754a994b87f4b8f0051e83532785fab01 | [
"MIT"
] | null | null | null | assets/utils.py | andremargarin/digital-asset-management-api | 6ff6735754a994b87f4b8f0051e83532785fab01 | [
"MIT"
] | null | null | null | assets/utils.py | andremargarin/digital-asset-management-api | 6ff6735754a994b87f4b8f0051e83532785fab01 | [
"MIT"
] | null | null | null | """
Utils functions
"""
from django.conf import settings
from django.core.files import File as DjangoFile
from moviepy.tools import extensions_dict
from moviepy.editor import VideoFileClip, AudioFileClip
from .models import AssetFile, AssetFilePart
def generate_time_frames(clip_duration, frame_size):
    """Split ``[0, clip_duration]`` into consecutive ``(start, end)`` windows.

    Each window is ``frame_size`` long except the last one, which is
    truncated to end exactly at ``clip_duration``. Returns an empty list
    when ``clip_duration`` is 0.
    """
    # Number of whole windows, plus one partial window for any remainder.
    n_frames = int(clip_duration / frame_size)
    if clip_duration % frame_size != 0:
        n_frames += 1
    # Window edges: 0, frame_size, 2*frame_size, ..., capped at the duration.
    edges = [min(k * frame_size, clip_duration) for k in range(n_frames + 1)]
    # Pair consecutive edges into (start, end) tuples.
    return list(zip(edges[:-1], edges[1:]))
def save_file_and_audio_tracks(owner, content):
    """Persist an uploaded media file and its audio track split into parts.

    Writes ``content`` to a temp file, extracts its audio (video or audio
    clip, per the moviepy extension table), saves an AssetFile, then saves
    one mp3 AssetFilePart per fixed-size time window of the audio.

    :param content: uploaded file object with ``name`` and ``read()``
        (presumably a Django UploadedFile -- confirm against callers).
    :returns: the saved AssetFile instance
    """
    # Split "name.ext" into base name and extension.
    filename = content.name[:content.name.rfind('.')]
    extension = content.name[content.name.rfind('.')+1:]
    # NOTE(review): the format string contains a literal '(unknown)' and never
    # uses the `filename` kwarg -- looks like a mangled '{filename}'
    # placeholder; confirm against the upstream source.
    content_tempfile_path = '{temp_dir}(unknown).{extension}'.format(
        temp_dir=settings.FILE_UPLOAD_TEMP_DIR,
        filename=filename,
        extension=extension)
    with open(content_tempfile_path, 'wb+') as destination:
        destination.write(content.read())
    # moviepy's extension table tells us whether this is a video or audio file.
    clip_type = extensions_dict[extension]['type']
    if clip_type == 'video':
        audio = VideoFileClip(content_tempfile_path).audio
    elif clip_type == 'audio':
        audio = AudioFileClip(content_tempfile_path)
    asset_file = AssetFile(owner=owner, content=content, name=content.name)
    asset_file.save()
    # Window the audio into fixed-size chunks (size from settings).
    time_frames = generate_time_frames(
        clip_duration=audio.duration,
        frame_size=settings.ASSETS_ASSET_FILE_PART_SIZE)
    for part_number, frame in enumerate(time_frames, start=1):
        audio_clip_part = audio.subclip(*frame)
        # NOTE(review): same '(unknown)' placeholder issue as above.
        audio_path = '{temp_dir}(unknown)_{part_number}.{extension}'.format(
            temp_dir=settings.FILE_UPLOAD_TEMP_DIR,
            filename=filename,
            part_number=part_number,
            extension='mp3')
        audio_clip_part.write_audiofile(audio_path)
        # Store the rendered mp3 chunk as an ordered AssetFilePart.
        with open(audio_path, 'rb') as audio_file:
            file_content = DjangoFile(audio_file)
            file_part = AssetFilePart(asset_file=asset_file, order=part_number)
            file_part.content.save(audio_path, file_content)
            file_part.save()
    return asset_file
| 32.260274 | 79 | 0.689597 |
e4b7cc7e0db1834ca38811869b401d96ea51974e | 6,701 | py | Python | tensorflow_probability/python/math/psd_kernels/internal/util_test.py | matthieucoquet/probability | 2426f4fc4743ceedc1a638a03d19ce6654ebff76 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/math/psd_kernels/internal/util_test.py | matthieucoquet/probability | 2426f4fc4743ceedc1a638a03d19ce6654ebff76 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/math/psd_kernels/internal/util_test.py | matthieucoquet/probability | 2426f4fc4743ceedc1a638a03d19ce6654ebff76 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Positive-Semidefinite Kernels utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.math.gradient import value_and_gradient
from tensorflow_probability.python.math.psd_kernels.internal import util
class UtilTest(tf.test.TestCase):
  """Tests for psd_kernels.internal.util helpers.

  Covers pad_shape_with_ones (static/dynamic shapes, graph no-op case),
  sum_rightmost_ndims_preserving_shape, and sqrt_with_finite_grads
  (values, gradients, backprop chaining, dynamic shape).
  """
  def testPadShapeRightWithOnes(self):
    # Test nominal behavior.
    x = np.ones([3], np.float32)
    self.assertAllEqual(
        self.evaluate(util.pad_shape_with_ones(x, 3)).shape,
        [3, 1, 1, 1])
  def testPadShapeStartWithOnes(self):
    # Test nominal behavior.
    x = np.ones([3], np.float32)
    self.assertAllEqual(
        self.evaluate(util.pad_shape_with_ones(x, 3, start=-2)).shape,
        [1, 1, 1, 3])
  def testPadShapeMiddleWithOnes(self):
    # Test nominal behavior.
    x = np.ones([2, 3, 5], np.float32)
    self.assertAllEqual(
        self.evaluate(util.pad_shape_with_ones(x, 3)).shape,
        [2, 3, 5, 1, 1, 1])
    self.assertAllEqual(
        self.evaluate(util.pad_shape_with_ones(x, 3, start=-2)).shape,
        [2, 3, 1, 1, 1, 5])
    self.assertAllEqual(
        self.evaluate(util.pad_shape_with_ones(x, 3, start=-3)).shape,
        [2, 1, 1, 1, 3, 5])
  def testPadShapeRightWithOnesDynamicShape(self):
    # Placeholders only exist in graph mode; skip under eager execution.
    if tf.executing_eagerly(): return
    # Test partially unknown shape
    x = tf.compat.v1.placeholder_with_default(np.ones([3], np.float32), [None])
    expanded = util.pad_shape_with_ones(x, 3)
    self.assertAllEqual(expanded.shape.as_list(), [None, 1, 1, 1])
    self.assertAllEqual(self.evaluate(expanded).shape, [3, 1, 1, 1])
    expanded = util.pad_shape_with_ones(x, 3, start=-2)
    self.assertAllEqual(expanded.shape.as_list(), [1, 1, 1, None])
    self.assertAllEqual(self.evaluate(expanded).shape, [1, 1, 1, 3])
    # Test totally unknown shape
    x = tf.compat.v1.placeholder_with_default(np.ones([3], np.float32), None)
    expanded = util.pad_shape_with_ones(x, 3)
    self.assertIsNone(expanded.shape.ndims)
    self.assertAllEqual(self.evaluate(expanded).shape, [3, 1, 1, 1])
  def testPadShapeRightWithOnesCanBeGraphNoop(self):
    # First ensure graph actually *is* changed when we use non-trivial ndims.
    # Use an explicitly created graph, to make sure no whacky test fixture graph
    # reuse is going on in the background.
    g = tf.Graph()
    with g.as_default():
      x = tf.constant(np.ones([3], np.float32))
      graph_def = g.as_graph_def()
      x = util.pad_shape_with_ones(x, 3)
      self.assertNotEqual(graph_def, g.as_graph_def())
    # Now verify that graphdef is unchanged (no extra ops) when we pass ndims=0.
    g = tf.Graph()
    with g.as_default():
      x = tf.constant(np.ones([3], np.float32))
      graph_def = g.as_graph_def()
      x = util.pad_shape_with_ones(x, 0)
      self.assertEqual(graph_def, g.as_graph_def())
  def testSumRightmostNdimsPreservingShapeStaticRank(self):
    x = np.ones((5, 4, 3, 2))
    self.assertAllEqual(
        util.sum_rightmost_ndims_preserving_shape(x, ndims=2).shape,
        [5, 4])
    # With partially-known static shape the summed dim is only known eagerly.
    x = tf.compat.v1.placeholder_with_default(
        np.ones((5, 4, 3, 2)), shape=[5, 4, None, None])
    self.assertAllEqual(
        util.sum_rightmost_ndims_preserving_shape(x, ndims=1).shape.as_list(),
        [5, 4, 3 if tf.executing_eagerly() else None])
  def testSumRightmostNdimsPreservingShapeDynamicRank(self):
    # Fully-unknown rank is a graph-mode-only scenario.
    if tf.executing_eagerly(): return
    x = tf.compat.v1.placeholder_with_default(np.ones((5, 4, 3, 2)), shape=None)
    self.assertIsNone(
        util.sum_rightmost_ndims_preserving_shape(x, ndims=2).shape.ndims)
    self.assertAllEqual(
        self.evaluate(
            util.sum_rightmost_ndims_preserving_shape(x, ndims=2)).shape,
        [5, 4])
  def testSqrtWithFiniteGradsHasCorrectValues(self):
    # Negative input still yields NaN, and values match tf.sqrt on [0, 10].
    self.assertTrue(np.isnan(self.evaluate(util.sqrt_with_finite_grads(-1.))))
    xs = np.linspace(0., 10., 100)
    self.assertAllEqual(
        self.evaluate(tf.sqrt(xs)),
        self.evaluate(util.sqrt_with_finite_grads(xs)))
  def testSqrtWithFiniteGradsHasCorrectGradients(self):
    self.assertTrue(np.isnan(self.evaluate(util.sqrt_with_finite_grads(-1.))))
    xs = tf.constant(np.linspace(1e-10, 10., 100))
    _, grad_tf_sqrt = value_and_gradient(tf.sqrt, xs)
    _, grad_safe_sqrt = value_and_gradient(
        util.sqrt_with_finite_grads, xs)
    self.assertAllEqual(*self.evaluate([grad_tf_sqrt, grad_safe_sqrt]))
    # At zero the gradients must *differ*: that is the whole point of the
    # finite-gradient variant.
    zero = tf.constant(0.)
    _, grad_tf_sqrt = value_and_gradient(tf.sqrt, zero)
    _, grad_safe_sqrt = value_and_gradient(
        util.sqrt_with_finite_grads, zero)
    self.assertNotEqual(*self.evaluate([grad_tf_sqrt, grad_safe_sqrt]))
  def testSqrtWithFiniteGradsBackpropsCorrectly(self):
    # Part of implementing a tf.custom_gradient is correctly handling the
    # `grad_ys` value that is propagating back from downstream ops. This test
    # checks that we got this right, in a particular case where our sqrt
    # function is squashed between a couple of other functions.
    def f(x):
      return x ** 2
    def g(x):
      return util.sqrt_with_finite_grads(x)
    def h(x):
      return tf.sin(x) ** 2
    # We only test away from zero, since we know the values don't match there.
    xs = tf.constant(np.linspace(1e-10, 10., 100))
    _, grad_tf_sqrt = value_and_gradient(
        lambda xs_: f(tf.sqrt(h(xs_))), xs)
    _, grad_safe_sqrt = value_and_gradient(
        lambda xs_: f(g(h(xs_))), xs)
    self.assertAllClose(*self.evaluate([grad_tf_sqrt, grad_safe_sqrt]),
                        rtol=1e-10)
  def testSqrtWithFiniteGradsWithDynamicShape(self):
    x = tf.compat.v1.placeholder_with_default([1.], shape=[None])
    _, grad_tf_sqrt = value_and_gradient(tf.sqrt, x)
    _, grad_safe_sqrt = value_and_gradient(
        util.sqrt_with_finite_grads, x)
    self.assertAllEqual(*self.evaluate([grad_tf_sqrt, grad_safe_sqrt]))
# Run the test suite when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| 38.734104 | 80 | 0.689151 |
9a33146a1b3baf75e9adea63b1d4684ec38875bb | 692 | py | Python | det3d/torchie/trainer/hooks/checkpoint.py | reinforcementdriving/CIA-SSD | f7b4a9ed4a2b852845303efc6c972125438817a6 | [
"Apache-2.0"
] | 382 | 2020-12-05T06:46:28.000Z | 2022-03-29T17:40:58.000Z | det3d/torchie/trainer/hooks/checkpoint.py | reinforcementdriving/CIA-SSD | f7b4a9ed4a2b852845303efc6c972125438817a6 | [
"Apache-2.0"
] | 28 | 2020-12-08T07:50:57.000Z | 2022-03-20T03:54:43.000Z | det3d/torchie/trainer/hooks/checkpoint.py | reinforcementdriving/CIA-SSD | f7b4a9ed4a2b852845303efc6c972125438817a6 | [
"Apache-2.0"
] | 57 | 2020-12-10T02:19:03.000Z | 2022-03-19T09:49:38.000Z | from ..utils import master_only
from .hook import Hook
class CheckpointHook(Hook):
    """Hook that saves a trainer checkpoint every ``interval`` epochs.

    Typical configuration: ``checkpoint_config = dict(interval=1)``.
    """

    def __init__(self, interval=1, save_optimizer=True, out_dir=None, **kwargs):
        """
        :param interval: save a checkpoint every this many epochs.
        :param save_optimizer: include optimizer state in the checkpoint.
        :param out_dir: target directory; falls back to the trainer's
            ``work_dir`` on first save when not given.
        :param kwargs: forwarded verbatim to ``trainer.save_checkpoint``.
        """
        self.interval = interval
        self.save_optimizer = save_optimizer
        self.out_dir = out_dir
        self.args = kwargs

    @master_only
    def after_train_epoch(self, trainer):
        # Only checkpoint on epochs that fall on the configured interval.
        if not self.every_n_epochs(trainer, self.interval):
            return
        # Lazily default the output directory to the trainer's work dir.
        self.out_dir = self.out_dir or trainer.work_dir
        trainer.save_checkpoint(
            self.out_dir, save_optimizer=self.save_optimizer, **self.args)
| 28.833333 | 94 | 0.648844 |
f460bff3e5df184449bab4a9963170230007ffd8 | 287 | py | Python | make_sine.py | abulovic/mega-chaos | 8fd18bed0421a84d2e116a347970b6f99b614fa0 | [
"Apache-2.0"
] | 1 | 2015-12-28T16:35:03.000Z | 2015-12-28T16:35:03.000Z | make_sine.py | abulovic/mega-chaos | 8fd18bed0421a84d2e116a347970b6f99b614fa0 | [
"Apache-2.0"
] | null | null | null | make_sine.py | abulovic/mega-chaos | 8fd18bed0421a84d2e116a347970b6f99b614fa0 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from random import random
# Sample t on [0, 1000) at 0.1 steps -> 10000 points.
t = np.arange(0., 1000., 0.1)
# Noisy signal: sin(t) plus uniform noise in [0, 0.2) plus a slow sine
# component sin(0.09*t).
sine = np.sin(t) + np.random.rand(10000,) / 5. + np.sin(0.09 * t)
# Dump the samples as a single comma-separated line.
with open('sine.txt', 'w') as fout:
    fout.write(','.join(map(lambda x: str(x), sine)))
import matplotlib.pyplot as plt
plt.plot(sine)
plt.show() | 26.090909 | 65 | 0.655052 |
297ab0d8f6bf9675fcc92bee2d1b570d30d64434 | 6,502 | py | Python | raiden/tests/integration/test_balance_proof_check.py | manuelwedler/raiden | 7902a9559ea4498be74504e8283d1f9f733eafa8 | [
"MIT"
] | null | null | null | raiden/tests/integration/test_balance_proof_check.py | manuelwedler/raiden | 7902a9559ea4498be74504e8283d1f9f733eafa8 | [
"MIT"
] | null | null | null | raiden/tests/integration/test_balance_proof_check.py | manuelwedler/raiden | 7902a9559ea4498be74504e8283d1f9f733eafa8 | [
"MIT"
] | null | null | null | import pytest
from raiden import waiting
from raiden.api.python import RaidenAPI
from raiden.constants import EMPTY_BALANCE_HASH, EMPTY_HASH, EMPTY_SIGNATURE
from raiden.storage.sqlite import RANGE_ALL_STATE_CHANGES
from raiden.tests.integration.network.proxies import BalanceProof
from raiden.tests.utils.detect_failure import raise_on_failure
from raiden.tests.utils.events import search_for_item
from raiden.tests.utils.network import CHAIN
from raiden.tests.utils.transfer import get_channelstate, transfer
from raiden.transfer import views
from raiden.transfer.state_change import ContractReceiveChannelSettled
from raiden.utils.typing import Nonce, PaymentAmount, PaymentID, TokenAmount, TokenNetworkAddress
from raiden_contracts.constants import MessageTypeId
@raise_on_failure
@pytest.mark.parametrize("deposit", [10])
@pytest.mark.parametrize("channels_per_node", [CHAIN])
@pytest.mark.parametrize("number_of_nodes", [2])
def test_node_can_settle_if_close_didnt_use_any_balance_proof(
    raiden_network, number_of_nodes, token_addresses, network_wait
):
    """ A node must be able to settle a channel, even if the partner used an
    old balance proof to close it.
    This test will:
    - Make a transfer from app0 to app1, to make sure there are balance
      proofs available
    - Call close manually in behalf of app1, without any balance proof data
    - Assert that app0 can settle the closed channel, even though app1 didn't
      use the latest balance proof
    """
    app0, app1 = raiden_network
    token_address = token_addresses[0]
    chain_state = views.state_from_app(app0)
    token_network_registry_address = app0.raiden.default_registry.address
    # Same address as token_network_registry_address; kept under the name the
    # waiting/close APIs expect.
    registry_address = app0.raiden.default_registry.address
    token_network_address = views.get_token_network_address_by_token_address(
        chain_state=chain_state,
        token_network_registry_address=token_network_registry_address,
        token_address=token_address,
    )
    assert token_network_address
    channel_identifier = get_channelstate(app0, app1, token_network_address).identifier
    # make a transfer from app0 to app1 so that app1 is supposed to have a non
    # empty balance hash
    transfer(
        initiator_app=app0,
        target_app=app1,
        token_address=token_address,
        amount=PaymentAmount(1),
        identifier=PaymentID(1),
        timeout=network_wait * number_of_nodes,
    )
    # stop app1 - the test uses token_network_contract now
    app1.stop()
    token_network_contract = app1.raiden.proxy_manager.token_network(
        token_network_address, "latest"
    )
    # Build a zero/empty balance proof (nonce 0, no transferred amount) to
    # simulate app1 closing without using its latest balance proof.
    empty_balance_proof = BalanceProof(
        channel_identifier=channel_identifier,
        token_network_address=TokenNetworkAddress(token_network_contract.address),
        balance_hash=EMPTY_BALANCE_HASH,
        nonce=Nonce(0),
        chain_id=chain_state.chain_id,
        transferred_amount=TokenAmount(0),
    )
    closing_data = (
        empty_balance_proof.serialize_bin(msg_type=MessageTypeId.BALANCE_PROOF) + EMPTY_SIGNATURE
    )
    closing_signature = app1.raiden.signer.sign(data=closing_data)
    # app1 closes the channel with an empty hash instead of the expected hash
    # of the transferred amount from app0
    token_network_contract.close(
        channel_identifier=channel_identifier,
        partner=app0.raiden.address,
        balance_hash=EMPTY_HASH,
        nonce=0,
        additional_hash=EMPTY_HASH,
        non_closing_signature=EMPTY_SIGNATURE,
        closing_signature=closing_signature,
        given_block_identifier="latest",
    )
    # app0 must still be able to drive the channel to settlement.
    waiting.wait_for_settle(
        raiden=app0.raiden,
        token_network_registry_address=registry_address,
        token_address=token_address,
        channel_ids=[channel_identifier],
        retry_timeout=app0.raiden.alarm.sleep_time,
    )
    # The settlement must be reflected in app0's write-ahead log.
    state_changes = app0.raiden.wal.storage.get_statechanges_by_range(RANGE_ALL_STATE_CHANGES)
    assert search_for_item(
        state_changes,
        ContractReceiveChannelSettled,
        {"token_network_address": token_network_address, "channel_identifier": channel_identifier},
    )
@raise_on_failure
@pytest.mark.parametrize("deposit", [10])
@pytest.mark.parametrize("channels_per_node", [CHAIN])
@pytest.mark.parametrize("number_of_nodes", [2])
def test_node_can_settle_if_partner_does_not_call_update_transfer(
    raiden_network, number_of_nodes, token_addresses, network_wait
):
    """ A node must be able to settle a channel, even if the partner did not
    call update transfer.
    This test will:
    - Make a transfer from app0 to app1, to make sure there are balance
      proofs available
    - Stop app1, to make sure update is not called.
    - Use app0 to close the channel.
    - Assert that app0 can settle the closed channel, even though app1 didn't
      use the latest balance proof
    """
    app0, app1 = raiden_network
    token_address = token_addresses[0]
    chain_state = views.state_from_app(app0)
    token_network_registry_address = app0.raiden.default_registry.address
    # Same address as token_network_registry_address; kept under the name the
    # close/waiting APIs expect.
    registry_address = app0.raiden.default_registry.address
    token_network_address = views.get_token_network_address_by_token_address(
        chain_state=chain_state,
        token_network_registry_address=token_network_registry_address,
        token_address=token_address,
    )
    assert token_network_address
    channel_identifier = get_channelstate(app0, app1, token_network_address).identifier
    # Produce a balance proof so there is state app1 *could* have updated with.
    transfer(
        initiator_app=app0,
        target_app=app1,
        token_address=token_address,
        amount=PaymentAmount(1),
        identifier=PaymentID(1),
        timeout=network_wait * number_of_nodes,
    )
    # stop app1 - the test uses token_network_contract now
    app1.stop()
    # app0 closes the channel through the public API.
    RaidenAPI(app0.raiden).channel_close(
        registry_address=registry_address,
        token_address=token_address,
        partner_address=app1.raiden.address,
    )
    # app1 won't update the channel
    waiting.wait_for_settle(
        raiden=app0.raiden,
        token_network_registry_address=registry_address,
        token_address=token_address,
        channel_ids=[channel_identifier],
        retry_timeout=app0.raiden.alarm.sleep_time,
    )
    # The settlement must be reflected in app0's write-ahead log.
    state_changes = app0.raiden.wal.storage.get_statechanges_by_range(RANGE_ALL_STATE_CHANGES)
    assert search_for_item(
        state_changes,
        ContractReceiveChannelSettled,
        {"token_network_address": token_network_address, "channel_identifier": channel_identifier},
    )
| 39.406061 | 99 | 0.751 |
2d3bf6896577eb6a71ad93dc7c3d5cb552e6d7a4 | 132 | py | Python | apps/dendynotdead/urls.py | deniskrumko/dendynotdead | d1fd8b32cf2065413c19799f45487ed317b85eb1 | [
"MIT"
] | null | null | null | apps/dendynotdead/urls.py | deniskrumko/dendynotdead | d1fd8b32cf2065413c19799f45487ed317b85eb1 | [
"MIT"
] | null | null | null | apps/dendynotdead/urls.py | deniskrumko/dendynotdead | d1fd8b32cf2065413c19799f45487ed317b85eb1 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from .views import IndexView
# Route the app root ('^$') to the index page.
urlpatterns = [
    url('^$', IndexView.as_view(), name='index'),
]
| 16.5 | 49 | 0.681818 |
6aeb373c56fef8fa62ace3104b04a19ccbc9bb48 | 2,630 | py | Python | rllib/agents/marwil/marwil.py | firebolt55439/ray | 215300b070628c06f0106906fc6c03bd70ebf140 | [
"Apache-2.0"
] | 3 | 2020-12-03T17:48:45.000Z | 2022-01-22T08:09:46.000Z | rllib/agents/marwil/marwil.py | firebolt55439/ray | 215300b070628c06f0106906fc6c03bd70ebf140 | [
"Apache-2.0"
] | 3 | 2019-02-22T01:49:57.000Z | 2019-07-14T03:16:11.000Z | rllib/agents/marwil/marwil.py | firebolt55439/ray | 215300b070628c06f0106906fc6c03bd70ebf140 | [
"Apache-2.0"
] | 1 | 2021-10-11T22:53:49.000Z | 2021-10-11T22:53:49.000Z | from ray.rllib.agents.trainer import with_common_config
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.agents.marwil.marwil_tf_policy import MARWILTFPolicy
from ray.rllib.execution.replay_ops import SimpleReplayBuffer, Replay, \
StoreToReplayBuffer
from ray.rllib.execution.rollout_ops import ParallelRollouts, ConcatBatches
from ray.rllib.execution.concurrency_ops import Concurrently
from ray.rllib.execution.train_ops import TrainOneStep
from ray.rllib.execution.metric_ops import StandardMetricsReporting
# yapf: disable
# __sphinx_doc_begin__
# MARWIL-specific defaults layered on top of RLlib's common trainer config.
DEFAULT_CONFIG = with_common_config({
    # You should override this to point to an offline dataset (see agent.py).
    "input": "sampler",
    # Use importance sampling estimators for reward
    # ("is" = ordinary, "wis" = weighted importance sampling).
    "input_evaluation": ["is", "wis"],
    # Scaling of advantages in exponential terms.
    # When beta is 0.0, MARWIL is reduced to imitation learning.
    "beta": 1.0,
    # Balancing value estimation loss and policy optimization loss.
    "vf_coeff": 1.0,
    # Whether to calculate cumulative rewards.
    "postprocess_inputs": True,
    # Whether to rollout "complete_episodes" or "truncate_episodes".
    "batch_mode": "complete_episodes",
    # Learning rate for adam optimizer.
    "lr": 1e-4,
    # Number of timesteps collected for each SGD round.
    "train_batch_size": 2000,
    # Size of the replay buffer in batches (not timesteps!).
    "replay_buffer_size": 1000,
    # Number of steps to read before learning starts.
    "learning_starts": 0,
    # === Parallelism ===
    "num_workers": 0,
})
# __sphinx_doc_end__
# yapf: enable
def get_policy_class(config):
    """Select the torch policy class when ``framework == "torch"``.

    For any other framework this implicitly returns None, which
    (presumably, per build_trainer's contract -- confirm) makes the
    trainer fall back to its ``default_policy`` (MARWILTFPolicy).
    """
    if config["framework"] == "torch":
        # Imported lazily so the torch dependency is only pulled in on demand.
        from ray.rllib.agents.marwil.marwil_torch_policy import \
            MARWILTorchPolicy
        return MARWILTorchPolicy
def execution_plan(workers, config):
    """Build MARWIL's dataflow: rollouts fill a replay buffer, and training
    consumes replayed batches concatenated up to ``train_batch_size``."""
    rollouts = ParallelRollouts(workers, mode="bulk_sync")
    replay_buffer = SimpleReplayBuffer(config["replay_buffer_size"])
    # Writer side: store every sampled batch into the local replay buffer.
    store_op = rollouts \
        .for_each(StoreToReplayBuffer(local_buffer=replay_buffer))
    # Reader side: replay, concatenate to the training batch size, then train.
    replay_op = Replay(local_buffer=replay_buffer) \
        .combine(
            ConcatBatches(min_batch_size=config["train_batch_size"])) \
        .for_each(TrainOneStep(workers))
    # Alternate store/replay round-robin; only the train op (index 1)
    # contributes outputs/metrics.
    train_op = Concurrently(
        [store_op, replay_op], mode="round_robin", output_indexes=[1])
    return StandardMetricsReporting(train_op, workers, config)
# Assemble the MARWIL trainer from the config, policies and execution plan
# defined above. TF policy is the default; torch is chosen via
# get_policy_class when framework == "torch".
MARWILTrainer = build_trainer(
    name="MARWIL",
    default_config=DEFAULT_CONFIG,
    default_policy=MARWILTFPolicy,
    get_policy_class=get_policy_class,
    execution_plan=execution_plan)
| 35.540541 | 77 | 0.738403 |
d7f3d8659adebf6aa0b3e4d82991353264719fee | 2,275 | py | Python | evd_ros_backend/evd_ros_core/src/evd_script/program_nodes/machine_primitive.py | Wisc-HCI/CoFrame | 7a54344248d80cb316d36aabd40bbd3cdbbc07eb | [
"MIT"
] | null | null | null | evd_ros_backend/evd_ros_core/src/evd_script/program_nodes/machine_primitive.py | Wisc-HCI/CoFrame | 7a54344248d80cb316d36aabd40bbd3cdbbc07eb | [
"MIT"
] | null | null | null | evd_ros_backend/evd_ros_core/src/evd_script/program_nodes/machine_primitive.py | Wisc-HCI/CoFrame | 7a54344248d80cb316d36aabd40bbd3cdbbc07eb | [
"MIT"
] | null | null | null | '''
Machine Primitive extends primitive to simplify the later definition of specific machine primitives.
A machine primitive is a way to generalize machine behavior in EvD. The actual behavior needs to be
supplied externally and is hooked into the EvD program runner.
'''
from .primitive import Primitive
from ..data_nodes.machine import Machine
class MachinePrimitive(Primitive):

    '''
    Data structure methods
    '''

    @classmethod
    def display_name(cls):
        # Human-readable name used by front-end tooling.
        return 'Machine Primitive'

    @classmethod
    def type_string(cls, trailing_delim=True):
        # Type token for this node; the trailing '.' separates nested types.
        return 'machine-primitive' + ('.' if trailing_delim else '')

    @classmethod
    def full_type_string(cls):
        # Fully-qualified type string: parent chain plus this node's token.
        return Primitive.full_type_string() + cls.type_string()

    @classmethod
    def template(cls):
        # Extend the generic primitive template with the machine reference slot.
        template = Primitive.template()
        template['parameters'].append({
            'type': Machine.full_type_string(),
            'key': 'machine_uuid',
            'is_uuid': True,
            'is_list': False
        })
        return template

    def __init__(self, machine_uuid=None, parameters=None, type='', name='', uuid=None, parent=None,
                 append_type=True, editable=True, deleteable=True, description=''):

        # Bug fix: use an identity check (`is None`) instead of `== None` so a
        # caller-supplied empty dict (or a parameters object with custom
        # equality) is never silently replaced by the default wrapper.
        if parameters is None:
            parameters = {
                'machine_uuid': machine_uuid
            }

        super(MachinePrimitive,self).__init__(
            type=MachinePrimitive.type_string() + type if append_type else type,
            name=name,
            uuid=uuid,
            parent=parent,
            append_type=append_type,
            editable=editable,
            deleteable=deleteable,
            description=description,
            parameters=parameters)

    '''
    Data accessor/modifier methods
    '''

    @property
    def machine_uuid(self):
        # UUID of the machine this primitive operates on.
        return self.parameters['machine_uuid']

    @machine_uuid.setter
    def machine_uuid(self, value):
        # Only fire an attribute-updated event when the value actually changes.
        if self.parameters['machine_uuid'] != value:
            self.parameters['machine_uuid'] = value
            self.updated_attribute('parameters.machine_uuid','set')

    def set(self, dct):
        # Apply machine_uuid first, then defer the remaining keys to the parent.
        if 'machine_uuid' in dct.keys():
            self.machine_uuid = dct['machine_uuid']

        super(MachinePrimitive,self).set(dct)
645f659d548deb3d9266098659cef64e639df409 | 20,953 | py | Python | samples/python/stitching_detailed.py | GArlington/opencv | 077193632278b8a83716ee69044965ac7cb0b2c4 | [
"BSD-3-Clause"
] | 2 | 2019-04-08T14:25:23.000Z | 2022-02-23T15:01:02.000Z | samples/python/stitching_detailed.py | saichandrareddy1/opencv | 40da61a314cef5aeb12ae4b4f05ab47fa5286d21 | [
"BSD-3-Clause"
] | null | null | null | samples/python/stitching_detailed.py | saichandrareddy1/opencv | 40da61a314cef5aeb12ae4b4f05ab47fa5286d21 | [
"BSD-3-Clause"
] | null | null | null | """Rotation model images stitcher.
stitching_detailed img1 img2 [...imgN] [flags]
Flags:
--preview
Run stitching in the preview mode. Works faster than usual mode,
but output image will have lower resolution.
--try_cuda (yes|no)
Try to use CUDA. The default value is 'no'. All default values
are for CPU mode.
\nMotion Estimation Flags:
--work_megapix <float>
Resolution for image registration step. The default is 0.6 Mpx.
  --features (surf|orb|sift)
      Type of features used for images matching. The default is orb.
--matcher (homography|affine)
Matcher used for pairwise image matching.
--estimator (homography|affine)
Type of estimator used for transformation estimation.
--match_conf <float>
Confidence for feature matching step. The default is 0.65 for surf and 0.3 for orb.
--conf_thresh <float>
Threshold for two images are from the same panorama confidence.
The default is 1.0.
--ba (no|reproj|ray|affine)
Bundle adjustment cost function. The default is ray.
--ba_refine_mask (mask)
Set refinement mask for bundle adjustment. It looks like 'x_xxx',
where 'x' means refine respective parameter and '_' means don't
refine one, and has the following format:
<fx><skew><ppx><aspect><ppy>. The default mask is 'xxxxx'. If bundle
adjustment doesn't support estimation of selected parameter then
the respective flag is ignored.
--wave_correct (no|horiz|vert)
Perform wave effect correction. The default is 'horiz'.
--save_graph <file_name>
Save matches graph represented in DOT language to <file_name> file.
Labels description: Nm is number of matches, Ni is number of inliers,
C is confidence.
\nCompositing Flags:
--warp (affine|plane|cylindrical|spherical|fisheye|stereographic|compressedPlaneA2B1|compressedPlaneA1.5B1|compressedPlanePortraitA2B1|compressedPlanePortraitA1.5B1|paniniA2B1|paniniA1.5B1|paniniPortraitA2B1|paniniPortraitA1.5B1|mercator|transverseMercator)
      Warp surface type. The default is 'plane'.
--seam_megapix <float>
Resolution for seam estimation step. The default is 0.1 Mpx.
--seam (no|voronoi|gc_color|gc_colorgrad)
      Seam estimation method. The default is 'no'.
--compose_megapix <float>
Resolution for compositing step. Use -1 for original resolution.
The default is -1.
--expos_comp (no|gain|gain_blocks)
      Exposure compensation method. The default is 'no'.
--blend (no|feather|multiband)
Blending method. The default is 'multiband'.
--blend_strength <float>
Blending strength from [0,100] range. The default is 5.
--output <result_img>
The default is 'result.jpg'.
--timelapse (as_is|crop)
Output warped images separately as frames of a time lapse movie, with 'fixed_' prepended to input file names.
--rangewidth <int>
uses range_width to limit number of images to match with.\n
"""
import numpy as np
import cv2 as cv
import sys
import argparse
if __name__ == '__main__':
    # Command-line interface: every stage of the stitching pipeline
    # (registration, bundle adjustment, warping, compositing) is tunable.
    # NOTE(review): some help strings quote different defaults than the actual
    # `default=` values below (e.g. --warp, --seam, --expos_comp) — the
    # `default=` arguments are authoritative.
    parser = argparse.ArgumentParser(description='stitching_detailed')
    parser.add_argument('img_names', nargs='+',help='files to stitch',type=str)
    parser.add_argument('--preview',help='Run stitching in the preview mode. Works faster than usual mode but output image will have lower resolution.',type=bool,dest = 'preview' )
    parser.add_argument('--try_cuda',action = 'store', default = False,help='Try to use CUDA. The default value is no. All default values are for CPU mode.',type=bool,dest = 'try_cuda' )
    # --- motion estimation options ---
    parser.add_argument('--work_megapix',action = 'store', default = 0.6,help=' Resolution for image registration step. The default is 0.6 Mpx',type=float,dest = 'work_megapix' )
    parser.add_argument('--features',action = 'store', default = 'orb',help='Type of features used for images matching. The default is orb.',type=str,dest = 'features' )
    parser.add_argument('--matcher',action = 'store', default = 'homography',help='Matcher used for pairwise image matching.',type=str,dest = 'matcher' )
    parser.add_argument('--estimator',action = 'store', default = 'homography',help='Type of estimator used for transformation estimation.',type=str,dest = 'estimator' )
    parser.add_argument('--match_conf',action = 'store', default = 0.3,help='Confidence for feature matching step. The default is 0.65 for surf and 0.3 for orb.',type=float,dest = 'match_conf' )
    parser.add_argument('--conf_thresh',action = 'store', default = 1.0,help='Threshold for two images are from the same panorama confidence.The default is 1.0.',type=float,dest = 'conf_thresh' )
    parser.add_argument('--ba',action = 'store', default = 'ray',help='Bundle adjustment cost function. The default is ray.',type=str,dest = 'ba' )
    parser.add_argument('--ba_refine_mask',action = 'store', default = 'xxxxx',help='Set refinement mask for bundle adjustment. mask is "xxxxx"',type=str,dest = 'ba_refine_mask' )
    parser.add_argument('--wave_correct',action = 'store', default = 'horiz',help='Perform wave effect correction. The default is "horiz"',type=str,dest = 'wave_correct' )
    parser.add_argument('--save_graph',action = 'store', default = None,help='Save matches graph represented in DOT language to <file_name> file.',type=str,dest = 'save_graph' )
    # --- compositing options ---
    parser.add_argument('--warp',action = 'store', default = 'plane',help='Warp surface type. The default is "spherical".',type=str,dest = 'warp' )
    parser.add_argument('--seam_megapix',action = 'store', default = 0.1,help=' Resolution for seam estimation step. The default is 0.1 Mpx.',type=float,dest = 'seam_megapix' )
    parser.add_argument('--seam',action = 'store', default = 'no',help='Seam estimation method. The default is "gc_color".',type=str,dest = 'seam' )
    parser.add_argument('--compose_megapix',action = 'store', default = -1,help='Resolution for compositing step. Use -1 for original resolution.',type=float,dest = 'compose_megapix' )
    parser.add_argument('--expos_comp',action = 'store', default = 'no',help='Exposure compensation method. The default is "gain_blocks".',type=str,dest = 'expos_comp' )
    parser.add_argument('--expos_comp_nr_feeds',action = 'store', default = 1,help='Number of exposure compensation feed.',type=np.int32,dest = 'expos_comp_nr_feeds' )
    parser.add_argument('--expos_comp_nr_filtering',action = 'store', default = 2,help='Number of filtering iterations of the exposure compensation gains',type=float,dest = 'expos_comp_nr_filtering' )
    parser.add_argument('--expos_comp_block_size',action = 'store', default = 32,help='BLock size in pixels used by the exposure compensator.',type=np.int32,dest = 'expos_comp_block_size' )
    parser.add_argument('--blend',action = 'store', default = 'multiband',help='Blending method. The default is "multiband".',type=str,dest = 'blend' )
    parser.add_argument('--blend_strength',action = 'store', default = 5,help='Blending strength from [0,100] range.',type=np.int32,dest = 'blend_strength' )
    parser.add_argument('--output',action = 'store', default = 'result.jpg',help='The default is "result.jpg"',type=str,dest = 'output' )
    parser.add_argument('--timelapse',action = 'store', default = None,help='Output warped images separately as frames of a time lapse movie, with "fixed_" prepended to input file names.',type=str,dest = 'timelapse' )
    parser.add_argument('--rangewidth',action = 'store', default = -1,help='uses range_width to limit number of images to match with.',type=int,dest = 'rangewidth' )
    args = parser.parse_args()
    # Unpack parsed arguments into module-level working variables.
    img_names=args.img_names
    print(img_names)
    preview = args.preview
    try_cuda = args.try_cuda
    work_megapix = args.work_megapix
    seam_megapix = args.seam_megapix
    compose_megapix = args.compose_megapix
    conf_thresh = args.conf_thresh
    features_type = args.features
    matcher_type = args.matcher
    estimator_type = args.estimator
    ba_cost_func = args.ba
    ba_refine_mask = args.ba_refine_mask
    wave_correct = args.wave_correct
    if wave_correct=='no':
        do_wave_correct= False
    else:
        do_wave_correct=True
    # save_graph_to is only defined when graph saving is requested.
    if args.save_graph is None:
        save_graph = False
    else:
        save_graph =True
        save_graph_to = args.save_graph
    warp_type = args.warp
    # Map the --expos_comp string onto the cv.detail compensator enum.
    if args.expos_comp=='no':
        expos_comp_type = cv.detail.ExposureCompensator_NO
    elif args.expos_comp=='gain':
        expos_comp_type = cv.detail.ExposureCompensator_GAIN
    elif args.expos_comp=='gain_blocks':
        expos_comp_type = cv.detail.ExposureCompensator_GAIN_BLOCKS
    elif args.expos_comp=='channel':
        expos_comp_type = cv.detail.ExposureCompensator_CHANNELS
    elif args.expos_comp=='channel_blocks':
        expos_comp_type = cv.detail.ExposureCompensator_CHANNELS_BLOCKS
    else:
        print("Bad exposure compensation method")
        exit()
    expos_comp_nr_feeds = args.expos_comp_nr_feeds
    expos_comp_nr_filtering = args.expos_comp_nr_filtering
    expos_comp_block_size = args.expos_comp_block_size
    match_conf = args.match_conf
    seam_find_type = args.seam
    blend_type = args.blend
    blend_strength = args.blend_strength
    result_name = args.output
    # Time-lapse mode writes each warped frame separately instead of blending.
    if args.timelapse is not None:
        timelapse = True
        if args.timelapse=="as_is":
            timelapse_type = cv.detail.Timelapser_AS_IS
        elif args.timelapse=="crop":
            timelapse_type = cv.detail.Timelapser_CROP
        else:
            print("Bad timelapse method")
            exit()
    else:
        timelapse= False
    range_width = args.rangewidth
    # --- Registration stage: choose a feature detector/descriptor. ---
    # surf/sift require the opencv_contrib xfeatures2d module.
    if features_type=='orb':
        finder= cv.ORB.create()
    elif features_type=='surf':
        finder= cv.xfeatures2d_SURF.create()
    elif features_type=='sift':
        finder= cv.xfeatures2d_SIFT.create()
    else:
        print ("Unknown descriptor type")
        exit()
    seam_work_aspect = 1
    full_img_sizes=[]
    features=[]
    images=[]
    is_work_scale_set = False
    is_seam_scale_set = False
    is_compose_scale_set = False;
    # Load every input image, compute features at work resolution, and keep a
    # seam-resolution copy for later seam estimation.
    for name in img_names:
        full_img = cv.imread(name)
        if full_img is None:
            print("Cannot read image ",name)
            exit()
        full_img_sizes.append((full_img.shape[1],full_img.shape[0]))
        if work_megapix < 0:
            img = full_img
            work_scale = 1
            is_work_scale_set = True
        else:
            # Scale so the image area is at most work_megapix megapixels.
            if is_work_scale_set is False:
                work_scale = min(1.0, np.sqrt(work_megapix * 1e6 / (full_img.shape[0]*full_img.shape[1])))
                is_work_scale_set = True
            img = cv.resize(src=full_img, dsize=None, fx=work_scale, fy=work_scale, interpolation=cv.INTER_LINEAR_EXACT)
        if is_seam_scale_set is False:
            seam_scale = min(1.0, np.sqrt(seam_megapix * 1e6 / (full_img.shape[0]*full_img.shape[1])))
            seam_work_aspect = seam_scale / work_scale
            is_seam_scale_set = True
        imgFea= cv.detail.computeImageFeatures2(finder,img)
        features.append(imgFea)
        img = cv.resize(src=full_img, dsize=None, fx=seam_scale, fy=seam_scale, interpolation=cv.INTER_LINEAR_EXACT)
        images.append(img)
    # --- Pairwise feature matching between all (or range-limited) image pairs. ---
    if matcher_type== "affine":
        matcher = cv.detail_AffineBestOf2NearestMatcher(False, try_cuda, match_conf)
    elif range_width==-1:
        matcher = cv.detail.BestOf2NearestMatcher_create(try_cuda, match_conf)
    else:
        matcher = cv.detail.BestOf2NearestRangeMatcher_create(range_width, try_cuda, match_conf)
    p=matcher.apply2(features)
    matcher.collectGarbage()
    # Optionally dump the match graph in DOT format for inspection.
    if save_graph:
        f = open(save_graph_to,"w")
        f.write(cv.detail.matchesGraphAsString(img_names, p, conf_thresh))
        f.close()
    # Keep only the largest connected component of confidently-matched images.
    indices=cv.detail.leaveBiggestComponent(features,p,0.3)
    # Restrict all per-image lists to the retained component.
    img_subset =[]
    img_names_subset=[]
    full_img_sizes_subset=[]
    num_images=len(indices)
    for i in range(len(indices)):
        img_names_subset.append(img_names[indices[i,0]])
        img_subset.append(images[indices[i,0]])
        full_img_sizes_subset.append(full_img_sizes[indices[i,0]])
    images = img_subset;
    img_names = img_names_subset;
    full_img_sizes = full_img_sizes_subset;
    num_images = len(img_names)
    if num_images < 2:
        print("Need more images")
        exit()
    # Estimate initial camera parameters from the pairwise matches.
    if estimator_type == "affine":
        estimator = cv.detail_AffineBasedEstimator()
    else:
        estimator = cv.detail_HomographyBasedEstimator()
    b, cameras =estimator.apply(features,p,None)
    if not b:
        print("Homography estimation failed.")
        exit()
    # Bundle adjustment expects float32 rotation matrices.
    for cam in cameras:
        cam.R=cam.R.astype(np.float32)
    # --- Refine camera parameters with the selected bundle-adjustment cost. ---
    if ba_cost_func == "reproj":
        adjuster = cv.detail_BundleAdjusterReproj()
    elif ba_cost_func == "ray":
        adjuster = cv.detail_BundleAdjusterRay()
    elif ba_cost_func == "affine":
        adjuster = cv.detail_BundleAdjusterAffinePartial()
    elif ba_cost_func == "no":
        adjuster = cv.detail_NoBundleAdjuster()
    else:
        print( "Unknown bundle adjustment cost function: ", ba_cost_func )
        exit()
    adjuster.setConfThresh(1)
    # Build the 3x3 refinement mask from the 5-char --ba_refine_mask string:
    # <fx><skew><ppx><aspect><ppy>, 'x' = refine that parameter.
    refine_mask=np.zeros((3,3),np.uint8)
    if ba_refine_mask[0] == 'x':
        refine_mask[0,0] = 1
    if ba_refine_mask[1] == 'x':
        refine_mask[0,1] = 1
    if ba_refine_mask[2] == 'x':
        refine_mask[0,2] = 1
    if ba_refine_mask[3] == 'x':
        refine_mask[1,1] = 1
    if ba_refine_mask[4] == 'x':
        refine_mask[1,2] = 1
    adjuster.setRefinementMask(refine_mask)
    b,cameras = adjuster.apply(features,p,cameras)
    if not b:
        print("Camera parameters adjusting failed.")
        exit()
focals=[]
for cam in cameras:
focals.append(cam.focal)
sorted(focals)
if len(focals)%2==1:
warped_image_scale = focals[len(focals) // 2]
else:
warped_image_scale = (focals[len(focals) // 2]+focals[len(focals) // 2-1])/2
    # Optional wave-effect correction: straighten the panorama horizon by
    # adjusting all camera rotations jointly.
    if do_wave_correct:
        rmats=[]
        for cam in cameras:
            rmats.append(np.copy(cam.R))
        rmats = cv.detail.waveCorrect( rmats, cv.detail.WAVE_CORRECT_HORIZ)
        for idx,cam in enumerate(cameras):
            cam.R = rmats[idx]
    # --- Warp images and masks at seam resolution; feed exposure compensator. ---
    corners=[]
    mask=[]
    masks_warped=[]
    images_warped=[]
    sizes=[]
    masks=[]
    # Full-coverage (all-255) mask per image, warped alongside the image.
    for i in range(0,num_images):
        um=cv.UMat(255*np.ones((images[i].shape[0],images[i].shape[1]),np.uint8))
        masks.append(um)
    warper = cv.PyRotationWarper(warp_type,warped_image_scale*seam_work_aspect) # TODO: can the warper be null here?
    for idx in range(0,num_images):
        K = cameras[idx].K().astype(np.float32)
        # Scale the intrinsics from work resolution down to seam resolution.
        swa = seam_work_aspect
        K[0,0] *= swa
        K[0,2] *= swa
        K[1,1] *= swa
        K[1,2] *= swa
        corner,image_wp =warper.warp(images[idx],K,cameras[idx].R,cv.INTER_LINEAR, cv.BORDER_REFLECT)
        corners.append(corner)
        sizes.append((image_wp.shape[1],image_wp.shape[0]))
        images_warped.append(image_wp)
        p,mask_wp =warper.warp(masks[idx],K,cameras[idx].R,cv.INTER_NEAREST, cv.BORDER_CONSTANT)
        masks_warped.append(mask_wp.get())
    # Seam finding works on float32 images.
    images_warped_f=[]
    for img in images_warped:
        imgf=img.astype(np.float32)
        images_warped_f.append(imgf)
    if cv.detail.ExposureCompensator_CHANNELS == expos_comp_type:
        compensator = cv.detail_ChannelsCompensator(expos_comp_nr_feeds)
    # compensator.setNrGainsFilteringIterations(expos_comp_nr_filtering)
    elif cv.detail.ExposureCompensator_CHANNELS_BLOCKS == expos_comp_type:
        compensator=cv.detail_BlocksChannelsCompensator(expos_comp_block_size, expos_comp_block_size,expos_comp_nr_feeds)
    # compensator.setNrGainsFilteringIterations(expos_comp_nr_filtering)
    else:
        compensator=cv.detail.ExposureCompensator_createDefault(expos_comp_type)
    # Learn per-image exposure gains from the warped images and masks.
    compensator.feed(corners=corners, images=images_warped, masks=masks_warped)
    # --- Seam estimation: decide where adjacent warped images should meet. ---
    # NOTE(review): an unrecognized --seam value leaves seam_finder unassigned,
    # so the `is None` guard below raises NameError instead of printing.
    if seam_find_type == "no":
        seam_finder = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_NO)
    elif seam_find_type == "voronoi":
        seam_finder = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_VORONOI_SEAM);
    elif seam_find_type == "gc_color":
        seam_finder = cv.detail_GraphCutSeamFinder("COST_COLOR")
    elif seam_find_type == "gc_colorgrad":
        seam_finder = cv.detail_GraphCutSeamFinder("COST_COLOR_GRAD")
    elif seam_find_type == "dp_color":
        seam_finder = cv.detail_DpSeamFinder("COLOR")
    elif seam_find_type == "dp_colorgrad":
        seam_finder = cv.detail_DpSeamFinder("COLOR_GRAD")
    if seam_finder is None:
        print("Can't create the following seam finder ",seam_find_type)
        exit()
    # find() updates masks_warped in place to carve out the seams.
    seam_finder.find(images_warped_f, corners,masks_warped )
    # --- Compositing: re-warp at full (compose) resolution, compensate
    # exposure, apply seam masks, and blend (or emit time-lapse frames). ---
    imgListe=[]
    compose_scale=1
    corners=[]
    sizes=[]
    images_warped=[]
    images_warped_f=[]
    masks=[]
    blender= None
    timelapser=None
    compose_work_aspect=1
    for idx,name in enumerate(img_names): # https://github.com/opencv/opencv/blob/master/samples/cpp/stitching_detailed.cpp#L725 ?
        full_img = cv.imread(name)
        # On the first image: fix the compose scale, rescale camera intrinsics
        # from work resolution, and precompute each image's warped ROI.
        if not is_compose_scale_set:
            if compose_megapix > 0:
                compose_scale = min(1.0, np.sqrt(compose_megapix * 1e6 / (full_img.shape[0]*full_img.shape[1])))
            is_compose_scale_set = True;
            compose_work_aspect = compose_scale / work_scale;
            warped_image_scale *= compose_work_aspect
            warper =  cv.PyRotationWarper(warp_type,warped_image_scale)
            for i in range(0,len(img_names)):
                cameras[i].focal *= compose_work_aspect
                cameras[i].ppx *= compose_work_aspect
                cameras[i].ppy *= compose_work_aspect
                sz = (full_img_sizes[i][0] * compose_scale,full_img_sizes[i][1]* compose_scale)
                K = cameras[i].K().astype(np.float32)
                roi = warper.warpRoi(sz, K, cameras[i].R);
                corners.append(roi[0:2])
                sizes.append(roi[2:4])
        if abs(compose_scale - 1) > 1e-1:
            img =cv.resize(src=full_img, dsize=None, fx=compose_scale, fy=compose_scale, interpolation=cv.INTER_LINEAR_EXACT)
        else:
            img = full_img;
        img_size = (img.shape[1],img.shape[0]);
        K=cameras[idx].K().astype(np.float32)
        corner,image_warped =warper.warp(img,K,cameras[idx].R,cv.INTER_LINEAR, cv.BORDER_REFLECT)
        mask =255*np.ones((img.shape[0],img.shape[1]),np.uint8)
        p,mask_warped =warper.warp(mask,K,cameras[idx].R,cv.INTER_NEAREST, cv.BORDER_CONSTANT)
        # Apply the exposure gains learned during the seam stage.
        compensator.apply(idx,corners[idx],image_warped,mask_warped)
        # Blender input must be int16.
        image_warped_s = image_warped.astype(np.int16)
        image_warped=[]
        # Upscale the seam-resolution mask and intersect with the full mask.
        dilated_mask = cv.dilate(masks_warped[idx],None)
        seam_mask = cv.resize(dilated_mask,(mask_warped.shape[1],mask_warped.shape[0]),0,0,cv.INTER_LINEAR_EXACT)
        mask_warped = cv.bitwise_and(seam_mask,mask_warped)
        # Lazily create the blender (or timelapser) on the first frame.
        if blender==None and not timelapse:
            blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO)
            dst_sz = cv.detail.resultRoi(corners=corners,sizes=sizes)
            blend_width = np.sqrt(dst_sz[2]*dst_sz[3]) * blend_strength / 100
            if blend_width < 1:
                blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO)
            elif blend_type == "multiband":
                blender = cv.detail_MultiBandBlender()
                blender.setNumBands((np.log(blend_width)/np.log(2.) - 1.).astype(np.int))
            elif blend_type == "feather":
                blender = cv.detail_FeatherBlender()
                blender.setSharpness(1./blend_width)
            blender.prepare(dst_sz)
        elif timelapser==None and timelapse:
            timelapser = cv.detail.Timelapser_createDefault(timelapse_type)
            timelapser.initialize(corners, sizes)
        if timelapse:
            # Time-lapse mode: write each warped frame as "fixed_<name>".
            matones=np.ones((image_warped_s.shape[0],image_warped_s.shape[1]), np.uint8)
            timelapser.process(image_warped_s, matones, corners[idx])
            pos_s = img_names[idx].rfind("/");
            if pos_s == -1:
                fixedFileName = "fixed_" + img_names[idx];
            else:
                fixedFileName = img_names[idx][:pos_s + 1 ]+"fixed_" + img_names[idx][pos_s + 1: ]
            cv.imwrite(fixedFileName, timelapser.getDst())
        else:
            blender.feed(cv.UMat(image_warped_s), mask_warped, corners[idx])
    # Final blend: write the stitched result and show a 600px-wide preview.
    if not timelapse:
        result=None
        result_mask=None
        result,result_mask = blender.blend(result,result_mask)
        cv.imwrite(result_name,result)
        zoomx =600/result.shape[1]
        dst=cv.normalize(src=result,dst=None,alpha=255.,norm_type=cv.NORM_MINMAX,dtype=cv.CV_8U)
        dst=cv.resize(dst,dsize=None,fx=zoomx,fy=zoomx)
        cv.imshow(result_name,dst)
        cv.waitKey()
| 51.229829 | 261 | 0.675607 |
dd9e68adc26b47491545dede127ab942856497e3 | 10,718 | py | Python | nrgpy/convert_rwd.py | abohara/nrgpy | 375ec47ab4ada82552c6942602b26e73071277aa | [
"MIT"
] | null | null | null | nrgpy/convert_rwd.py | abohara/nrgpy | 375ec47ab4ada82552c6942602b26e73071277aa | [
"MIT"
] | null | null | null | nrgpy/convert_rwd.py | abohara/nrgpy | 375ec47ab4ada82552c6942602b26e73071277aa | [
"MIT"
] | null | null | null | #!/bin/usr/python
import datetime
import time
import os
import pandas as pd
import pathlib
import subprocess
import shutil
from nrgpy.utilities import check_platform, windows_folder_path, linux_folder_path, affirm_directory, count_files, draw_progress_bar
class local(object):
    """nrgpy.convert_rwd.local - use local installation of Symphonie Data Retriever (SDR)
    to convert *.RWD files to *.TXT

    Parameters
    ----------
    filename : str
        if populated, a single file is exported
    encryption_pin : str
        four digit pin, only used for encrypted files
    sdr_path : str
        r'"C:/NRG/SymDR/SDR.exe"', may be any path
    file_filter : str
        filters files on text in filename
    rwd_dir : str
        folder to check for RWD files
    out_dir : str
        folder to save exported TXT files into
    wine_folder : str
        '~/.wine/drive_c/', for linux installations
    use_site_file : bool
        set to True to use local site file
    raw_mode : bool
        set to True to convert raw counts and voltages
    progress_bar : bool
        set to False to see individual file conversions
    show_result : bool
        set to False to hide prints to console

    Returns
    -------
    None

    Examples
    --------
    Convert a folder of RWD files to Text with SymphoniePRO Desktop Software

    >>> from nrgpy.convert_rwd import local
    >>> converter = local(
            rwd_dir='/path/to/rwd/files',
            out_dir=/path/to/txt/outputs,
            file_filter='1234202001', # for files from January 2020
        )
    >>> converter.convert()


    Convert a folder ... on Linux; this assumes you followed the instructions
    in the '''SDR_Linux_README.md''' file.

    >>> import getpass
    >>> import nrgpy
    >>> username = getpass.getuser()
    >>> rwd_dir = f"/home/{username}/data/symplus3/raw"
    >>> txt_dir = f"/home/{username}/data/symplus3/export"
    >>> wine_folder = f"/home/{username}/prefix32/drive_c"
    >>> converter = nrgpy.convert_rwd.local(
            rwd_dir=rwd_dir,
            out_dir=txt_dir,
            wine_folder=wine_folder
        )
    >>> converter.convert()
    """

    def __init__(self, rwd_dir='', out_dir='', filename='', encryption_pin='',
                 sdr_path=r'C:/NRG/SymDR/SDR.exe',
                 convert_type='meas', file_filter='',
                 wine_folder='~/.wine/drive_c/',
                 use_site_file=False, raw_mode=False, progress_bar=True, show_result=True, **kwargs):

        # Pick the SDR command-line switch matching the requested conversion mode.
        if encryption_pin != '':
            self.command_switch = '/z'  # noqueue with pin
        else:
            self.command_switch = '/q'  # noqueue (logger params)
        if use_site_file:
            self.command_switch = '/s'  # silent (site file params)
        if raw_mode:
            self.command_switch = '/r'  # silent (site file params)

        self.filename = filename
        self.progress_bar = progress_bar
        self.encryption_pin = encryption_pin
        self.sdr_path = windows_folder_path(sdr_path)[:-1]
        # SDR's working directories are derived from its install location.
        self.root_folder = "\\".join(self.sdr_path.split('\\')[:-2])
        self.RawData = self.root_folder + '\\RawData\\'
        self.ScaledData = self.root_folder + '\\ScaledData\\'
        self.file_filter = file_filter

        # Backward compatibility: accept the old 'site_filter' keyword.
        if 'site_filter' in kwargs and file_filter == '':
            self.file_filter = kwargs.get('site_filter')

        self.rwd_dir = windows_folder_path(rwd_dir)  # rwd_dir must be in Windows format, even if using Wine
        self.show_result = show_result
        self.platform = check_platform()
        self.wine_folder = wine_folder
        self.check_sdr()

        if self.platform == 'win32':
            self.out_dir = windows_folder_path(out_dir)
            self.file_path_joiner = '\\'
        else:
            self.out_dir = linux_folder_path(out_dir)
            self.file_path_joiner = '/'

        # Single-file mode: convert immediately on construction.
        if self.filename:
            self.counter = 1
            self.rwd_dir = os.path.dirname(self.filename)
            self.file_filter = os.path.basename(self.filename)
            self.convert()

    def check_sdr(self):
        """Determine if SDR is installed and set self.sdr_ok accordingly."""
        if self.platform == 'win32':
            # Bug fix: os.path.exists never raises for a plain path check, so
            # the original try/except always reported success.  Test the
            # boolean result instead.
            self.sdr_ok = os.path.exists(self.sdr_path)
            if not self.sdr_ok:
                print('SDR not installed. Please install SDR or check path.\nhttps://www.nrgsystems.com/support/product-support/software/symphonie-data-retriever-software')

        else:
            # Linux: SDR runs under Wine; verify wine exists and SDR starts.
            try:
                subprocess.check_output(['wine','--version'])
            except (FileNotFoundError, NotADirectoryError):
                print('System not configured for running SDR.\n Please follow instructions in SDR_Linux_README.md to enable.')

            try:
                subprocess.check_output(['wine',self.sdr_path,'/s','test.rwd'])
                affirm_directory(os.path.join(self.wine_folder, "NRG/ScaledData"))
                self.sdr_ok = True
                os.remove(os.path.join(self.wine_folder, "NRG/ScaledData/test.log"))
            except Exception:
                self.sdr_ok = False
                print('SDR unable to start')
                import traceback
                print(traceback.format_exc())

    def convert(self):
        """Process rwd files.

        create list of RWD files that match filtering
        copy RWD files to RawData directory
        iterate through files
        """
        affirm_directory(self.out_dir)
        self._list_files()
        self._copy_rwd_files()
        self.raw_count = len(self.rwd_file_list)
        self.pad = len(str(self.raw_count)) + 1
        self.counter = 1
        self.convert_time = time.time()
        self.start_time = datetime.datetime.now()

        for f in sorted(self.rwd_file_list):
            site_num = f[:4]
            try:
                self._filename = "\\".join([self.RawData+site_num,f])
                self._single_file()
            except Exception:
                print('file conversion failed on {}'.format(self._filename))
            self.counter += 1

        # Summarize conversion results when more than one file was processed.
        if self.raw_count > 1:
            txt_count = count_files(self.out_dir, self.file_filter.split(".")[0], 'txt', start_time=self.convert_time)
            log_count, log_files = count_files(self.out_dir, self.file_filter, 'log', show_files=True, start_time=self.convert_time)

            print('\n\nRWDs in    : {}'.format(self.raw_count))
            print('TXTs out   : {}'.format(txt_count))
            print('LOGs out   : {}'.format(log_count))

            if len(log_files) > 0:
                print('Log files created:')
                for _filename in log_files:
                    print('\t{}'.format(_filename))
            print('----------------\nDifference : {}'.format(self.raw_count - (txt_count + log_count)))

    def _list_files(self):
        """Get list of RWD files in rwd_dir matching the file filter."""
        self.dir_paths = []
        self.rwd_file_list = []

        if self.platform == 'win32':
            walk_path = self.rwd_dir
        else:
            walk_path = linux_folder_path(self.rwd_dir)

        for dirpath, subdirs, files in os.walk(walk_path):
            self.dir_paths.append(dirpath)
            for x in files:
                if x.startswith(self.file_filter) and x.lower().endswith('rwd'):
                    self.rwd_file_list.append(x)

    def _single_file(self):
        """Invoke SDR (optionally via Wine) to convert one RWD file."""
        _f = self._filename

        if self.platform == 'linux':
            self.sdr_path = windows_folder_path(self.sdr_path)[:-1]
            _f = windows_folder_path(_f)[:-1]
            wine = 'wine'
        else:
            wine = ''

        self.cmd = [wine, '"'+self.sdr_path+'"', self.command_switch, self.encryption_pin, '"'+_f+'"']

        try:
            if self.show_result:
                if self.progress_bar:
                    draw_progress_bar(self.counter, self.raw_count, self.start_time)
                else:
                    print("Converting {0}/{1} {2} ... ".format(str(self.counter).rjust(self.pad),str(self.raw_count).ljust(self.pad),_f.split("\\")[-1]), end="", flush=True)

            subprocess.check_output(" ".join(self.cmd), shell=True)

            if not self.progress_bar and not self.show_result: print("[DONE]")

            try:
                self._copy_txt_file()
            except Exception:
                print('unable to copy {} to text folder'.format(_f))

        except Exception:
            if not self.progress_bar and not self.show_result: print("[FAILED]")
            import traceback
            print(traceback.format_exc())

    def _copy_rwd_files(self):
        """Copy RWD files from self.rwd_dir into SDR's RawData site folders."""
        for f in sorted(self.rwd_file_list):
            if self.file_filter in f:
                # The first four filename characters encode the site number.
                site_num = f[:4]
                site_folder = os.path.join(self.RawData,site_num)
                if self.platform == 'linux':
                    site_folder = ''.join([self.wine_folder,'/NRG/RawData/',site_num])
                try:
                    affirm_directory(site_folder)
                except Exception:
                    print("couldn't create {}".format(site_folder))
                try:
                    shutil.copy(os.path.join(self.rwd_dir, f), os.path.join(site_folder))
                except Exception:
                    print('unable to copy file to RawData folder:  {}'.format(f))

    def _copy_txt_file(self):
        """Copy converted TXT file from SDR's ScaledData folder to self.out_dir."""
        try:
            txt_file_name = os.path.basename(self._filename)[:-4] + '.txt'
            txt_file_path = os.path.join(self.ScaledData,txt_file_name)
            out_path = self.file_path_joiner.join([self.out_dir,txt_file_name])
        except Exception:
            print("could not do the needful")
        if self.platform == 'linux':
            out_path = linux_folder_path(self.out_dir) + txt_file_name
            txt_file_path = ''.join([self.wine_folder, '/NRG/ScaledData/',txt_file_name])
        try:
            shutil.copy(txt_file_path, out_path)
            try:
                # Keep ScaledData clean; warn (don't fail) if removal fails.
                os.remove(txt_file_path)
            except Exception:
                print("{0} remains in {1}".format(txt_file_name, self.ScaledData))
        except Exception:
            import traceback
            print(traceback.format_exc())
            print("Unable to copy {0} to {1}".format(txt_file_name,self.out_dir))
| 33.598746 | 177 | 0.570536 |
eb5bbd89670cb35f8dfe012811b328c0c408b84a | 878 | py | Python | setup.py | RyanSiu1995/memory-pyofiler | e261652f7070cfd77ec13e0bafe9e788bb95dd2d | [
"MIT"
] | null | null | null | setup.py | RyanSiu1995/memory-pyofiler | e261652f7070cfd77ec13e0bafe9e788bb95dd2d | [
"MIT"
] | null | null | null | setup.py | RyanSiu1995/memory-pyofiler | e261652f7070cfd77ec13e0bafe9e788bb95dd2d | [
"MIT"
] | null | null | null | """
setup.py is the main file to define this
package
"""
import setuptools
# Use the repository README as the long description shown on PyPI.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="memory-pyofiler",
    version="0.0.1",
    author="Ryan Siu",
    author_email="findme@ryansiulw.com",
    description="Prometheus-based memory profiler",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/RyanSiu1995/memory-pyofiler",
    project_urls={
        "Bug Tracker": "https://github.com/RyanSiu1995/memory-pyofiler/issues",
    },
    # Runtime dependencies: Prometheus metrics export and object graphing.
    install_requires=[
        "prometheus-client",
        "objgraph",
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.6",
)
| 26.606061 | 79 | 0.646925 |
96d9cbf3cb800192673e9e9895bde24188a98414 | 1,479 | bzl | Python | src/main/starlark/builtins_bzl/common/cc/cc_binary_wrapper.bzl | Dyryamo/bazel | c60eb5d324da4d81f6be93f442ac6d7576741e8e | [
"Apache-2.0"
] | 1 | 2022-03-22T11:55:06.000Z | 2022-03-22T11:55:06.000Z | src/main/starlark/builtins_bzl/common/cc/cc_binary_wrapper.bzl | Dyryamo/bazel | c60eb5d324da4d81f6be93f442ac6d7576741e8e | [
"Apache-2.0"
] | 3 | 2022-03-23T17:53:44.000Z | 2022-03-23T17:54:01.000Z | src/main/starlark/builtins_bzl/common/cc/cc_binary_wrapper.bzl | Dyryamo/bazel | c60eb5d324da4d81f6be93f442ac6d7576741e8e | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Macro encapsulating cc_binary rule implementation.
This is to avoid propagating aspect on certain attributes in case
dynamic_deps attribute is not specified.
"""
load(":common/cc/cc_binary_with_aspects.bzl", cc_binary_with_aspects = "cc_binary")
load(":common/cc/cc_binary_without_aspects.bzl", cc_binary_without_aspects = "cc_binary")
def _is_non_empty_list_or_select(value, attr):
    """Returns True for a non-empty list or for any select() expression.

    A select() cannot be inspected at loading time, so it is conservatively
    treated as non-empty.  Any other attribute type is a usage error.
    """
    if type(value) == "list":
        return len(value) > 0
    elif type(value) == "select":
        return True
    else:
        fail("Only select or list is valid for {} attr".format(attr))
def cc_binary(**kwargs):
    """Macro wrapping the cc_binary rule implementation.

    Dispatches to the aspect-propagating variant only when a non-empty
    `dynamic_deps` attribute was supplied; otherwise uses the variant
    that avoids propagating aspects.
    """
    wants_aspects = (
        "dynamic_deps" in kwargs and
        _is_non_empty_list_or_select(kwargs["dynamic_deps"], "dynamic_deps")
    )
    if wants_aspects:
        cc_binary_with_aspects(**kwargs)
    else:
        cc_binary_without_aspects(**kwargs)
| 38.921053 | 105 | 0.743746 |
d691267f5be6691decd6fecb7860f03355f09e97 | 2,502 | py | Python | gallery/ut_index.py | nrrpinto/construct | cfc980c6edfbe33c56015b736f59fb3155b51317 | [
"MIT"
] | 629 | 2015-01-06T03:01:56.000Z | 2022-03-23T13:13:26.000Z | gallery/ut_index.py | nrrpinto/construct | cfc980c6edfbe33c56015b736f59fb3155b51317 | [
"MIT"
] | 897 | 2015-02-28T15:46:06.000Z | 2022-03-30T08:19:13.000Z | gallery/ut_index.py | nrrpinto/construct | cfc980c6edfbe33c56015b736f59fb3155b51317 | [
"MIT"
] | 151 | 2015-01-08T16:36:24.000Z | 2022-03-10T16:59:49.000Z | from construct import *
from construct.lib import *
class UTIndex(Construct):
    """
    Format for "Index" objects in Unreal Tournament 1999 packages.

    Index objects are variable length signed integers with the following structure:

    +------------------------------------+-------------------------+--------------+
    | Byte 0                             | Bytes 1-3               | Byte 4       |
    +----------+----------+--------------+----------+--------------+--------------+
    | Sign Bit | More Bit | Data Bits[6] | More Bit | Data Bits[7] | Data Bits[8] |
    +----------+----------+--------------+----------+--------------+--------------+

    If the "More" bit is 0 in any byte, that's the end of the Index. Otherwise,
    keep going. There cannot be more than 5 bytes in an Index so Byte 4 doesn't
    have a "More" bit.
    """

    # Number of data bits carried by byte i (matches the table above).
    lengths = {0: 6, 1: 7, 2: 7, 3: 7, 4: 8}
    # The sign bit lives in the very first byte only.
    negative_bit = 0x80

    @staticmethod
    def _get_data_mask(length):
        """Return a mask selecting the low `length` data bits of a byte."""
        return (0xFF ^ (0xFF << length)) & 0xFF

    @staticmethod
    def _get_more_bit(length):
        """Return the continuation ("More") bit for a byte carrying `length` data bits."""
        return 1 << length

    def _parse(self, stream, context, path):
        """Read one variable-length signed integer from `stream`.

        Consumes bytes until one without the continuation bit is seen,
        OR-ing each byte's data bits into `result` at an increasing bit
        offset (`depth`), then applies the sign taken from byte 0.
        """
        result = 0
        sign = 1
        i = 0       # index of the byte currently being decoded (picks its bit layout)
        depth = 0   # bit offset at which this byte's data lands in `result`
        while True:
            length = self.lengths[i]
            bits = byte2int(stream_read(stream, 1, path))
            mask = self._get_data_mask(length)
            data = bits & mask
            more = self._get_more_bit(length) & bits
            # Only byte 0 carries the sign bit.
            if (i == 0) and (self.negative_bit & bits):
                sign = -1
            result |= data << depth
            if not more:
                break
            i += 1
            depth += length
        return sign * result

    def _build(self, obj, stream, context, path):
        """Write `obj` to `stream` in the variable-length index encoding.

        Emits up to 5 bytes, stopping after the first byte that leaves no
        remaining value. Returns the original value.

        NOTE(review): after the 5th byte any leftover bits of `to_write` are
        silently dropped (`byte &= 0xFF` discards the would-be continuation
        bit) -- confirm callers never pass values wider than 37 bits.
        """
        if not isinstance(obj, integertypes):
            raise IntegerError("Value is not an integer")
        to_write = obj
        for i in range(5):
            byte = 0
            length = self.lengths[i]
            if i == 0:
                # First byte: record the sign, then continue encoding |obj|.
                negative = obj < 0
                byte |= self.negative_bit * negative
                if negative:
                    to_write *= -1
            mask = self._get_data_mask(length)
            byte |= to_write & mask
            to_write >>= length
            # False (== 0) when nothing remains, otherwise the continuation bit.
            more_bit = (to_write > 0) and self._get_more_bit(length)
            byte |= more_bit
            byte &= 0xFF
            stream_write(stream, int2byte(byte), 1, path)
            if not more_bit:
                break
        return obj
| 34.273973 | 83 | 0.468425 |
75dd579a85e35f6380d5c2c44dad83fd65fb0073 | 903 | py | Python | modeling/machine_learning/ensembling/bagging.py | kmsk99/data_science_toolbar | d139b83d4547b00249d35ba76da5c063c80f4bc2 | [
"MIT"
] | null | null | null | modeling/machine_learning/ensembling/bagging.py | kmsk99/data_science_toolbar | d139b83d4547b00249d35ba76da5c063c80f4bc2 | [
"MIT"
] | null | null | null | modeling/machine_learning/ensembling/bagging.py | kmsk99/data_science_toolbar | d139b83d4547b00249d35ba76da5c063c80f4bc2 | [
"MIT"
from sklearn.ensemble import BaggingClassifier
# Fix: the names below were used without ever being imported, so running this
# snippet stand-alone raised NameError.
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
from sklearn import metrics

# NOTE(review): train_X, train_Y, test_X, test_Y, X and Y are expected to be
# defined by the surrounding notebook/script before this snippet runs -- confirm.

# Bagged KNN: an ensemble of 700 3-NN classifiers trained on bootstrap samples.
model = BaggingClassifier(base_estimator=KNeighborsClassifier(
    n_neighbors=3), random_state=0, n_estimators=700)
model.fit(train_X, train_Y)
prediction = model.predict(test_X)
print('The accuracy for bagged KNN is:',
      metrics.accuracy_score(prediction, test_Y))
result = cross_val_score(model, X, Y, cv=10, scoring='accuracy')
print('The cross validated score for bagged KNN is:', result.mean())

# Bagged DecisionTree: 100 bootstrap-trained decision trees.
model = BaggingClassifier(
    base_estimator=DecisionTreeClassifier(), random_state=0, n_estimators=100)
model.fit(train_X, train_Y)
prediction = model.predict(test_X)
print('The accuracy for bagged Decision Tree is:',
      metrics.accuracy_score(prediction, test_Y))
result = cross_val_score(model, X, Y, cv=10, scoring='accuracy')
print('The cross validated score for bagged Decision Tree is:', result.mean())
| 41.045455 | 78 | 0.774086 |
fbc582e54dd387b2799f5a8f25b17d1608c69cde | 7,776 | py | Python | docs/conf.py | lsiksous/mauviette | 06a985846b34929f22396ed16f3b4d2647025f21 | [
"FTL"
] | null | null | null | docs/conf.py | lsiksous/mauviette | 06a985846b34929f22396ed16f3b4d2647025f21 | [
"FTL"
] | null | null | null | docs/conf.py | lsiksous/mauviette | 06a985846b34929f22396ed16f3b4d2647025f21 | [
"FTL"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Sphinx build configuration for the mauviette documentation
# (originally generated by sphinx-quickstart).
#
# Only the settings this project actually sets are listed; any Sphinx option
# not assigned here keeps its documented default.

# Kept from the generated file: handy for sys.path tweaks when autodoc
# modules live outside the docs directory.
import os
import sys

# e.g. sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ----------------------------------------------------

extensions = []                      # no Sphinx extensions enabled
templates_path = ['_templates']      # custom template directory
source_suffix = '.rst'               # reStructuredText sources
master_doc = 'index'                 # root document of the toctree

project = u'mauviette'
version = '0.1'                      # short X.Y version
release = '0.1'                      # full version, including alpha/beta/rc tags

exclude_patterns = ['_build']        # never treat build output as a source
pygments_style = 'sphinx'            # syntax-highlighting style

# -- Options for HTML output --------------------------------------------------

html_theme = 'default'
html_static_path = ['_static']
htmlhelp_basename = 'mauviettedoc'   # base name for HTML help builder output

# -- Options for LaTeX output -------------------------------------------------

# No LaTeX overrides (paper size, point size, preamble) are customized.
latex_elements = {
}

# (source start file, target name, title, author, documentclass).
latex_documents = [
    ('index',
     'mauviette.tex',
     u'mauviette Documentation',
     u"Laurent Siksous", 'manual'),
]

# -- Options for manual page output -------------------------------------------

# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'mauviette', u'mauviette Documentation',
     [u"Laurent Siksous"], 1)
]

# -- Options for Texinfo output -----------------------------------------------

# (source start file, target name, title, author, dir menu entry,
#  description, category).
texinfo_documents = [
    ('index', 'mauviette', u'mauviette Documentation',
     u"Laurent Siksous", 'mauviette',
     'Movie Recommendation System', 'Miscellaneous'),
]
| 31.738776 | 80 | 0.707305 |
1420a4597235f4d2eb2633e228885818b2ad478d | 3,410 | py | Python | benchmark/startCirq918.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startCirq918.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startCirq918.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=5
# total number=41
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Build the generated 5-qubit benchmark circuit.

    NOTE(review): `n` is never used in the body -- the circuit is fixed by
    the five qubits passed in `input_qubit`; confirm the parameter is only
    kept for a uniform benchmark interface.

    The `# number=NN` tags are generator bookkeeping that identifies each
    gate; they are kept verbatim.
    """
    c = cirq.Circuit()  # circuit begin

    # Initial layer of Hadamards on all five qubits.
    c.append(cirq.H.on(input_qubit[0])) # number=3
    c.append(cirq.H.on(input_qubit[1])) # number=4
    c.append(cirq.H.on(input_qubit[2])) # number=5
    c.append(cirq.H.on(input_qubit[3])) # number=6
    c.append(cirq.H.on(input_qubit[4])) # number=21

    # Two identical iterations of the generated gate sequence.
    for i in range(2):
        c.append(cirq.H.on(input_qubit[0])) # number=1
        c.append(cirq.H.on(input_qubit[1])) # number=2
        c.append(cirq.H.on(input_qubit[4])) # number=27
        c.append(cirq.H.on(input_qubit[2])) # number=7
        c.append(cirq.H.on(input_qubit[3])) # number=8
        c.append(cirq.H.on(input_qubit[0])) # number=17
        c.append(cirq.H.on(input_qubit[2])) # number=34
        c.append(cirq.H.on(input_qubit[2])) # number=37
        c.append(cirq.CZ.on(input_qubit[4],input_qubit[2])) # number=35
        c.append(cirq.H.on(input_qubit[2])) # number=36
        c.append(cirq.H.on(input_qubit[1])) # number=18
        c.append(cirq.H.on(input_qubit[2])) # number=19
        c.append(cirq.H.on(input_qubit[3])) # number=20
        c.append(cirq.H.on(input_qubit[0])) # number=38
        c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=39
        c.append(cirq.H.on(input_qubit[0])) # number=40
        c.append(cirq.X.on(input_qubit[0])) # number=29
        c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=30
        c.append(cirq.X.on(input_qubit[1])) # number=10
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=22
        c.append(cirq.X.on(input_qubit[2])) # number=23
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=24
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=31
        c.append(cirq.X.on(input_qubit[3])) # number=32
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=33
        c.append(cirq.X.on(input_qubit[0])) # number=13
        c.append(cirq.X.on(input_qubit[1])) # number=14
        c.append(cirq.X.on(input_qubit[2])) # number=15
        c.append(cirq.Y.on(input_qubit[1])) # number=26
        c.append(cirq.X.on(input_qubit[3])) # number=16

    # circuit end

    # Terminal measurement of all qubits under a single 'result' key.
    c.append(cirq.measure(*input_qubit, key='result'))

    return c
def bitstring(bits):
    """Render an iterable of bit-like values as a string of '0'/'1' characters."""
    chars = []
    for bit in bits:
        chars.append(str(int(bit)))
    return ''.join(chars)
if __name__ == '__main__':
    # Build the fixed 5-qubit benchmark circuit and compile it for Sycamore.
    qubit_count = 5
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count, input_qubits)
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    # Sample the circuit and histogram the measurement bitstrings.
    circuit_sample_count = 2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    frequencies = result.histogram(key='result', fold_func=bitstring)

    # Fix: use a context manager so the results file is closed even if a
    # write fails (the original open()/close() pair leaked on error).
    with open("../data/startCirq918.csv", "w+") as writefile:
        print(format(frequencies), file=writefile)
        print("results end", file=writefile)
        print(len(circuit), file=writefile)  # idiomatic len() over __len__()
        print(circuit, file=writefile)
bfff64c98f9bd7806dfe492057d519a327a37d77 | 7,494 | py | Python | tests/types/test_song.py | TLINDEN/spotdl-v4 | 30112816ff49e19f76fa54299ff2e94d2d2e65cd | [
"MIT"
] | 3 | 2021-11-24T17:11:16.000Z | 2021-12-19T05:49:38.000Z | tests/types/test_song.py | TLINDEN/spotdl-v4 | 30112816ff49e19f76fa54299ff2e94d2d2e65cd | [
"MIT"
] | 2 | 2021-11-19T20:49:17.000Z | 2021-11-19T20:49:26.000Z | tests/types/test_song.py | TLINDEN/spotdl-v4 | 30112816ff49e19f76fa54299ff2e94d2d2e65cd | [
"MIT"
] | 1 | 2021-12-21T01:35:29.000Z | 2021-12-21T01:35:29.000Z | from spotdl.types.song import Song
from spotdl.utils.spotify import SpotifyClient
import pytest
def test_setup(patch_dependencies):
    """
    Sets up the tests.

    Initializes an anonymous (client-credentials) Spotify session that the
    subsequent Song tests rely on. `patch_dependencies` is a pytest fixture
    -- NOTE(review): presumably it stubs external dependencies; confirm in
    conftest.py. The client id/secret below are committed to the repo --
    confirm they are intentionally shared test credentials.
    """
    SpotifyClient.init(
        client_id="5f573c9620494bae87890c0f08a60293",
        client_secret="212476d9b0f3472eaa762d90b19b0ba8",
        user_auth=False,
    )
def test_song_init():
    """
    Test if Song class is initialized correctly.

    Constructs a Song with placeholder values and checks every attribute
    is stored as given.
    """
    song = Song(
        name="test",
        artists=["test"],
        album_name="test",
        album_artist="test",
        genres=["test"],
        disc_number=1,
        duration=1,
        year=1,
        date="test",
        track_number=1,
        tracks_count=1,
        isrc="test",
        song_id="test",
        cover_url="test",
        explicit=True,
        download_url="test",
        artist="test",
        copyright="test",
        disc_count=1,
        publisher="test",
        url="test",
    )

    assert song.name == "test"
    assert song.artists == ["test"]
    assert song.album_name == "test"
    assert song.album_artist == "test"
    assert song.genres == ["test"]
    assert song.disc_number == 1
    assert song.duration == 1
    assert song.year == 1
    assert song.date == "test"
    assert song.track_number == 1
    assert song.tracks_count == 1
    assert song.isrc == "test"
    assert song.song_id == "test"
    assert song.cover_url == "test"
    # Fix: compare to the bool singleton with `is`, not `==` (flake8 E712).
    assert song.explicit is True
    assert song.download_url == "test"
def test_song_wrong_init():
    """
    Tests if Song class raises exception when initialized with wrong parameters.
    """
    # Only a subset of the fields supplied in test_song_init is passed here
    # (and album_artist gets an int), so construction must fail with TypeError.
    with pytest.raises(TypeError):
        Song(
            name="test",
            artists=["test"],
            album_name="test",
            album_artist=1,
            genres=["test"],
            disc_number=1,
            duration=1,
            year=1,
            date="test",
        )  # type: ignore
@pytest.mark.vcr()
def test_song_from_url():
    """
    Tests if Song.from_url() works correctly.

    Runs against a recorded (VCR) Spotify response for a fixed track URL.
    """
    song = Song.from_url("https://open.spotify.com/track/1t2qKa8K72IBC8yQlhD9bU")

    assert song.name == "Ropes"
    assert song.artists == ["Dirty Palm", "Chandler Jewels"]
    assert song.album_name == "Ropes"
    assert song.album_artist == "Dirty Palm"
    assert song.genres == ["gaming edm", "melbourne bounce international"]
    assert song.disc_number == 1
    assert song.duration == 188
    assert song.year == 2021
    assert song.date == "2021-10-28"
    assert song.track_number == 1
    assert song.tracks_count == 1
    assert song.isrc == "GB2LD2110301"
    assert song.song_id == "1t2qKa8K72IBC8yQlhD9bU"
    assert (
        song.cover_url
        == "https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332"
    )
    # Fix: compare to singletons with `is` (flake8 E712/E711).
    assert song.explicit is False
    assert song.download_url is None
@pytest.mark.vcr()
def test_song_from_search_term():
    """
    Tests if Song.from_search_term() works correctly.

    Runs against a recorded (VCR) Spotify response for a fixed search query.
    """
    song = Song.from_search_term("Dirty Palm - Ropes")

    assert song.name == "Ropes"
    assert song.artists == ["Dirty Palm", "Chandler Jewels"]
    assert song.album_name == "Ropes"
    assert song.album_artist == "Dirty Palm"
    assert song.genres == ["gaming edm", "melbourne bounce international"]
    assert song.disc_number == 1
    assert song.duration == 188
    assert song.year == 2021
    assert song.date == "2021-10-28"
    assert song.track_number == 1
    assert song.tracks_count == 1
    assert song.isrc == "GB2LD2110301"
    assert song.song_id == "1t2qKa8K72IBC8yQlhD9bU"
    assert (
        song.cover_url
        == "https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332"
    )
    # Fix: compare to singletons with `is` (flake8 E712/E711).
    assert song.explicit is False
    assert song.download_url is None
def test_song_from_data_dump():
    """
    Tests if Song.from_data_dump() works correctly.
    """
    # Loads from str (the JSON payload below is kept byte-for-byte).
    song = Song.from_data_dump(
        """
        {
            "name": "Ropes",
            "artists": ["Dirty Palm", "Chandler Jewels"],
            "album_name": "Ropes",
            "album_artist": "Dirty Palm",
            "genres": ["gaming edm", "melbourne bounce international"],
            "disc_number": 1,
            "duration": 188,
            "year": 2021,
            "date": "2021-10-28",
            "track_number": 1,
            "tracks_count": 1,
            "isrc": "GB2LD2110301",
            "song_id": "1t2qKa8K72IBC8yQlhD9bU",
            "cover_url": "https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332",
            "explicit": false,
            "download_url": null,
            "artist" : "Dirty Palm",
            "disc_count": 1,
            "copyright": "",
            "publisher": "",
            "url": "https://open.spotify.com/track/1t2qKa8K72IBC8yQlhD9bU"
        }
        """
    )

    assert song.name == "Ropes"
    assert song.artists == ["Dirty Palm", "Chandler Jewels"]
    assert song.album_name == "Ropes"
    assert song.album_artist == "Dirty Palm"
    assert song.genres == ["gaming edm", "melbourne bounce international"]
    assert song.disc_number == 1
    assert song.duration == 188
    assert song.year == 2021
    assert song.date == "2021-10-28"
    assert song.track_number == 1
    assert song.tracks_count == 1
    assert song.isrc == "GB2LD2110301"
    assert song.song_id == "1t2qKa8K72IBC8yQlhD9bU"
    assert (
        song.cover_url
        == "https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332"
    )
    # Fix: compare to singletons with `is` (flake8 E712/E711).
    assert song.explicit is False
    assert song.download_url is None
def test_song_from_data_dump_wrong_type():
    """
    Tests if Song.from_data_dump() raises exception when wrong type is passed.
    """
    # An int instead of a JSON string must be rejected with TypeError.
    with pytest.raises(TypeError):
        Song.from_data_dump(1) # type: ignore
def test_song_from_dict():
    """
    Tests if Song.from_dict() works correctly.
    """
    song = Song.from_dict(
        {
            "name": "Ropes",
            "artists": ["Dirty Palm", "Chandler Jewels"],
            "album_name": "Ropes",
            "album_artist": "Dirty Palm",
            "genres": ["gaming edm", "melbourne bounce international"],
            "disc_number": 1,
            "duration": 188,
            "year": 2021,
            "date": "2021-10-28",
            "track_number": 1,
            "tracks_count": 1,
            "isrc": "GB2LD2110301",
            "song_id": "1t2qKa8K72IBC8yQlhD9bU",
            "cover_url": "https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332",
            "explicit": False,
            "download_url": None,
            "artist": "Dirty Palm",
            "disc_count": 1,
            "copyright": "",
            "publisher": "",
            "url": "https://open.spotify.com/track/1t2qKa8K72IBC8yQlhD9bU",
        }
    )

    assert song.name == "Ropes"
    assert song.artists == ["Dirty Palm", "Chandler Jewels"]
    assert song.album_name == "Ropes"
    assert song.album_artist == "Dirty Palm"
    assert song.genres == ["gaming edm", "melbourne bounce international"]
    assert song.disc_number == 1
    assert song.duration == 188
    assert song.year == 2021
    assert song.date == "2021-10-28"
    assert song.track_number == 1
    assert song.tracks_count == 1
    assert song.isrc == "GB2LD2110301"
    assert song.song_id == "1t2qKa8K72IBC8yQlhD9bU"
    assert (
        song.cover_url
        == "https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332"
    )
    # Fix: compare to the bool singleton with `is` (flake8 E712).
    assert song.explicit is False
| 29.273438 | 92 | 0.59154 |
837341f356868dd6b2cca6e0bc5647f83e3b7686 | 8,267 | py | Python | benchmark/jsonresume/thrift/run.py | sourcemeta/json-size-benchmark | ec18002407fc97ea3181874b84c6342841cfb0a9 | [
"Apache-2.0"
] | null | null | null | benchmark/jsonresume/thrift/run.py | sourcemeta/json-size-benchmark | ec18002407fc97ea3181874b84c6342841cfb0a9 | [
"Apache-2.0"
] | 4 | 2022-01-15T01:27:24.000Z | 2022-01-19T00:47:20.000Z | benchmark/jsonresume/thrift/run.py | sourcemeta/json-size-benchmark | ec18002407fc97ea3181874b84c6342841cfb0a9 | [
"Apache-2.0"
def encode(json, schema):
    """Populate a Thrift ``Main`` resume struct from a JSON-resume style dict.

    Generalization over the original hand-unrolled version: every list section
    is now copied with a loop, so resumes with any number of profiles, jobs,
    skills, etc. are supported (the original required exactly 2 profiles,
    2 skills and 1 entry everywhere else). Inputs the original accepted
    produce an identical payload.

    Parameters
    ----------
    json : dict
        Parsed resume document (note: the parameter name shadows the stdlib
        ``json`` module inside this function; kept for interface stability).
    schema : module
        Generated Thrift module providing the struct classes
        (``Main``, ``Basics``, ``Location``, ``Profile``, ...).

    Returns
    -------
    schema.Main
        Fully populated payload mirroring ``json``.
    """

    def _fill(struct_cls, data, fields):
        # Copy the named dict keys one-to-one onto a fresh Thrift struct.
        obj = struct_cls()
        for field in fields:
            setattr(obj, field, data[field])
        return obj

    payload = schema.Main()

    basics = json['basics']
    payload.basics = _fill(schema.Basics, basics,
                           ('name', 'label', 'picture', 'email', 'phone',
                            'website', 'summary'))
    payload.basics.location = _fill(schema.Location, basics['location'],
                                    ('address', 'postalCode', 'city',
                                     'countryCode', 'region'))
    payload.basics.profiles = [
        _fill(schema.Profile, p, ('network', 'username', 'url'))
        for p in basics['profiles']
    ]

    payload.work = [
        _fill(schema.Work, w, ('company', 'position', 'website', 'startDate',
                               'endDate', 'summary', 'highlights'))
        for w in json['work']
    ]
    payload.volunteer = [
        _fill(schema.Volunteer, v, ('organization', 'position', 'website',
                                    'startDate', 'endDate', 'summary',
                                    'highlights'))
        for v in json['volunteer']
    ]
    payload.education = [
        _fill(schema.Education, e, ('institution', 'area', 'studyType',
                                    'startDate', 'endDate', 'gpa', 'courses'))
        for e in json['education']
    ]
    payload.awards = [
        _fill(schema.Award, a, ('title', 'date', 'awarder', 'summary'))
        for a in json['awards']
    ]
    payload.publications = [
        _fill(schema.Publication, p, ('name', 'publisher', 'releaseDate',
                                      'website', 'summary'))
        for p in json['publications']
    ]
    payload.skills = [
        _fill(schema.Skill, s, ('name', 'level', 'keywords'))
        for s in json['skills']
    ]
    payload.languages = [
        _fill(schema.Language, lang, ('language', 'fluency'))
        for lang in json['languages']
    ]
    payload.interests = [
        _fill(schema.Interest, i, ('name', 'keywords'))
        for i in json['interests']
    ]
    payload.references = [
        _fill(schema.Reference, r, ('name', 'reference'))
        for r in json['references']
    ]
    return payload
def decode(payload):
    """Convert a Thrift ``Main`` resume struct back into a plain dict.

    Generalization over the original hand-unrolled version: every list
    section is converted with a comprehension, so payloads with any number
    of profiles, jobs, skills, etc. are supported (the original required
    exactly 2 profiles, 2 skills and 1 entry everywhere else). For payloads
    the original accepted, the returned dict is identical.
    """

    def _pick(obj, fields):
        # Inverse of encode: read the named struct attributes into a dict.
        return {field: getattr(obj, field) for field in fields}

    basics = _pick(payload.basics, ('name', 'label', 'picture', 'email',
                                    'phone', 'website', 'summary'))
    basics['location'] = _pick(payload.basics.location,
                               ('address', 'postalCode', 'city',
                                'countryCode', 'region'))
    basics['profiles'] = [_pick(p, ('network', 'username', 'url'))
                          for p in payload.basics.profiles]

    return {
        'basics': basics,
        'work': [_pick(w, ('company', 'position', 'website', 'startDate',
                           'endDate', 'summary', 'highlights'))
                 for w in payload.work],
        'volunteer': [_pick(v, ('organization', 'position', 'website',
                                'startDate', 'endDate', 'summary',
                                'highlights'))
                      for v in payload.volunteer],
        'education': [_pick(e, ('institution', 'area', 'studyType',
                                'startDate', 'endDate', 'gpa', 'courses'))
                      for e in payload.education],
        'awards': [_pick(a, ('title', 'date', 'awarder', 'summary'))
                   for a in payload.awards],
        'publications': [_pick(p, ('name', 'publisher', 'releaseDate',
                                   'website', 'summary'))
                         for p in payload.publications],
        'skills': [_pick(s, ('name', 'level', 'keywords'))
                   for s in payload.skills],
        'languages': [_pick(lang, ('language', 'fluency'))
                      for lang in payload.languages],
        'interests': [_pick(i, ('name', 'keywords'))
                      for i in payload.interests],
        'references': [_pick(r, ('name', 'reference'))
                       for r in payload.references],
    }
| 39.937198 | 83 | 0.605056 |
9109db67f9e55ca86c32b303d432d0c24452927f | 8,223 | py | Python | qa/mc_sc_forging3.py | nastenko/Sidechains-SDK | 35df59d5edc16138eaea081ee692c03a469a802d | [
"MIT"
] | 111 | 2020-07-29T22:55:43.000Z | 2022-03-26T22:56:42.000Z | qa/mc_sc_forging3.py | nastenko/Sidechains-SDK | 35df59d5edc16138eaea081ee692c03a469a802d | [
"MIT"
] | 71 | 2020-07-25T20:54:01.000Z | 2022-03-17T13:53:27.000Z | qa/mc_sc_forging3.py | nastenko/Sidechains-SDK | 35df59d5edc16138eaea081ee692c03a469a802d | [
"MIT"
] | 56 | 2020-07-28T15:39:59.000Z | 2022-03-30T02:03:23.000Z | #!/usr/bin/env python2
from SidechainTestFramework.sc_test_framework import SidechainTestFramework
from SidechainTestFramework.sc_boostrap_info import SCNodeConfiguration, SCCreationInfo, MCConnectionInfo, \
SCNetworkConfiguration
from test_framework.util import initialize_chain_clean, start_nodes, \
websocket_port_by_mc_node_index, connect_nodes_bi, disconnect_nodes_bi
from SidechainTestFramework.scutil import bootstrap_sidechain_nodes, start_sc_nodes, generate_next_blocks
from SidechainTestFramework.sc_forging_util import *
"""
Check Latus forger behavior for:
1. Sidechain block with recursive ommers to the same mc branch inclusion: mainchain fork races.
Configuration:
Start 3 MC nodes and 1 SC node (with default websocket configuration).
SC node connected to the first MC node.
MC nodes are connected.
Test:
- Synchronize MC nodes to the point of SC Creation Block.
- Disconnect MC nodes.
- Forge SC block, verify that there is no MC Headers and Data, no ommers.
- Mine MC block on MC node 1, sync with MC node 3, then forge SC block respectively, verify MC data inclusion.
- Mine 2 MC blocks on MC node 2. Connect and synchronize MC nodes 1 and 2. Fork became an active chain.
- Forge SC block, verify that previously forged block was set as ommer, verify MC data inclusion.
- Mine 2 MC blocks in MC node 3, sync again with MC Node 1. Previous chain is active again.
- Forge SC block, verify MC data inclusion and ommers/subommers inclusion.
MC blocks on MC node 1 in the end:
220 - 221 - 222 - 223*
\
- 221' - 222'
SC Block on SC node in the end: <sc block/slot number>[<mc headers included>; <mc refdata included>; <ommers>]
G[220h;220d;] - 0[;;] - 1[221h;221d;]
\
- 2[221'h,222'h;;1[...]]
\
- 3[221h,222h,223h;;2[...;1]]
"""
class MCSCForging3(SidechainTestFramework):
    """
    Integration test of Latus forger behavior under mainchain fork races with
    recursive ommers (see the module docstring for the MC/SC block diagram
    this scenario produces).
    """

    # Three MC nodes so two competing mainchain forks can be mined independently.
    number_of_mc_nodes = 3
    number_of_sidechain_nodes = 1

    def setup_chain(self):
        # Start every mainchain node from a clean (empty) data directory.
        initialize_chain_clean(self.options.tmpdir, self.number_of_mc_nodes)

    def setup_network(self, split = False):
        # Setup nodes and connect them
        self.nodes = self.setup_nodes()
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 0, 2)
        self.sync_all()

    def setup_nodes(self):
        # Start 3 MC nodes
        return start_nodes(self.number_of_mc_nodes, self.options.tmpdir)

    def sc_setup_chain(self):
        # Bootstrap new SC, specify SC node 1 connection to MC node 1
        mc_node_1 = self.nodes[0]
        sc_node_1_configuration = SCNodeConfiguration(
            MCConnectionInfo(address="ws://{0}:{1}".format(mc_node_1.hostname, websocket_port_by_mc_node_index(0)))
        )
        network = SCNetworkConfiguration(SCCreationInfo(mc_node_1, 600, 1000),
                                         sc_node_1_configuration)
        bootstrap_sidechain_nodes(self.options.tmpdir, network)

    def sc_setup_nodes(self):
        # Start 1 SC node
        return start_sc_nodes(self.number_of_sidechain_nodes, self.options.tmpdir)

    def run_test(self):
        """
        Drive the fork-race scenario (Tests 1-4 below) and assert on the MC
        headers, MC reference data and ommers included in each forged SC block.
        The exact order of connect/disconnect/mine/forge steps is what builds
        the chain shape under test -- do not reorder.
        """
        # Synchronize mc_node1, mc_node2 and mc_node3, then disconnect them.
        self.sync_all()
        disconnect_nodes_bi(self.nodes, 0, 1)
        disconnect_nodes_bi(self.nodes, 0, 2)
        mc_node1 = self.nodes[0]
        mc_node2 = self.nodes[1]
        mc_node3 = self.nodes[2]
        sc_node1 = self.sc_nodes[0]

        # Test 1: Generate SC block, when all MC blocks already synchronized.
        # Generate 1 SC block
        scblock_id0 = generate_next_blocks(sc_node1, "first node", 1)[0]
        # Verify that SC block has no MC headers, ref data, ommers
        check_mcheaders_amount(0, scblock_id0, sc_node1)
        check_mcreferencedata_amount(0, scblock_id0, sc_node1)
        check_ommers_amount(0, scblock_id0, sc_node1)

        # Test 2: Generate SC block, when new MC block following the same Tip appear.
        # Generate 1 MC block on the first MC node
        mcblock_hash1 = mc_node1.generate(1)[0]
        # Sync MC nodes 1 and 3 once
        connect_nodes_bi(self.nodes, 0, 2)
        self.sync_nodes([mc_node1, mc_node3])
        disconnect_nodes_bi(self.nodes, 0, 2)
        # Generate 1 SC block
        scblock_id1 = generate_next_blocks(sc_node1, "first node", 1)[0]
        check_scparent(scblock_id0, scblock_id1, sc_node1)
        # Verify that SC block contains MC block as a MainchainReference
        check_mcheaders_amount(1, scblock_id1, sc_node1)
        check_mcreferencedata_amount(1, scblock_id1, sc_node1)
        check_mcreference_presence(mcblock_hash1, scblock_id1, sc_node1)
        check_ommers_amount(0, scblock_id1, sc_node1)

        # Test 3: Generate SC block, when new MC blocks following different Tip appear. Ommers expected.
        # Generate another 2 MC blocks on the second MC node
        fork_mcblock_hash1 = mc_node2.generate(1)[0]
        fork_mcblock_hash2 = mc_node2.generate(1)[0]
        # Connect and synchronize MC node 1 to MC node 2
        connect_nodes_bi(self.nodes, 0, 1)
        self.sync_nodes([mc_node1, mc_node2])
        # MC Node 1 should replace mcblock_hash1 Tip with [fork_mcblock_hash1, fork_mcblock_hash2]
        assert_equal(fork_mcblock_hash2, mc_node1.getbestblockhash())
        # Generate 1 SC block
        scblock_id2 = generate_next_blocks(sc_node1, "first node", 1)[0]
        check_scparent(scblock_id0, scblock_id2, sc_node1)
        # Verify that SC block contains newly created MC blocks as a MainchainHeaders and no MainchainRefData
        check_mcheaders_amount(2, scblock_id2, sc_node1)
        check_mcreferencedata_amount(0, scblock_id2, sc_node1)
        check_mcheader_presence(fork_mcblock_hash1, scblock_id2, sc_node1)
        check_mcheader_presence(fork_mcblock_hash2, scblock_id2, sc_node1)
        # Verify that SC block contains 1 Ommer with 1 MainchainHeader
        check_ommers_amount(1, scblock_id2, sc_node1)
        check_ommers_cumulative_score(1, scblock_id2, sc_node1)
        check_ommer(scblock_id1, [mcblock_hash1], scblock_id2, sc_node1)

        # Test 4: Generate SC block, when new MC blocks following previous Tip appear and lead to chain switching again.
        # Ommers expected. Subommers expected with mc blocks for the same MC branch as current SC block,
        # but orphaned to parent Ommer MC headers.
        # Generate 2 more mc blocks in MC node 3
        mcblock_hash2 = mc_node3.generate(1)[0]
        mcblock_hash3 = mc_node3.generate(1)[0]
        # Sync MC nodes 1 and 3 once
        connect_nodes_bi(self.nodes, 0, 2)
        self.sync_nodes([mc_node1, mc_node3])
        disconnect_nodes_bi(self.nodes, 0, 2)
        # MC Node 1 should replace back fork_mcblock_hash2 Tip with [mcblock_hash1, mcblock_hash2, mcblock_hash3]
        assert_equal(mcblock_hash3, mc_node1.getbestblockhash())
        # Generate SC block
        scblock_id3 = generate_next_blocks(sc_node1, "first node", 1)[0]
        check_scparent(scblock_id0, scblock_id3, sc_node1)
        # Verify that SC block contains newly created MC blocks as a MainchainHeaders and no MainchainRefData
        check_mcheaders_amount(3, scblock_id3, sc_node1)
        check_mcreferencedata_amount(0, scblock_id3, sc_node1)
        check_mcheader_presence(mcblock_hash1, scblock_id3, sc_node1)
        check_mcheader_presence(mcblock_hash2, scblock_id3, sc_node1)
        check_mcheader_presence(mcblock_hash3, scblock_id3, sc_node1)
        # Verify Ommers cumulative score, that must also count 1 subommer
        check_ommers_cumulative_score(2, scblock_id3, sc_node1)
        # Verify that SC block contains 1 Ommer with 2 MainchainHeader
        check_ommers_amount(1, scblock_id3, sc_node1)
        check_ommer(scblock_id2, [fork_mcblock_hash1, fork_mcblock_hash2], scblock_id3, sc_node1)
        # Verify that Ommer contains 1 subommer with 1 MainchainHeader
        check_subommer(scblock_id2, scblock_id1, [mcblock_hash1], scblock_id3, sc_node1)
if __name__ == "__main__":
MCSCForging3().main() | 46.457627 | 120 | 0.693056 |
9bd32a5691ee5b36fc7e487e72e2d78d98382695 | 8,368 | py | Python | tests/common/test_op/reduce_max_ad.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | null | null | null | tests/common/test_op/reduce_max_ad.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | null | null | null | tests/common/test_op/reduce_max_ad.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function:reduce_max_ad"""
import akg.tvm
import akg
from akg.utils import kernel_exec as utils
from akg.utils import custom_tiling as ct_util
from akg.ops.math.cast import cast
from akg.ops.math import reduce_max
from akg.utils import format_transform as ft_util
# Manually tuned tiling configurations, keyed by str((shape, axis, keepdims)).
# Currently empty: every lookup falls through to "" (let the scheduler decide).
reduce_max_ad_set_dim_map = {
}


def reduce_max_ad_set_dim_func(data, head, axis, keepdims):
    """Return the pre-tuned dim configuration for this (shape, axis, keepdims)
    combination, or "" when none is registered (``head`` is unused)."""
    hash_key = str((tuple(data.shape), tuple(axis), keepdims))
    if hash_key not in reduce_max_ad_set_dim_map:
        return ""
    return ct_util.set_dims(reduce_max_ad_set_dim_map[hash_key])
@ct_util.reg_set_dim_func(reduce_max_ad_set_dim_func)
def reduce_max_ad(head, data, axis, keepdims):
    """Adjoint of reduce_max w.r.t. ``data``, obtained via akg's autodiff."""
    forward = reduce_max.reduce_max(data, axis, keepdims)
    return akg.differentiate(forward, [data], head)[0]
def reduce_max_ad_optimized(head, data, axis, keepdims):
    """Adjoint of reduce_max computed with a hand-written backward rule
    (a selection mask) instead of letting autodiff differentiate the reduction.

    :param head: adjoint (incoming gradient) of the reduce_max output
    :param data: input tensor of the forward reduction
    :param axis: reduction axis/axes
    :param keepdims: whether the forward reduction keeps reduced dims as size 1
    :return: gradient of the reduce_max output with respect to ``data``
    """
    def get_shape(pld): return [d.value for d in pld.shape]

    def custom_reduce_max_fdiff(out, inputs, grad, ad_attrs, new_pld_array):
        # Backward rule: route the incoming gradient only to positions that
        # hold the (broadcast) maximum; every other position receives zero.
        data = inputs[0]
        shape = get_shape(data)
        max_ = akg.lang.cce.reduce_max(data, axis=axis, keepdims=keepdims)
        max_broadcast = akg.lang.cce.broadcast(max_, shape)
        # NOTE: if several elements tie for the maximum, each tied position
        # receives the full incoming gradient (no averaging is performed).
        return [akg.tvm.compute(shape,
                                lambda *indices:
                                akg.tvm.expr.Select(data(*indices) == max_broadcast(*indices),
                                                    grad(*get_reduced_indices(*indices, axis=axis, keepdims=keepdims)),
                                                    akg.tvm.const(0, dtype=data.dtype)),
                                name="reduce_max_ad2")]

    l = reduce_max.reduce_max(data, axis, keepdims)
    # Differentiate the forward op while overriding its backward rule with the
    # selection-based implementation above.
    [dl_ddata] = akg.differentiate(l, [data], head, None, None, override={l: ([data], custom_reduce_max_fdiff)})
    return dl_ddata
def get_reduced_indices(*indices, axis, keepdims):
    """Map output-tensor indices onto the matching indices of the reduced
    (adjoint) tensor, for an input of arbitrary rank."""
    index_values = list(indices)
    # Normalize the reduction axes (negative axes become positive); these
    # dimensions no longer exist after the reduction.
    reduced_axes = ft_util.refine_reduce_axis(index_values, list(axis))
    if keepdims:
        # Reduced dimensions are kept with extent 1, so index them with 0.
        reduced = [0 if dim in reduced_axes else value for dim, value in enumerate(index_values)]
    else:
        # Reduced dimensions are dropped entirely.
        reduced = [value for dim, value in enumerate(index_values) if dim not in reduced_axes]
    return tuple(reduced)
def reduce_max_ad_optimized_manual_schedule(input_shape, dtype, axis, keepdims, polyhedral=True, attrs=None):
    """Build and compile the reduce_max adjoint kernel with a hand-written
    schedule (explicit UB caching and tiling) instead of auto-scheduling.

    :param input_shape: shape of the forward input tensor
    :param dtype: dtype of the forward input; non-float16 inputs are cast to
        float16 for computation and cast back afterwards
    :param axis: reduction axis/axes
    :param keepdims: whether the forward reduction keeps reduced dims as size 1
    :param polyhedral: forwarded to akg.build
    :param attrs: required dict; ``attrs['tile']`` supplies one tiling factor
        per output dimension
    :return: the compiled cce module
    :raises Exception: if ``attrs`` is None
    """
    def custom_reduce_max_fdiff(out, inputs, head_, ad_attrs, new_pld_array):
        data_ = inputs[0]
        shape = data_.shape
        # reduces maximum value for each column
        max_ = akg.lang.cce.reduce_max(data_, axis=axis, keepdims=True)
        # copies reduced values to get the original shape
        max_broadcast = akg.lang.cce.broadcast(max_, shape)
        # head broadcast is needed to generate correct cce code for the selection operation
        head_broadcast = akg.tvm.compute(shape,
                                         lambda *indices:
                                         head_(*get_reduced_indices(*indices, axis=axis, keepdims=keepdims)))
        # zero all the values that are not max values on the result, remaining is equal to the adjoint of the output
        # NOTE(review): the zero constant is hard-coded to float16; this relies
        # on the computation always happening in float16 (see the casts below).
        max_values_and_zeros = akg.tvm.compute(shape,
                                               lambda *indices: akg.tvm.expr.Select(data_(*indices) == max_broadcast(*indices),
                                                                                    head_broadcast(*indices),
                                                                                    akg.tvm.const(0, dtype='float16')),
                                               name="reduce_max_ad2")
        # cast data back to the original dtype
        if dtype != 'float16':
            return [cast(max_values_and_zeros, dtype)]
        else:
            return [max_values_and_zeros]

    # tensor for the input data
    data = akg.tvm.placeholder(input_shape, dtype, name="input_data")

    # computation of reduce max
    # not used on the schedule because this is the differentiation op
    l = reduce_max.reduce_max(data, axis, keepdims)

    # adjoint tensor for the differentiation
    head = akg.tvm.placeholder(l.shape, name="head", dtype=l.dtype)

    # cast input data (computation itself always runs in float16)
    if dtype != 'float16':
        data_cast = cast(data, "float16")
        head_cast = cast(head, "float16")
    else:
        data_cast = data
        head_cast = head

    # override differentiation computation with custom function
    [dl_ddata] = akg.differentiate(l, [data_cast], head_cast, None, None,
                                   override={l: ([data_cast], custom_reduce_max_fdiff)})

    # get tensors from custom function; the op graph differs depending on
    # whether a trailing cast stage was appended, hence the two layouts.
    if dtype != 'float16':
        max_values_and_zeros = dl_ddata.op.input_tensors[0]
        max_broadcast = max_values_and_zeros.op.input_tensors[1]
        max_ = max_broadcast.op.input_tensors[0]
        head_broadcast = max_values_and_zeros.op.input_tensors[2]
    else:
        max_broadcast = dl_ddata.op.input_tensors[1]
        max_ = max_broadcast.op.input_tensors[0]
        head_broadcast = dl_ddata.op.input_tensors[2]

    # schedule for differentiation operation
    # inputs: data and head
    s = akg.tvm.create_schedule([dl_ddata.op])

    # cache reads of inputs
    if dtype != 'float16':
        head_ub = s.cache_read(head, "local.UB", [head_cast])
        data_ub = s.cache_read(data, "local.UB", [data_cast])
    else:
        # no cast operation
        head_ub = s.cache_read(head_cast, "local.UB", [head_broadcast])
        data_ub = s.cache_read(data_cast, "local.UB", [max_, dl_ddata])

    # cache write for the output
    dl_ddata_ub = s.cache_write(dl_ddata, "local.UB")

    # get tiling attributes
    if attrs is None:
        raise Exception('attrs is None')
    tiling_factors = attrs['tile']
    split_iterators = []
    assert len(tiling_factors) == len(dl_ddata.shape)
    # split the final compute and save the iterators
    for index, factor in enumerate(tiling_factors):
        split_iterators.append(s[dl_ddata].split(dl_ddata.op.axis[index], factor))

    # get iterators: everything is anchored at the outer iterator of the
    # first split axis.
    iterator1 = split_iterators[0][0]

    # move computation of when there is a cast
    if dtype != "float16":
        s[data_cast].compute_at(s[dl_ddata], iterator1)
        s[data_cast].set_scope("local.UB")
        s[head_cast].compute_at(s[dl_ddata], iterator1)
        s[head_cast].set_scope("local.UB")
        s[max_values_and_zeros].compute_at(s[dl_ddata], iterator1)
        s[max_values_and_zeros].set_scope("local.UB")

    # move cache reads and writes
    s[data_ub].compute_at(s[dl_ddata], iterator1)
    s[head_ub].compute_at(s[dl_ddata], iterator1)
    s[dl_ddata_ub].compute_at(s[dl_ddata], iterator1)

    # move computation of the differentiation
    s[max_].compute_at(s[dl_ddata], iterator1)
    s[max_].set_scope("local.UB")
    s[max_broadcast].compute_at(s[dl_ddata], iterator1)
    s[max_broadcast].set_scope("local.UB")
    s[head_broadcast].compute_at(s[dl_ddata], iterator1)
    s[head_broadcast].set_scope("local.UB")

    with akg.build_config(add_lower_pass=utils.debug_mode(0), dump_pass_ir=True):
        mod = akg.build(s, [head, data, dl_ddata], "cce",
                        name="reduce_max_ad_manual_schedule", attrs=attrs, polyhedral=polyhedral)
        source_code = mod.imported_modules[0].get_source()
        kernel_name = "reduce_max_ad_manual_schedule"
        utils.create_code(kernel_name, './', source_code)
    return mod
| 41.019608 | 123 | 0.655951 |
d9464d607c1b47f0ba12a3cc338d0038d93a9418 | 697 | py | Python | mod_auth_external/examples/python/prosody-auth-example.py | Contatta/prosody-modules | dad756968054325700181699a63fe19fcd471fc9 | [
"MIT"
] | 6 | 2017-04-27T08:40:27.000Z | 2021-05-17T05:39:20.000Z | mod_auth_external/examples/python/prosody-auth-example.py | Contatta/prosody-modules | dad756968054325700181699a63fe19fcd471fc9 | [
"MIT"
] | 5 | 2020-09-05T00:40:57.000Z | 2022-01-22T05:18:54.000Z | mod_auth_external/examples/python/prosody-auth-example.py | Contatta/prosody-modules | dad756968054325700181699a63fe19fcd471fc9 | [
"MIT"
] | 2 | 2016-07-31T11:56:03.000Z | 2019-03-15T13:11:56.000Z | #!/usr/bin/env python2
import sys
def auth(username, password):
    """Return "1" when the credentials are accepted, "0" otherwise.

    Example stub: any password is accepted for the hard-coded user "someone".
    """
    return "1" if username == "someone" else "0"
def respond(ret):
    """Write one protocol reply line to stdout and flush it immediately."""
    out = sys.stdout
    out.write("%s\n" % ret)
    out.flush()
# Dispatch table: protocol method name -> handler callable and expected
# number of colon-separated arguments.
methods = {
    "auth": { "function": auth, "parameters": 2 }
}

# Main loop: read "method:arg1:arg2:..." lines from stdin forever and reply
# one line per request via respond().
# NOTE(review): data.split(":", parameters) permits up to parameters+1 fields,
# so e.g. a password containing ':' is rejected as "incorrect number of
# parameters"; a maxsplit of parameters-1 may be the intent -- confirm against
# the mod_auth_external protocol before changing.
while 1:
    line = sys.stdin.readline().rstrip("\n")
    method, sep, data = line.partition(":")
    if method in methods:
        method_info = methods[method]
        split_data = data.split(":", method_info["parameters"])
        if len(split_data) == method_info["parameters"]:
            respond(method_info["function"](*split_data))
        else:
            respond("error: incorrect number of parameters to method '%s'"%method)
    else:
        respond("error: method '%s' not implemented"%method)
| 23.233333 | 73 | 0.675753 |
90eed4f7be7ef6f65d052555d904f1c75e8ca948 | 1,841 | py | Python | python/v2/btm/token.py | RK41099/varimi | c7ed33a9653553dfeb1d41bdabd28c318ecdc14c | [
"MIT"
] | 28 | 2018-05-07T16:59:25.000Z | 2021-04-28T05:14:44.000Z | python/v2/btm/token.py | RK41099/varimi | c7ed33a9653553dfeb1d41bdabd28c318ecdc14c | [
"MIT"
] | 8 | 2018-05-10T14:19:03.000Z | 2018-06-16T02:41:50.000Z | python/v2/btm/token.py | RK41099/varimi | c7ed33a9653553dfeb1d41bdabd28c318ecdc14c | [
"MIT"
] | 14 | 2018-05-10T16:34:28.000Z | 2022-03-24T23:19:31.000Z | """
Basic settings for an NEP5 Token and crowdsale
"""
from boa.interop.Neo.Storage import *
TOKEN_NAME = 'BitMari Points'
TOKEN_SYMBOL = 'BTM'
TOKEN_DECIMALS = 8
# This is the script hash of the address for the owner of the token
# This can be found in ``neo-python`` with the wallet open, use ``wallet`` command
TOKEN_OWNER = b'#\xba\'\x03\xc52c\xe8\xd6\xe5"\xdc2 39\xdc\xd8\xee\xe9'
# Storage key under which the amount in circulation is tracked (see the
# Get/Put calls in the functions below).
TOKEN_CIRC_KEY = b'in_circulation'
# All token amounts below are expressed in the smallest unit (scaled by 10^8).
TOKEN_TOTAL_SUPPLY = 100000000 * 100000000 # 100m total supply * 10^8 ( decimals)
TOKEN_INITIAL_AMOUNT = 25000000 * 100000000 # 25m to owners * 10^8
# for now assume 1 dollar per token, and one neo = 40 dollars * 10^8
TOKENS_PER_NEO = 400 * 100000000
# for now assume 1 dollar per token, and one gas = 20 dollars * 10^8
TOKENS_PER_GAS = 200 * 100000000
# maximum amount you can mint in the limited round ( 500 neo/person * 40 Tokens/NEO * 10^8 )
MAX_EXCHANGE_LIMITED_ROUND = 500 * 40 * 100000000
# when to start the crowdsale 755000
# NOTE(review): value is 10 but the comment references block 755000 -- looks
# like a test/placeholder value; confirm before deployment.
BLOCK_SALE_START = 10
# when to end the initial limited round 755000 + 10000
# NOTE(review): effectively a far-future block height ("never"); confirm.
LIMITED_ROUND_END = 999_999_999_999
# Presumably storage key prefixes for KYC approval and limited-round state
# (used elsewhere in the contract) -- confirm against the callers.
KYC_KEY = b'kyc_ok'
LIMITED_ROUND_KEY = b'r1'
def crowdsale_available_amount(ctx):
    """
    :param ctx: storage context passed through to Get
    :return: int The amount of tokens left for sale in the crowdsale
        (total supply minus the amount already in circulation)
    """
    in_circ = Get(ctx, TOKEN_CIRC_KEY)
    available = TOKEN_TOTAL_SUPPLY - in_circ
    return available
def add_to_circulation(ctx, amount):
    """
    Adds an amount of token to circulation
    :param ctx: storage context used by Get/Put
    :param amount: int the amount to add to circulation
    :return: True (always; no supply-cap check is performed here, so callers
        are responsible for validating the amount first)
    """
    current_supply = Get(ctx, TOKEN_CIRC_KEY)
    current_supply += amount
    Put(ctx, TOKEN_CIRC_KEY, current_supply)
    return True
def get_circulation(ctx):
    """
    Get the total amount of tokens in circulation
    :param ctx: storage context passed through to Get
    :return:
        int: Total amount in circulation
    """
    return Get(ctx, TOKEN_CIRC_KEY)
| 23.602564 | 92 | 0.712113 |
0ba307ae9cdb7ded7eed354fab9adc1d3aa0f305 | 2,784 | py | Python | Contributions/BioMath_Stress_Simu/Stress_Simulation.py | OluSure/Hacktoberfest2021-1 | ad1bafb0db2f0cdeaae8f87abbaa716638c5d2ea | [
"MIT"
] | 1,253 | 2017-06-06T07:19:25.000Z | 2022-03-30T17:07:58.000Z | Contributions/BioMath_Stress_Simu/Stress_Simulation.py | OluSure/Hacktoberfest2021-1 | ad1bafb0db2f0cdeaae8f87abbaa716638c5d2ea | [
"MIT"
] | 554 | 2017-09-29T18:56:01.000Z | 2022-02-21T15:48:13.000Z | Contributions/BioMath_Stress_Simu/Stress_Simulation.py | OluSure/Hacktoberfest2021-1 | ad1bafb0db2f0cdeaae8f87abbaa716638c5d2ea | [
"MIT"
] | 2,226 | 2017-09-29T19:59:59.000Z | 2022-03-25T08:59:55.000Z | #!/usr/bin/env python
# coding: utf-8
# In[4]:
#!/usr/bin/env python
# coding: utf-8
# In[23]:
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 31 22:52:05 2020
@author: bruno
"""
from vpython import *
from numpy import *
import matplotlib.pyplot as plt
from drawnow import *
import time
# First parameters: dimensions of the simulated material block.
L = 0.2 # in meters
H = 0.2 # in meters
W = 0.2 # in meters
# Initial Properties:
e = 8000 # Young's modulus (Pa)
E = 0 # Deformation (strain)
M = 0 # Actuator movement (accumulated displacement)
S = 0 # Stress
# Parameters for actuator movement:
I = H # Initial position (block height)
Ly = I # Y-axis displacement.
# Defining the material (block). (Variable names are Portuguese:
# bloco = block, atuador = actuator.)
bloco = box(pos = vector(0, 0, 0), size = vector(L, H, W), color = color.red);
# Defining the actuator that will compress the material.
atuador = cylinder(pos=vector(0, 0.1 ,0), axis=vector(0,5,0), radius=0.20, size = vector(L, 0.01, W), color = color.blue)
# Text boxes (on-screen labels for modulus, stress and displacement).
forceL=label(pos = vec(0,0.6,0), text = 'E = %1.1f N' %(e))
tensaoL=label(pos = vec(0.7,0.6,0), text = 'Strain = %1.1f ' %(0))
deformaL=label(pos = vec(-0.7,0.6,0), text = 'Displ. = %1.1f ' %(E))
# Defining the support platform for our block.
ground = box(pos = vector(0, -(H/2) , 0), size = vector(2, 0.01, 1), color = color.white)
# Arrow indicating the applied force direction.
pointer = arrow(pos=vector(0.5, -0.4 ,0), axis = vector(0, -0.5, 0), shaftwidth = 0, color = color.red)
# Arrays accumulating the points of the stress/strain graph.
Stress = []
Strain = []
# Matplotlib in interactive mode so the plot updates live.
plt.ion()
def makeFig():
    """Redraw the live stress/strain plot (invoked via drawnow each frame)."""
    plt.plot(Strain, Stress, 'D-') # plot stress versus strain accumulated so far
    plt.xlabel('Strain')
    plt.ylabel('Stress')
    plt.title('Stress/Strain Plot')
# Compression movement: 18 animation frames; each frame moves the actuator
# down by 0.005 m, updates the strain/stress read-outs and redraws the plot.
for cont in range(0, 18):
    # Increase the applied displacement.
    M = M + 0.005  # actuator displacement step per frame (0.005 m)
    # Updated parameters:
    Ly = Ly + 0.005  # accumulated Y-axis position of the block's top face
    E = (I-Ly)/I # Deformation (engineering strain; negative in compression)
    S = E*e # Stress (linear-elastic: strain * Young's modulus)
    # Actuator movement:
    atuador.pos = vector(0, 0.1 - M,0)
    # Block compression: (X, Y, Z) scaled per the Poisson effect.
    bloco.size = vector(L + M, (H - M), W + 2*(M))
    bloco.pos = vector(0,-M/2 ,0)
    # Arrow position:
    # NOTE(review): overwritten two lines below, so this first assignment has
    # no lasting effect -- confirm which Y offset is intended.
    pointer.pos = vector(0.5, 0.1 - M ,0)
    # Updating other parameters (labels show magnitudes, hence the *-1).
    pointer.pos = vector(0.5, 0.5 - M ,0)
    forceL.text = 'E = %1.0f N' %(e)
    tensaoL.text = 'Tensao = %1.0f N ' %(S*-1) # value presented as absolute magnitude
    deformaL.text = 'Deform. = %1.2f ' %(E*-1) # value presented as absolute magnitude
    # Updating the arrays feeding the real-time graph.
    Stress.append(S*-1)
    Strain.append(E*-1)
    drawnow(makeFig)
    # Slow the animation down so the movement is visible.
    time.sleep(0.005)
| 27.029126 | 121 | 0.584411 |
c11a04d19b070803d76154fe0b1aa83503b5ca5e | 298 | py | Python | python/testData/refactoring/move/collectMovableModuleMembers.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/refactoring/move/collectMovableModuleMembers.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/refactoring/move/collectMovableModuleMembers.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | from module import symbol as alias
# Apparently fixture data for an IDE "move module members" refactoring test
# (per its path); the mix of movable top-level members (CONST, C, outer_func)
# and non-member statements is intentional -- do not "clean up".
# Nesting below is reconstructed from syntax; confirm against the original file.
CONST = 42
# x is visible externally in Python 2
[x for x in range(3)]
for i in range(3):
    pass
if True:
    class C:
        class Inner:
            pass

        def method(self):
            pass
def outer_func():
    def inner_func():
        pass
| 12.416667 | 37 | 0.560403 |
4b7bfd209e2d14579767b274b12bcb3acf6fdeb9 | 898 | py | Python | ephysanalysis/__init__.py | pbmanis/ephysanalysis | b745e380ba1e842d862eec038a9c37521a698939 | [
"MIT"
] | null | null | null | ephysanalysis/__init__.py | pbmanis/ephysanalysis | b745e380ba1e842d862eec038a9c37521a698939 | [
"MIT"
] | 1 | 2020-05-01T15:06:09.000Z | 2020-05-01T15:06:09.000Z | ephysanalysis/__init__.py | pbmanis/ephysanalysis | b745e380ba1e842d862eec038a9c37521a698939 | [
"MIT"
] | 1 | 2020-05-01T13:25:44.000Z | 2020-05-01T13:25:44.000Z | #!/usr/bin/env python
# Use Semantic Versioning, http://semver.org/
version_info = (0, 2, 2, 'a')
__version__ = "%d.%d.%d%s" % version_info
#print ("apparent version: ", __version__)
import ephysanalysis.Fitting as Fitting
import ephysanalysis.Utility as Utility
import ephysanalysis.acq4read
import ephysanalysis.MatdatacRead
import ephysanalysis.DatacReader
import ephysanalysis.DataPlan
import ephysanalysis.getcomputer
import ephysanalysis.RmTauAnalysis
import ephysanalysis.SpikeAnalysis
import ephysanalysis.dataSummary
import ephysanalysis.IVSummary
import ephysanalysis.VCSummary
import ephysanalysis.PSCAnalyzer
import ephysanalysis.boundrect
import ephysanalysis.poisson_score
import ephysanalysis.bridge
import ephysanalysis.cursor_plot
import ephysanalysis.MakeClamps
import ephysanalysis.test_notch
import ephysanalysis.fix_objscale
import ephysanalysis.metaarray as MetaArray
| 25.657143 | 45 | 0.847439 |
ec7f80b83956699a93da6e33269782241b05525e | 4,359 | py | Python | coffin/template/defaultfilters.py | theatlantic/coffin | 0377074b78dbfa4be0b188a68f256de1db0dd718 | [
"BSD-3-Clause"
] | null | null | null | coffin/template/defaultfilters.py | theatlantic/coffin | 0377074b78dbfa4be0b188a68f256de1db0dd718 | [
"BSD-3-Clause"
] | null | null | null | coffin/template/defaultfilters.py | theatlantic/coffin | 0377074b78dbfa4be0b188a68f256de1db0dd718 | [
"BSD-3-Clause"
] | null | null | null | """Coffin automatically makes Django's builtin filters available in Jinja2,
through an interop-layer.
However, Jinja 2 provides room to improve the syntax of some of the
filters. Those can be overridden here.
TODO: Most of the filters in here need to be updated for autoescaping.
"""
from coffin.template import Library
from jinja2.runtime import Undefined
# from jinja2 import Markup
from jinja2 import filters
register = Library()
def url(view_name, *args, **kwargs):
    """This is an alternative to the {% url %} tag. It comes from a time
    before Coffin had a port of the tag.
    """
    # Deferred import: presumably dodges a circular import with defaulttags;
    # confirm before hoisting to module level.
    from coffin.template import defaulttags
    return defaulttags.url._reverse(view_name, args, kwargs)
register.jinja2_filter(url, jinja2_only=True)
register.object(url)
@register.jinja2_filter(jinja2_only=True)
def timesince(value, *arg):
    """Render "time since ``value``" text; None/Undefined render as ''."""
    if value is None or isinstance(value, Undefined):
        return u''
    from django.utils.timesince import timesince as django_timesince
    return django_timesince(value, *arg)
@register.jinja2_filter(jinja2_only=True)
def timeuntil(value, *args):
    """Render "time until ``value``" text; None/Undefined render as ''."""
    if value is None or isinstance(value, Undefined):
        return u''
    from django.utils.timesince import timeuntil as django_timeuntil
    return django_timeuntil(value, *args)
@register.jinja2_filter(jinja2_only=True)
def date(value, arg=None):
    """Formats a date according to the given format.

    ``None``/``Undefined`` values render as an empty string; ``arg`` defaults
    to Django's ``DATE_FORMAT`` setting.
    """
    if value is None or isinstance(value, Undefined):
        return u''
    # Imported here rather than at module level -- presumably so importing this
    # module does not require configured Django settings; confirm.
    from django.conf import settings
    from django.utils import formats
    from django.utils.dateformat import format
    if arg is None:
        arg = settings.DATE_FORMAT
    try:
        # Prefer Django's high-level, locale-aware date formatting.
        return formats.date_format(value, arg)
    except AttributeError:
        # Fall back to the low-level formatter; a second AttributeError means
        # ``value`` is not date-like at all, so render nothing.
        try:
            return format(value, arg)
        except AttributeError:
            return ''
@register.jinja2_filter(jinja2_only=True)
def time(value, arg=None):
    """Formats a time according to the given format.

    ``None``/``Undefined`` values render as an empty string; ``arg`` defaults
    to Django's ``TIME_FORMAT`` setting.
    """
    if value is None or isinstance(value, Undefined):
        return u''
    # Imported here rather than at module level -- presumably so importing this
    # module does not require configured Django settings; confirm.
    from django.conf import settings
    from django.utils import formats
    from django.utils.dateformat import time_format
    if arg is None:
        arg = settings.TIME_FORMAT
    try:
        # Prefer Django's high-level, locale-aware time formatting.
        return formats.time_format(value, arg)
    except AttributeError:
        # Fall back to the low-level formatter; a second AttributeError means
        # ``value`` is not time-like at all, so render nothing.
        try:
            return time_format(value, arg)
        except AttributeError:
            return ''
@register.jinja2_filter(jinja2_only=True)
def truncatewords(value, length):
    """Truncate to ``length`` words using Django's implementation.

    Jinja2 has its own ``truncate`` filter that supports word boundaries and
    more, but it cannot deal with HTML (see ``truncatewords_html``).
    """
    from django.utils.text import truncate_words
    word_limit = int(length)
    return truncate_words(value, word_limit)
@register.jinja2_filter(jinja2_only=True)
def truncatewords_html(value, length):
    """Truncate to ``length`` words, HTML-aware (tags are kept balanced)."""
    from django.utils.text import truncate_html_words
    word_limit = int(length)
    return truncate_html_words(value, word_limit)
@register.jinja2_filter(jinja2_only=True)
def pluralize(value, s1='s', s2=None):
    """Like Django's pluralize-filter, but the singular and plural suffixes
    are two distinct parameters rather than one comma-separated argument.

    Deliberately stricter than Django's: values that allow no singular/plural
    decision raise instead of being silently ignored.
    """
    if s2 is not None:
        singular_suffix, plural_suffix = s1, s2
    else:
        singular_suffix, plural_suffix = '', s1
    try:
        count = int(value)
    except TypeError:  # not a string or a number; maybe it's a list?
        count = len(value)
    return singular_suffix if count == 1 else plural_suffix
@register.jinja2_filter(jinja2_only=True)
def floatformat(value, arg=-1):
    """Django's ``floatformat`` with strict error checking: invalid input
    raises instead of silently yielding an empty string.
    """
    from django.template.defaultfilters import floatformat
    from coffin.interop import django_filter_to_jinja2
    precision = int(arg)  # raises for a non-numeric argument
    formatted = django_filter_to_jinja2(floatformat)(value, precision)
    if formatted == '':  # Django signals failure with an empty result
        raise ValueError(value)
    return formatted
@register.jinja2_filter(jinja2_only=True)
def default(value, default_value=u'', boolean=True):
    """Jinja's ``default`` filter, but with ``boolean=True`` by default so the
    no-argument form behaves like Django's own version.
    """
    return filters.do_default(value, default_value, boolean)
| 33.274809 | 76 | 0.705437 |
8d67405cb23783a340c985e67701249876152d2f | 6,233 | py | Python | tests/test_drf.py | remigermain/nested-multipart-parser | 3ac35f7637314e61d33cb23d6b1f8a59c8d3f01a | [
"MIT"
] | null | null | null | tests/test_drf.py | remigermain/nested-multipart-parser | 3ac35f7637314e61d33cb23d6b1f8a59c8d3f01a | [
"MIT"
] | null | null | null | tests/test_drf.py | remigermain/nested-multipart-parser | 3ac35f7637314e61d33cb23d6b1f8a59c8d3f01a | [
"MIT"
] | null | null | null | import unittest
from django.http import QueryDict
from django.conf import settings
settings.configure()
# need to be after settings configure
from rest_framework.test import APIRequestFactory # noqa: E402
from django.test.client import encode_multipart # noqa: E402
from nested_multipart_parser.drf import DrfNestedParser, NestedParser # noqa: E402
from rest_framework.request import Request # noqa: E402
from rest_framework.exceptions import ParseError # noqa: E402
def toQueryDict(data):
    """Build an immutable QueryDict carrying ``data`` (expectation helper)."""
    result = QueryDict(mutable=True)
    result.update(data)
    result._mutable = False
    return result
class TestDrfParser(unittest.TestCase):
    """Tests for NestedParser/DrfNestedParser handling of nested multipart keys.

    NOTE(review): several tests setattr DRF_NESTED_MULTIPART_PARSER on the
    global Django settings and never restore it, so state can leak between
    tests depending on execution order -- confirm/intentional.
    """

    def test_querydict_mutable(self):
        """Mixed dot/bracket keys parse to nested data; result is immutable."""
        parser = NestedParser(
            {
                "dtc.key": 'value',
                "dtc.vla": "value2",
                "list[0]": "value1",
                "list[1]": "value2",
                "string": "value",
                "dtc.hh.oo": "sub",
                "dtc.hh.aa": "sub2"
            },
        )
        self.assertTrue(parser.is_valid())
        expected = toQueryDict({
            "dtc": {
                "key": "value",
                "vla": "value2",
                "hh": {
                    "oo": "sub",
                    "aa": "sub2"
                }
            },
            "list": [
                "value1",
                "value2",
            ],
            "string": "value",
        })
        self.assertEqual(parser.validate_data, expected)
        self.assertFalse(parser.validate_data.mutable)

    def test_settings(self):
        """Dotted keys parse both before and after setting separator='dot'."""
        from nested_multipart_parser.drf import NestedParser
        data = {
            "article.title": "youpi"
        }
        p = NestedParser(data)
        self.assertTrue(p.is_valid())
        expected = toQueryDict({
            "article": {
                "title": "youpi"
            }
        })
        self.assertEqual(p.validate_data, expected)
        # set settings
        from django.conf import settings
        options = {
            "separator": "dot"
        }
        setattr(settings, 'DRF_NESTED_MULTIPART_PARSER', options)
        p = NestedParser(data)
        self.assertTrue(p.is_valid())
        expected = toQueryDict({
            "article": {
                "title": "youpi"
            }
        })
        self.assertEqual(p.validate_data, expected)

    def parser_boundary(self, data):
        """Helper: multipart-encode ``data`` and wrap it in a DRF Request that
        parses with DrfNestedParser."""
        factory = APIRequestFactory()
        content = encode_multipart('BoUnDaRyStRiNg', data)
        content_type = 'multipart/form-data; boundary=BoUnDaRyStRiNg'
        request = factory.put('/notes/547/', content,
                              content_type=content_type)
        return Request(request, parsers=[DrfNestedParser()])

    def test_views(self):
        """End-to-end: bracket separator through the DRF parser class."""
        setattr(settings, 'DRF_NESTED_MULTIPART_PARSER',
                {"separator": "bracket"})
        data = {
            "dtc[key]": 'value',
            "dtc[vla]": "value2",
            "list[0]": "value1",
            "list[1]": "value2",
            "string": "value",
            "dtc[hh][oo]": "sub",
            "dtc[hh][aa]": "sub2"
        }
        results = self.parser_boundary(data)
        expected = toQueryDict({
            "dtc": {
                "key": "value",
                "vla": "value2",
                "hh": {
                    "oo": "sub",
                    "aa": "sub2"
                }
            },
            "list": [
                "value1",
                "value2",
            ],
            "string": "value",
        })
        self.assertEqual(results.data, expected)
        self.assertFalse(results.data.mutable)

    def test_views_options(self):
        """End-to-end: dot separator through the DRF parser class."""
        setattr(settings, 'DRF_NESTED_MULTIPART_PARSER', {"separator": "dot"})
        data = {
            "dtc.key": 'value',
            "dtc.vla": "value2",
            "list.0": "value1",
            "list.1": "value2",
            "string": "value",
            "dtc.hh.oo": "sub",
            "dtc.hh.aa": "sub2"
        }
        results = self.parser_boundary(data)
        expected = toQueryDict({
            "dtc": {
                "key": "value",
                "vla": "value2",
                "hh": {
                    "oo": "sub",
                    "aa": "sub2"
                }
            },
            "list": [
                "value1",
                "value2",
            ],
            "string": "value",
        })
        self.assertEqual(results.data, expected)
        self.assertFalse(results.data.mutable)

    def test_views_invalid(self):
        """A malformed bracket key (unclosed 'dtc[key') raises ParseError."""
        setattr(settings, 'DRF_NESTED_MULTIPART_PARSER',
                {"separator": "bracket"})
        data = {
            "dtc[key": 'value',
            "dtc[hh][oo]": "sub",
            "dtc[hh][aa]": "sub2"
        }
        results = self.parser_boundary(data)
        with self.assertRaises(ParseError):
            results.data

    def test_views_invalid_options(self):
        """An unknown separator option surfaces as an AssertionError."""
        setattr(settings, 'DRF_NESTED_MULTIPART_PARSER',
                {"separator": "invalid"})
        data = {
            "dtc[key]": 'value',
            "dtc[hh][oo]": "sub",
            "dtc[hh][aa]": "sub2"
        }
        results = self.parser_boundary(data)
        with self.assertRaises(AssertionError):
            results.data

    def test_views_options_mixed_invalid(self):
        """Bracket-style keys are rejected under separator='mixed'."""
        setattr(settings, 'DRF_NESTED_MULTIPART_PARSER',
                {"separator": "mixed"})
        data = {
            "dtc[key]": 'value',
            "dtc[hh][oo]": "sub",
            "dtc[hh][aa]": "sub2"
        }
        results = self.parser_boundary(data)
        with self.assertRaises(ParseError):
            results.data

    def test_views_options_mixed_valid(self):
        """Dot-style keys parse correctly under separator='mixed'."""
        setattr(settings, 'DRF_NESTED_MULTIPART_PARSER',
                {"separator": "mixed"})
        data = {
            "dtc.key": 'value',
            "dtc.hh.oo": "sub",
            "dtc.hh.aa": "sub2"
        }
        results = self.parser_boundary(data)
        expected = {
            "dtc": {
                "key": "value",
                "hh": {
                    "aa": "sub2",
                    "oo": "sub"
                }
            }
        }
        self.assertEqual(results.data, toQueryDict(expected))
| 28.990698 | 83 | 0.476336 |
65f8b9bcd123628974a5a442ef57eabd35ff06af | 7,014 | py | Python | simrd/simrd_experiments/uniform_linear/asymptotics.py | uwsampl/dtr-prototype | eff53cc4804cc7d6246a6e5086861ce2b846f62b | [
"Linux-OpenIB"
] | 90 | 2020-06-18T05:32:06.000Z | 2022-03-28T13:05:17.000Z | simrd/simrd_experiments/uniform_linear/asymptotics.py | merrymercy/dtr-prototype | bf40e182453a7d8d23581ea68f32a9d7d2037d62 | [
"Linux-OpenIB"
] | 5 | 2020-07-02T02:25:16.000Z | 2022-03-24T05:50:30.000Z | simrd/simrd_experiments/uniform_linear/asymptotics.py | uwsampl/dtr-prototype | eff53cc4804cc7d6246a6e5086861ce2b846f62b | [
"Linux-OpenIB"
] | 13 | 2020-06-27T07:01:54.000Z | 2022-01-18T07:31:01.000Z | import json
import time
import glob
from datetime import datetime
from pathos.multiprocessing import ProcessPool as Pool
import matplotlib.pyplot as plt
from simrd.heuristic import *
from simrd.runtime import *
from simrd_experiments.bounds import *
import simrd_experiments.util as util
from simrd_experiments.uniform_linear.run import run, chop_failures
"""
Experiments to evaluate the best-case asymptotic behavior of DTR using various
heuristics, by running the simulator on different memory/heuristic pairings.
These experiments do not capture any runtime overhead (hence 'best case'
performance).
"""
ASYMPTOTICS_MOD = 'uniform_linear/asymptotics'
def run_asymptotics(base, ns, heuristic, bound, runtime, releases=True, **kwargs):
    """Simulate every n in *ns* under the given heuristic/bound/runtime and
    write the resulting compute overheads to a timestamped JSON file."""
    config = {
        'ns': ns,
        'heuristic': str(heuristic),
        'heuristic_features': list(heuristic.FEATURES),
        'memory': str(bound),
        'releases': releases,
        'runtime': runtime.ID,
        'runtime_features': list(runtime.FEATURES),
        'kwargs': kwargs,
    }

    pool = Pool()
    print('generating asymptotics data for config: {}...'.format(json.dumps(config, indent=2)))

    # One argument list per problem size; runs are farmed out to the pool.
    job_args = [[n, bound(n), heuristic, runtime, releases, kwargs] for n in ns]

    start = time.time()
    rts = pool.map(run, *zip(*job_args))
    elapsed = time.time() - start

    # Drop leading (OOM) and trailing (thrashing) failures from the n range.
    succ_ns, _ = chop_failures(ns, rts)
    print(' - succeeded between n={} and n={}'.format(succ_ns[0], succ_ns[-1]))
    print('  done, took {} seconds.'.format(elapsed))

    results = {
        'layers': succ_ns,
        'computes': [rt.telemetry.summary['remat_compute'] for rt in rts],
        'had_OOM': ns[0] != succ_ns[0],
        'had_thrash': ns[-1] != succ_ns[-1],
    }

    date_str = datetime.now().strftime('%Y%m%d-%H%M%S-%f')
    base_mod = ASYMPTOTICS_MOD + '/' + base
    out_file = '{}-{}-{}.json'.format(date_str, heuristic.ID, bound.ID)
    util.ensure_output_path(base_mod)
    out_path = util.get_output_path(base_mod, out_file)
    with open(out_path, 'w') as out_f:
        out_f.write(json.dumps({'config': config, 'results': results}, indent=2))
    print('-> done, saved to "{}"'.format(out_path))
def plot_treeverse(ns):
    """Overlay the theoretical TREEVERSE recompute curve for power-of-two n."""
    def treeverse(n):
        # Recurrence of the TREEVERSE checkpointing schedule (power-of-two n).
        if n == 0:
            return 0
        if n == 1:
            return 1 + 1  # forward then backward
        half = n // 2
        assert half * 2 == n
        return half + 2 * treeverse(half)

    # Round every n down to a power of two and deduplicate.
    tv_ns = sorted({2 ** (n.bit_length() - 1) for n in ns})
    tv_rs = [treeverse(n) - n for n in tv_ns]
    plt.plot(tv_ns, tv_rs,
             label=r'TREEVERSE ($B=\log_2(n)$, theoretical)',
             color='black', linestyle='--', alpha=0.8)
def plot_tq(ns):
    """Overlay the theoretical Chen et al. sqrt(n)-checkpointing curve (y = n)."""
    plt.plot(ns, ns,
             label=r'Chen et al. ($2\sqrt{n}$, theoretical)',
             color='black', linestyle='--', alpha=0.8)
def run_runtime_comparison(base, ns, heuristic, bound):
    """Run the same heuristic/bound under the non-eager and eager runtimes."""
    # Order matters for downstream file naming: non-eager first, then eager.
    for runtime in (RuntimeV2Optimized, RuntimeV2EagerOptimized):
        run_asymptotics(base, ns, heuristic, bound, runtime)
def run_heuristic_comparison(base, ns, heuristics, bound, runtime, releases=True, **kwargs):
    """Run every heuristic in *heuristics* with the same bound and runtime."""
    for h in heuristics:
        run_asymptotics(base, ns, h, bound, runtime, releases=releases, **kwargs)
def plot_runtime_comparison(base, heuristic, bound, out_file):
    """Compare different runtime settings on the same budget and heuristic."""
    base_dir = util.get_output_path(ASYMPTOTICS_MOD + '/' + base, '')

    # Collect every stored result matching this heuristic and memory bound.
    results = []
    for result_path in glob.glob(base_dir + '*.json'):
        with open(result_path, 'r') as jf:
            js = json.loads(jf.read())
        assert js is not None
        # if js['config']['runtime'] != 'V2': continue
        if js['config']['heuristic'] != str(heuristic) and \
           js['config']['heuristic'] != str(heuristic) + ' (Unoptimized)':
            print('skipping "{}", since {} != {}'.format(result_path, js['config']['heuristic'], str(heuristic)))
            continue
        if js['config']['memory'] != str(bound):
            print('skipping "{}", since {} != {}'.format(result_path, js['config']['memory'], str(bound)))
            continue
        # All collected runs must share the same releases flag and kwargs.
        assert len(results) == 0 or results[-1]['config']['releases'] == js['config']['releases']
        assert len(results) == 0 or results[-1]['config']['kwargs'] == js['config']['kwargs']
        results.append(js)

    for res in results:
        # Derive a human-readable label from the runtime configuration.
        if res['config']['runtime'] == 'V1':
            runtime_label = 'Banishing'
        elif 'eager_evict' in res['config']['runtime_features']:
            runtime_label = 'Eager eviction'
        else:
            runtime_label = 'No eager eviction'

        line = plt.plot(res['results']['layers'], res['results']['computes'],
                        label=runtime_label, alpha=0.7, marker='X')
        line_color = line[0].get_color()

        # Vertical markers for where the run OOMed (left) or thrashed (right).
        if res['results']['had_OOM']:
            plt.axvline(x=res['results']['layers'][0], color=line_color,
                        linestyle='dotted', alpha=0.3, linewidth=3)
        if res['results']['had_thrash']:
            plt.axvline(x=res['results']['layers'][-1], color=line_color,
                        linestyle='--', alpha=0.3, linewidth=3)

    # Theoretical baseline depends on the kind of memory bound.
    if isinstance(bound, Log2Bound):
        plot_treeverse(results[0]['config']['ns'])
    else:
        plot_tq(results[0]['config']['ns'])

    plt.grid(True)
    plt.xlabel(r'Number of Layers $n$')
    plt.ylabel(r'Additional Compute')
    plt.legend()
    plt.title('Layers vs. Compute Overhead\n{} Heuristic, {} Memory'.format(str(heuristic), str(bound)))
    plt.savefig(base_dir + out_file, dpi=300)
    plt.clf()
def plot_heuristic_comparison(base, bound, runtime, out_file):
    """Compare different heuristics on the same runtime and budget.

    Reads the JSON result files produced by ``run_heuristic_comparison`` from
    the experiment's output directory, plots layers vs. compute overhead for
    every matching heuristic, and saves the figure to *out_file*.
    """
    base_dir = util.get_output_path(ASYMPTOTICS_MOD + '/' + base, '')

    # Collect every stored result matching this runtime and memory bound.
    results = []
    for result_path in glob.glob(base_dir + '*.json'):
        with open(result_path, 'r') as jf:
            js = json.loads(jf.read())
        assert js is not None
        if js['config']['runtime'] != runtime.ID: continue
        if js['config']['memory'] != str(bound): continue
        # All collected runs must share the same releases flag and n range.
        assert len(results) == 0 or results[-1]['config']['releases'] == js['config']['releases']
        assert len(results) == 0 or results[-1]['config']['ns'] == js['config']['ns']
        results.append(js)

    for res in results:
        h_str = res['config']['heuristic']
        h_color, h_marker = HEURISTICS[h_str].COLOR, HEURISTICS[h_str].MARKER
        plt.plot(res['results']['layers'], res['results']['computes'], label=h_str,
                 color=h_color, marker=h_marker, alpha=0.5, linewidth=3, ms=10)
        if res['results']['had_OOM']:
            plt.axvline(x=res['results']['layers'][0], color=h_color, linestyle='dotted',
                        alpha=0.3, linewidth=3)
        # BUG FIX: this previously tested `had_thrash is not None`, which is
        # always true for a bool, so the thrash marker was drawn even when no
        # thrashing occurred. Use truthiness, consistent with
        # plot_runtime_comparison.
        if res['results']['had_thrash']:
            plt.axvline(x=res['results']['layers'][-1], color=h_color, linestyle='--',
                        alpha=0.3, linewidth=3)

    # Theoretical baseline depends on the kind of memory bound.
    if isinstance(bound, Log2Bound):
        plot_treeverse(results[0]['config']['ns'])
    else:
        plot_tq(results[0]['config']['ns'])

    plt.grid(True)
    plt.xlabel(r'Number of Layers $n$')
    plt.ylabel(r'Additional Compute')
    plt.legend()
    plt.title('Layers vs. Compute Overhead ({} Memory)'.format(str(bound)))
    plt.savefig(base_dir + out_file, dpi=300)
    plt.clf()
| 38.119565 | 114 | 0.651554 |
0f9658613ed29b7cbc5de7f7e0fb0648f4c6ff54 | 389 | py | Python | djtbot/djtbot/wsgi.py | OldSem/djtbot | 04d92263889542e181009ffd5d36f7e1cb0ab460 | [
"MIT"
] | null | null | null | djtbot/djtbot/wsgi.py | OldSem/djtbot | 04d92263889542e181009ffd5d36f7e1cb0ab460 | [
"MIT"
] | null | null | null | djtbot/djtbot/wsgi.py | OldSem/djtbot | 04d92263889542e181009ffd5d36f7e1cb0ab460 | [
"MIT"
] | null | null | null | """
WSGI config for djtbot project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Select the settings module before the application object is built; an
# externally-set DJANGO_SETTINGS_MODULE takes precedence over this default.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djtbot.settings')

# Module-level WSGI callable that servers (gunicorn, uWSGI, mod_wsgi) import.
application = get_wsgi_application()
| 22.882353 | 78 | 0.784062 |
2740a7e1fe6a44e8520216981cd964336af20d8d | 702 | py | Python | test.py | linkdd/manyssh | 1f95d9abbf3215d115c627fad41cabcba02f5e28 | [
"MIT"
] | 3 | 2015-02-08T23:50:53.000Z | 2015-02-09T09:14:34.000Z | test.py | linkdd/manyssh | 1f95d9abbf3215d115c627fad41cabcba02f5e28 | [
"MIT"
] | 5 | 2015-02-09T09:20:20.000Z | 2015-02-09T09:41:29.000Z | test.py | linkdd/manyssh | 1f95d9abbf3215d115c627fad41cabcba02f5e28 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from manyssh.app import Application
import unittest
class ManySSHTest(unittest.TestCase):
    """Checks that the configured cluster alias expands to its host list."""

    def setUp(self):
        self.argv = ['manyssh', 'mycluster0']
        self.expected_hosts = [
            'root@myhost0',
            'root@myhost1',
            'root@myhost2',
        ]
        self.app = Application(self.argv)
        self.app.load_config()
        self.app.parse_commandline()

    def test_manyssh(self):
        # Both the raw config entry and the parsed host list must match.
        self.assertEqual(
            self.app.config.get('mycluster0', []),
            self.expected_hosts,
        )
        self.assertEqual(self.app.hosts, self.expected_hosts)
if __name__ == '__main__':
unittest.main()
| 21.9375 | 61 | 0.58547 |
0077d5c22a98c02651f9c9a062b4dfe0b52b6fbb | 8,530 | py | Python | grr/server/grr_response_server/output_plugins/bigquery_plugin_test.py | 4ndygu/grr | cfc725b5ee3a2626ac4cdae7fb14471612da4522 | [
"Apache-2.0"
] | null | null | null | grr/server/grr_response_server/output_plugins/bigquery_plugin_test.py | 4ndygu/grr | cfc725b5ee3a2626ac4cdae7fb14471612da4522 | [
"Apache-2.0"
] | null | null | null | grr/server/grr_response_server/output_plugins/bigquery_plugin_test.py | 4ndygu/grr | cfc725b5ee3a2626ac4cdae7fb14471612da4522 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Tests for BigQuery output plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import gzip
import json
import os
from builtins import range # pylint: disable=redefined-builtin
from builtins import zip # pylint: disable=redefined-builtin
import mock
from grr_response_core import config
from grr_response_core.lib import flags
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import bigquery
from grr_response_server.output_plugins import bigquery_plugin
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
class BigQueryOutputPluginTest(flow_test_lib.FlowTestsBaseclass):
  """Tests BigQuery hunt output plugin.

  Fixes over the previous version: the schema file and the two expected-data
  files were opened without ever being closed (leaked handles); they are now
  read inside ``with`` blocks. The duplicated 10-StatEntry fixture is built by
  a shared private helper.
  """

  def setUp(self):
    super(BigQueryOutputPluginTest, self).setUp()
    self.client_id = self.SetupClient(0)
    self.results_urn = self.client_id.Add("Results")

  def ProcessResponses(self,
                       plugin_args=None,
                       responses=None,
                       process_responses_separately=False):
    """Feed *responses* through the plugin with a mocked BigQuery client.

    Returns:
      The positional args of every InsertData call the plugin made, i.e.
      (table name, gzipped JSON stream, schema, job id) tuples.
    """
    plugin_cls = bigquery_plugin.BigQueryOutputPlugin
    plugin, plugin_state = plugin_cls.CreatePluginAndDefaultState(
        source_urn=self.results_urn, args=plugin_args, token=self.token)

    messages = []
    for response in responses:
      messages.append(
          rdf_flows.GrrMessage(source=self.client_id, payload=response))

    with test_lib.FakeTime(1445995873):
      with mock.patch.object(bigquery, "GetBigQueryClient") as mock_bigquery:
        if process_responses_separately:
          for message in messages:
            plugin.ProcessResponses(plugin_state, [message])
        else:
          plugin.ProcessResponses(plugin_state, messages)

        plugin.Flush(plugin_state)
        plugin.UpdateState(plugin_state)

    return [x[0] for x in mock_bigquery.return_value.InsertData.call_args_list]

  def _CreateStatEntryResponses(self):
    """Build the ten canonical StatEntry responses shared by several tests."""
    responses = []
    for i in range(10):
      responses.append(
          rdf_client_fs.StatEntry(
              pathspec=rdf_paths.PathSpec(
                  path="/foo/bar/%d" % i, pathtype="OS"),
              st_mode=33184,  # octal = 100640 => u=rw,g=r,o= => -rw-r-----
              st_ino=1063090,
              st_dev=64512,
              st_nlink=1 + i,
              st_uid=139592,
              st_gid=5000,
              st_size=0,
              st_atime=1336469177,
              st_mtime=1336129892,
              st_ctime=1336129892))
    return responses

  def CompareSchemaToKnownGood(self, schema):
    """Compare *schema* field names/types against the checked-in schema file."""
    schema_path = os.path.join(config.CONFIG["Test.data_dir"], "bigquery",
                               "ExportedFile.schema")
    # Context manager so the handle is closed (previously leaked).
    with open(schema_path, "rb") as schema_file:
      expected_schema_data = json.load(schema_file)

    # It's easier to just compare the two dicts but even a change to the proto
    # description requires you to fix the json so we just compare field names
    # and types.
    schema_fields = [(x["name"], x["type"]) for x in schema]
    schema_metadata_fields = [
        (x["name"], x["type"]) for x in schema[0]["fields"]
    ]
    expected_fields = [(x["name"], x["type"]) for x in expected_schema_data]
    expected_metadata_fields = [
        (x["name"], x["type"]) for x in expected_schema_data[0]["fields"]
    ]
    self.assertEqual(schema_fields, expected_fields)
    self.assertEqual(schema_metadata_fields, expected_metadata_fields)

  def testBigQueryPluginWithValuesOfSameType(self):
    output = self.ProcessResponses(
        plugin_args=bigquery_plugin.BigQueryOutputPluginArgs(),
        responses=self._CreateStatEntryResponses())

    self.assertLen(output, 1)
    _, stream, schema, job_id = output[0]

    self.assertEqual(job_id,
                     "C-1000000000000000_Results_ExportedFile_1445995873")

    self.CompareSchemaToKnownGood(schema)

    actual_fd = gzip.GzipFile(
        None, "r", bigquery_plugin.BigQueryOutputPlugin.GZIP_COMPRESSION_LEVEL,
        stream)

    # Compare to our stored data. Bigquery expects a newline separated list of
    # JSON dicts, but this isn't valid JSON as a whole, so we can't just load
    # the whole thing and compare - go line by line instead.
    expected_path = os.path.join(config.CONFIG["Test.data_dir"], "bigquery",
                                 "ExportedFile.json")
    counter = 0
    with open(expected_path, "rb") as expected_fd:
      for actual, expected in zip(actual_fd, expected_fd):
        self.assertEqual(json.loads(actual), json.loads(expected))
        counter += 1

    self.assertEqual(counter, 10)

  def _parseOutput(self, name, stream):
    """Check the single JSON row in a gzipped output stream for correctness."""
    content_fd = gzip.GzipFile(None, "r", 9, stream)
    counter = 0

    for item in content_fd:
      counter += 1
      row = json.loads(item)

      # Metadata fields are identical for both export types.
      self.assertEqual(row["metadata"]["client_urn"], self.client_id)
      self.assertEqual(row["metadata"]["hostname"], "Host-0")
      self.assertEqual(row["metadata"]["mac_address"],
                       "aabbccddee00\nbbccddeeff00")
      self.assertEqual(row["metadata"]["source_urn"], self.results_urn)

      if name == "ExportedFile":
        self.assertEqual(row["urn"], self.client_id.Add("/fs/os/中国新闻网新闻中"))
      else:
        self.assertEqual(row["pid"], "42")

    self.assertEqual(counter, 1)

  def testBigQueryPluginWithValuesOfMultipleTypes(self):
    output = self.ProcessResponses(
        plugin_args=bigquery_plugin.BigQueryOutputPluginArgs(),
        responses=[
            rdf_client_fs.StatEntry(
                pathspec=rdf_paths.PathSpec(path="/中国新闻网新闻中", pathtype="OS")),
            rdf_client.Process(pid=42)
        ],
        process_responses_separately=True)

    # Should have two separate output streams for the two types.
    self.assertLen(output, 2)

    for name, stream, _, job_id in output:
      self.assertIn(job_id, [
          "C-1000000000000000_Results_ExportedFile_1445995873",
          "C-1000000000000000_Results_ExportedProcess_1445995873"
      ])
      self._parseOutput(name, stream)

  def testBigQueryPluginWithEarlyFlush(self):
    responses = self._CreateStatEntryResponses()

    sizes = [37, 687, 722, 755, 788, 821, 684, 719, 752, 785]

    def GetSize(unused_path):
      return sizes.pop(0)

    # Force an early flush. Gzip is non deterministic since our
    # metadata is a dict with unpredictable order so we make up the file sizes
    # such that there is one flush during processing.
    with test_lib.ConfigOverrider({"BigQuery.max_file_post_size": 800}):
      with utils.Stubber(os.path, "getsize", GetSize):
        output = self.ProcessResponses(
            plugin_args=bigquery_plugin.BigQueryOutputPluginArgs(),
            responses=responses)

    self.assertLen(output, 2)

    # Check that the output is still consistent.
    actual_fds = []
    for _, stream, _, _ in output:
      actual_fds.append(gzip.GzipFile(None, "r", 9, stream))

    # Compare to our stored data.
    # TODO(user): there needs to be a better way to generate these files on
    # change than breaking into the debugger.
    expected_path = os.path.join(config.CONFIG["Test.data_dir"], "bigquery",
                                 "ExportedFile.json")

    # Check that the same entries we expect are spread across the two files.
    counter = 0
    with open(expected_path, "rb") as expected_fd:
      for actual_fd in actual_fds:
        for actual, expected in zip(actual_fd, expected_fd):
          self.assertEqual(json.loads(actual), json.loads(expected))
          counter += 1

    self.assertEqual(counter, 10)
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
| 35.690377 | 79 | 0.658499 |
17fe946d3ae68941942c4136dd2570342d7c48ee | 1,373 | py | Python | metaworld/policies/sawyer_peg_unplug_side_v1_policy.py | pkol/metaworld | 718e4d1bc2b34e0ae3ef6415fb6cbe4afe8ea4b9 | [
"MIT"
] | null | null | null | metaworld/policies/sawyer_peg_unplug_side_v1_policy.py | pkol/metaworld | 718e4d1bc2b34e0ae3ef6415fb6cbe4afe8ea4b9 | [
"MIT"
] | null | null | null | metaworld/policies/sawyer_peg_unplug_side_v1_policy.py | pkol/metaworld | 718e4d1bc2b34e0ae3ef6415fb6cbe4afe8ea4b9 | [
"MIT"
] | null | null | null | import numpy as np
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
class SawyerPegUnplugSideV1Policy(Policy):
    """Scripted policy: hover over the peg, descend, grip, then pull sideways."""

    @staticmethod
    @assert_fully_parsed
    def _parse_obs(obs):
        # First three entries are the gripper position, the rest the peg's.
        return {
            'hand_xyz': obs[:3],
            'peg_xyz': obs[3:],
        }

    def get_action(self, obs):
        o_d = self._parse_obs(obs)

        action = Action({
            'delta_pos': np.arange(3),
            'grab_pow': 3,
        })

        action['delta_pos'] = move(o_d['hand_xyz'], to_xyz=self._desired_xyz(o_d), p=25.)
        action['grab_pow'] = self._grab_pow(o_d)

        return action.array

    @staticmethod
    def _desired_xyz(o_d):
        hand = o_d['hand_xyz']
        # Small offset so the gripper targets a point just beside/above the peg.
        target = o_d['peg_xyz'] + np.array([.005, .0, .015])

        if np.linalg.norm(hand[:2] - target[:2]) > 0.04:
            # Not yet horizontally aligned: hover high above the peg first.
            return target + np.array([0., 0., 0.3])
        if abs(hand[2] - target[2]) > 0.02:
            # Aligned but still too high: descend onto the peg.
            return target
        # In gripping position: pull the peg out sideways (+x).
        return target + np.array([0.1, 0., 0.])

    @staticmethod
    def _grab_pow(o_d):
        hand = o_d['hand_xyz']
        peg = o_d['peg_xyz']

        # Keep the gripper open until it is aligned with and close to the peg.
        if np.linalg.norm(hand[:2] - peg[:2]) > 0.04 \
                or abs(hand[2] - peg[2]) > 0.15:
            return -1.
        return .7
| 26.403846 | 89 | 0.553532 |
58603738a0dcd513024889ba762e0c5b3de703d1 | 3,159 | py | Python | probe/modules/antivirus/fsecure/fsecure.py | quarkslab/irma | 29d8baa4e27bacaf7aa9dd570c16e5268ae6237c | [
"Apache-2.0"
] | 248 | 2015-01-08T09:36:44.000Z | 2022-01-12T10:29:21.000Z | probe/modules/antivirus/fsecure/fsecure.py | quarkslab/irma | 29d8baa4e27bacaf7aa9dd570c16e5268ae6237c | [
"Apache-2.0"
] | 50 | 2015-01-09T08:31:57.000Z | 2022-03-30T10:41:13.000Z | probe/modules/antivirus/fsecure/fsecure.py | quarkslab/irma | 29d8baa4e27bacaf7aa9dd570c16e5268ae6237c | [
"Apache-2.0"
] | 74 | 2015-01-05T09:11:21.000Z | 2022-03-29T02:16:54.000Z | #
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
import re
from modules.antivirus.base import AntivirusUnix
log = logging.getLogger(__name__)
class FSecure(AntivirusUnix):
name = "FSecure Antivirus (Linux)"
# ==================================
# Constructor and destructor stuff
# ==================================
def __init__(self, *args, **kwargs):
# class super class constructor
super().__init__(*args, **kwargs)
# scan tool variables
self.scan_args = (
"--allfiles=yes",
"--scanexecutables=yes",
"--archive=yes",
"--mime=yes",
# "--riskware=yes",
"--virus-action1=report",
# "--riskware-action1=report",
"--suspected-action1=report",
"--virus-action2=none",
# "--riskware-action2=none",
"--suspected-action2=none",
"--auto=yes",
"--list=no",
)
# see man zavcli for return codes
# fsav reports the exit codes in following priority order:
# 130, 7, 1, 3, 4, 8, 6, 9, 0.
# 0 Normal exit; no viruses or suspicious files found.
# 1 Fatal error (Usually a missing or corrupted file.)
# 3 A boot virus or file virus found.
# 4 Riskware (potential spyware) found.
# 6 At least one virus was removed and no infected files left.
# 7 Out of memory.
# 8 Suspicious files found (not necessarily infected by a virus)
# 9 Scan error, at least one file scan failed.
self._scan_retcodes[self.ScanResult.INFECTED] = \
lambda x: x in [3, 4, 6, 8]
self._scan_retcodes[self.ScanResult.ERROR] = lambda x: x in [1, 7, 9]
self.scan_patterns = [
re.compile('(?P<file>.*):\s+'
'(Infected|Suspected|Riskware):\s+'
'(?P<name>.*)', re.IGNORECASE),
]
# ==========================================
# Antivirus methods (need to be overriden)
# ==========================================
def get_version(self):
"""return the version of the antivirus"""
return self._run_and_parse(
'--version',
regexp='(?P<version>\d+([.-]\d+)+)',
group='version')
def get_scan_path(self):
"""return the full path of the scan tool"""
return self.locate_one("fsav")
def get_virus_database_version(self):
"""Return the Virus Database version"""
return self._run_and_parse(
'--version',
regexp='Database version: (?P<dbversion>.*)',
group='dbversion')
| 34.714286 | 77 | 0.55334 |
26d36352b333e1031533bfaf3af2e5abf7b7906e | 4,512 | py | Python | wikiwho_chobj/chobj.py | gesiscss/wikiwho_chobj | b53be680a776a606bc46d923696193971a9e8276 | [
"MIT"
] | null | null | null | wikiwho_chobj/chobj.py | gesiscss/wikiwho_chobj | b53be680a776a606bc46d923696193971a9e8276 | [
"MIT"
] | null | null | null | wikiwho_chobj/chobj.py | gesiscss/wikiwho_chobj | b53be680a776a606bc46d923696193971a9e8276 | [
"MIT"
] | 1 | 2019-03-28T16:36:17.000Z | 2019-03-28T16:36:17.000Z | import os
import pickle
import datetime
from time import sleep
import numpy as np
from WikiWho.utils import iter_rev_tokens
from .revision import Revision
from .utils import Timer
class Chobjer:
    """Builds change objects ("chobjs") between consecutive revisions of a
    WikiWho-pickled article."""

    def __init__(self, article, pickles_path, lang, context, starting_revid=-1):
        # Imported lazily so the module can load without the wikiwho package.
        from wikiwho import open_pickle

        self.ww_pickle = open_pickle(
            article, pickle_path=pickles_path, lang=lang)
        self.article = article
        self.context = context
        self.revisions = self.ww_pickle.revisions
        self.starting_revid = starting_revid

    def get_revisions_dict(self):
        """Map every ordered revision id >= starting_revid to a Revision."""
        revisions = self.revisions
        return {
            rev_id: Revision(
                rev_id,
                datetime.datetime.strptime(
                    revisions[rev_id].timestamp, r'%Y-%m-%dT%H:%M:%SZ'),
                revisions[rev_id].editor)
            for rev_id in self.ww_pickle.ordered_revisions
            if rev_id >= self.starting_revid
        }

    def get_one_revision(self, rev_id):
        """Build a single Revision wrapper for *rev_id*."""
        raw = self.revisions[rev_id]
        return Revision(
            rev_id,
            datetime.datetime.strptime(raw.timestamp, r'%Y-%m-%dT%H:%M:%SZ'),
            raw.editor)

    def __iter_rev_content(self, rev_id):
        # (value, token_id) pairs bracketed by start/end sentinels.
        yield ('{st@rt}', -1)
        for word in iter_rev_tokens(self.revisions[rev_id]):
            yield (word.value, word.token_id)
        yield ('{$nd}', -2)

    def __get_token_ids(self, rev_id):
        # Token ids bracketed by the -1/-2 sentinels.
        yield -1
        for word in iter_rev_tokens(self.revisions[rev_id]):
            yield word.token_id
        yield -2

    def __get_values(self, rev_id):
        # Token strings bracketed by the start/end sentinel strings.
        yield '{st@rt}'
        for word in iter_rev_tokens(self.revisions[rev_id]):
            yield word.value
        yield '{$nd}'

    def add_all_tokens(self, revisions, tokens):
        """Record every token's insertions/removals on the affected revisions,
        skipping revisions below starting_revid."""
        for token in tokens:
            if token.origin_rev_id >= self.starting_revid:
                revisions[token.origin_rev_id].added.append(token.token_id)
            for in_revision in token.inbound:
                if in_revision >= self.starting_revid:
                    revisions[in_revision].added.append(token.token_id)
            for out_revision in token.outbound:
                if out_revision >= self.starting_revid:
                    revisions[out_revision].removed.append(token.token_id)

    def iter_chobjs(self):
        """Yield change objects for each pair of consecutive revisions."""
        revs = self.get_revisions_dict()
        rev_iter = iter(revs.items())

        # Seed the sliding window with the first revision (no predecessor).
        prev_id, prev_rev = next(rev_iter)
        prev_rev.from_id = None
        prev_rev.tokens = np.fromiter(self.__get_token_ids(prev_id), int)
        # Plain python lists are faster than numpy arrays for strings.
        prev_rev.values = list(self.__get_values(prev_id))

        # Attach added/removed token ids to all revisions up front.
        self.add_all_tokens(revs, self.ww_pickle.tokens)

        for cur_id, _ in rev_iter:
            cur_rev = revs[cur_id]

            # Link the pair in both directions.
            cur_rev.from_id = prev_id
            prev_rev.to_id = cur_rev.id

            cur_rev.tokens = np.fromiter(self.__get_token_ids(cur_id), int)
            cur_rev.values = list(self.__get_values(cur_id))
            cur_rev.inserted_continuous_pos()

            yield from prev_rev.iter_chobs(self.article, cur_rev, self.context)

            # Release the consumed revision and slide the window forward.
            revs[prev_id] = None
            prev_id = cur_id
            prev_rev = cur_rev

    def save(self, save_dir):
        """Pickle the change data to ``<save_dir>/<article>_change.pkl``."""
        save_filepath = os.path.join(
            save_dir, f"{self.article}_change.pkl")
        with open(save_filepath, "wb") as file:
            # NOTE(review): ``self.wiki`` is never assigned anywhere in this
            # class, so this raises AttributeError if called — looks like a
            # stale attribute name; confirm against callers before fixing.
            pickle.dump(self.wiki, file)
| 35.527559 | 92 | 0.598404 |
ee4372c897a986c41c4219ef18b542a5f5d62140 | 1,050 | py | Python | hummingbird/ml/operator_converters/sklearn/normalizer.py | gwd666/hummingbird | a39f0fbf4016754f8301dbaf54b0291b6a2e02e0 | [
"MIT"
] | 1 | 2020-12-29T12:51:10.000Z | 2020-12-29T12:51:10.000Z | hummingbird/ml/operator_converters/sklearn/normalizer.py | gwd666/hummingbird | a39f0fbf4016754f8301dbaf54b0291b6a2e02e0 | [
"MIT"
] | null | null | null | hummingbird/ml/operator_converters/sklearn/normalizer.py | gwd666/hummingbird | a39f0fbf4016754f8301dbaf54b0291b6a2e02e0 | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Converter for scikit-learn Normalizer.
"""
from onnxconverter_common.registration import register_converter
from .._normalizer_implementations import Normalizer
def convert_sklearn_normalizer(operator, device, extra_config):
    """
    Converter for `sklearn.preprocessing.Normalizer`

    Args:
        operator: An operator wrapping a `sklearn.preprocessing.Normalizer` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    # The only state needed from the sklearn model is its norm kind; the
    # Normalizer module implements the actual computation.
    norm_kind = operator.raw_operator.norm
    return Normalizer(norm_kind, device)
register_converter("SklearnNormalizer", convert_sklearn_normalizer)
| 31.818182 | 90 | 0.655238 |
292b713d98ea0b06a8e9c831ff7f1551dc97412b | 639 | py | Python | fakerecurly/errors.py | davedevelopment/fake-recurly | f7ab365eb9d9b35560c7eb9b994fbd696929155b | [
"WTFPL"
] | 1 | 2016-05-30T13:52:29.000Z | 2016-05-30T13:52:29.000Z | fakerecurly/errors.py | davedevelopment/fake-recurly | f7ab365eb9d9b35560c7eb9b994fbd696929155b | [
"WTFPL"
] | null | null | null | fakerecurly/errors.py | davedevelopment/fake-recurly | f7ab365eb9d9b35560c7eb9b994fbd696929155b | [
"WTFPL"
] | null | null | null |
class FieldError(ValueError):
    """Validation error on a resource field, rendered as Recurly-style XML."""

    # symbol -> human readable message; unknown symbols fall back to 'unknown'.
    _MESSAGES = {"taken": "has already been taken"}

    def __init__(self, resource, name, symbol):
        self.resource = resource
        self.name = name
        self.symbol = symbol

    def __unicode__(self):
        message = self._MESSAGES.get(self.symbol, 'unknown')
        return '<error field="%s.%s" symbol="%s">%s</error>' % (
            self.resource, self.name, self.symbol, message)
class NotFoundError(ValueError):
    """Raised when a resource does not exist; renders Recurly's not_found XML."""

    def __init__(self, msg):
        self.msg = msg

    def __unicode__(self):
        template = ('<error><symbol>not_found</symbol>'
                    '<description lang="en-US">%s</description></error>')
        return template % (self.msg)
| 30.428571 | 115 | 0.604069 |
d2bf5ba6e10de5745a02651f06a8ad02f7159bce | 1,009 | py | Python | stroylux/main/core/sitemaps.py | vladkoblynsky/shop | aaf027f4111605772624a868a0243b221b97c857 | [
"BSD-3-Clause"
] | null | null | null | stroylux/main/core/sitemaps.py | vladkoblynsky/shop | aaf027f4111605772624a868a0243b221b97c857 | [
"BSD-3-Clause"
] | 7 | 2020-09-19T16:24:46.000Z | 2022-01-13T03:19:46.000Z | stroylux/main/core/sitemaps.py | vladkoblynsky/shop | aaf027f4111605772624a868a0243b221b97c857 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib.sitemaps import Sitemap
from main.blog.models import BlogArticle
from main.page.models import Page
from main.product.models import Product, Category
class ProductSitemap(Sitemap):
    """Sitemap section listing every published product."""
    name = 'product'
    changefreq = 'daily'
    limit = 100000

    def items(self):
        # Only published products should be exposed to crawlers.
        return Product.objects.published()
class CategorySitemap(Sitemap):
    """Sitemap section listing every product category."""
    name = 'category'
    changefreq = 'daily'
    limit = 100000

    def items(self):
        return Category.objects.all()
class BlogArticleSitemap(Sitemap):
    """Sitemap section listing blog articles visible to anonymous users."""
    name = 'article'
    changefreq = 'daily'
    limit = 100000

    def items(self):
        # Anonymous visibility: visible_to_user() is called with no user.
        return BlogArticle.objects.visible_to_user()
class PageSitemap(Sitemap):
    """Sitemap section listing every published static page."""
    name = 'page'
    changefreq = 'daily'
    limit = 100000

    def items(self):
        return Page.objects.published()
# Registry mapping each sitemap section's name to its class, in URL order.
sitemaps = {
    section.name: section
    for section in (ProductSitemap, CategorySitemap,
                    BlogArticleSitemap, PageSitemap)
}
| 20.18 | 52 | 0.694747 |
5c2e5b6dbe04c7ad9bc390f3b6af464ccb8006ce | 1,184 | py | Python | main.py | trandinhquangloc/Watson-Language-Translator | 29cd8107a61fb32c4c0f9f0c0880e36c6e183f9c | [
"Apache-2.0"
] | null | null | null | main.py | trandinhquangloc/Watson-Language-Translator | 29cd8107a61fb32c4c0f9f0c0880e36c6e183f9c | [
"Apache-2.0"
] | null | null | null | main.py | trandinhquangloc/Watson-Language-Translator | 29cd8107a61fb32c4c0f9f0c0880e36c6e183f9c | [
"Apache-2.0"
] | null | null | null | import json
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
import os
from dotenv import load_dotenv
load_dotenv()
apikey = os.environ['apikey']
url = os.environ['url']
version_lt='2018-05-01'
authenticator = IAMAuthenticator(apikey)
language_translator = LanguageTranslatorV3(version=version_lt,authenticator=authenticator) # Create an instant
language_translator.set_service_url(url)
# from pandas import json_normalize
# list_language=json_normalize(language_translator.list_identifiable_languages().get_result(), "languages")
# print(list_language)
def EnglishToFrench(englishText):
    """Translate *englishText* into French via the Watson 'en-fr' model."""
    response = language_translator.translate(text=englishText, model_id='en-fr')
    return response.get_result()
def FrenchToEnglish(frenchText):
    """Translate *frenchText* into English via the Watson 'fr-en' model.

    Bug fix: the original body translated the module-level ``englishText``
    global with the 'en-fr' model, ignoring its own parameter entirely and
    translating in the wrong direction.
    """
    translation_response = language_translator.translate(text=frenchText,
                                                         model_id='fr-en')
    englishText = translation_response.get_result()
    return englishText
englishText = "Hello World. I am Loc"
french_Text_Translate = EnglishToFrench(englishText)
print(french_Text_Translate['translations'][0]['translation'])
| 28.878049 | 111 | 0.810811 |
c2933b6f7a25f3d0dcf936ce7558fc7de1d0988f | 7,214 | py | Python | intents/test_show.py | chhavip/debaised-analysis | 3597d35ce74f8d20384d57f12f7eb65020f9370b | [
"Apache-2.0"
] | null | null | null | intents/test_show.py | chhavip/debaised-analysis | 3597d35ce74f8d20384d57f12f7eb65020f9370b | [
"Apache-2.0"
] | null | null | null | intents/test_show.py | chhavip/debaised-analysis | 3597d35ce74f8d20384d57f12f7eb65020f9370b | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""This module has some examples of function calls for queries
from IPL matches.csv
The query is mentiond in __doc__ of the function.
"""
import pandas
import show
from util.enums import *
def test_1():
    """An example from the IPL dataset
    question : show all cities in season 2017 in the
    date range 2008-05-08 to 2017-04-12
    """
    table = pandas.read_csv('data/matches.csv')
    query_result = show.show(table,
                             dimensions=['city'],
                             slices=[('season', Filters.EQUAL_TO, 2017)],
                             date_range=('2008-05-08', '2017-04-12'),
                             date_column_name='date',
                             date_format='%Y-%m-%d')
    # Fixed: dropped the stray trailing semicolon after print().
    print(query_result)
    # NOTE(review): expected output is pinned to pandas' to_string()
    # formatting, which is brittle across pandas versions.
    expected_result = """ city
0 Hyderabad
1 Pune
2 Rajkot
3 Indore
4 Bangalore
5 Hyderabad
6 Mumbai
7 Indore
8 Pune
9 Mumbai"""
    assert(expected_result == query_result.to_string())
def test_2():
    """An example from the IPL dataset
    question :show player_of_match along with their average of win_by_runs
    in season 2017 in date range '2017-05-09' to '2017-05-12'
    """
    table = pandas.read_csv('data/matches.csv')
    query_result = show.show(table,
                             dimensions=['player_of_match'],
                             metric='win_by_runs',
                             slices=[('season', Filters.EQUAL_TO, 2017)],
                             date_range=('2017-05-09', '2017-05-12'),
                             date_column_name='date', date_format='%Y-%m-%d',
                             summary_operator=SummaryOperators.MEAN)
    print(query_result)
    # NOTE(review): expected table pinned verbatim to pandas' to_string() output.
    expected_result = """ player_of_match win_by_runs
0 KK Nair 7
1 MM Sharma 14
2 SS Iyer 0
3 WP Saha 7"""
    assert(expected_result == query_result.to_string())
def test_3():
    """An example from the IPL dataset
    question :show all the distinct seasons available in IPL dataset
    """
    table = pandas.read_csv('data/matches.csv')
    query_result = show.show(table,
                             dimensions=['season'],
                             summary_operator=SummaryOperators.DISTINCT)
    print(query_result.to_string())
    # NOTE(review): expected table pinned verbatim to pandas' to_string() output.
    expected_result = """ season
0 2008
1 2009
2 2010
3 2011
4 2012
5 2013
6 2014
7 2015
8 2016
9 2017"""
    assert(expected_result == query_result.to_string())
def test_4():
    """An example from the IPL dataset
    question :show all matches where Royal Challengers Bangalore won the match in season 2008
    """
    table = pandas.read_csv('data/matches.csv')
    query_result = show.show(table,
                             slices=[('season', Filters.EQUAL_TO, 2008), ('winner', Filters.EQUAL_TO, 'Royal Challengers Bangalore')],
                             dimensions=['team1', 'team2'],)
    print(query_result.to_string())
    # NOTE(review): expected table pinned verbatim to pandas' to_string() output.
    expected_result = """ team1 team2
0 Mumbai Indians Royal Challengers Bangalore
1 Deccan Chargers Royal Challengers Bangalore
2 Royal Challengers Bangalore Chennai Super Kings
3 Royal Challengers Bangalore Deccan Chargers"""
    assert(expected_result == query_result.to_string())
def test_5():
    """An example from the IPL dataset
    question :show all the umpire1 of season 2017 in date range '2017-05-09'to '2017-05-12'
    """
    table = pandas.read_csv('data/matches.csv')
    query_result = show.show(table,
                             dimensions=['umpire1'],
                             slices=[('season', Filters.EQUAL_TO, 2017)],
                             date_range=('2017-05-09', '2017-05-12'),
                             date_column_name='date', date_format='%Y-%m-%d',
                             summary_operator=SummaryOperators.DISTINCT)
    print(query_result.to_string())
    # NOTE(review): expected table pinned verbatim to pandas' to_string() output.
    expected_result = """ umpire1
0 A Deshmukh
1 A Nand Kishore
2 KN Ananthapadmanabhan
3 YC Barde"""
    assert(expected_result == query_result.to_string())
def test_6():
    """An example from the IPL dataset
    question :show the toss_winners of season 2017
    """
    table = pandas.read_csv('data/matches.csv')
    query_result = show.show(table,
                             dimensions=['toss_winner'],
                             slices=[('season', Filters.EQUAL_TO, 2017)],)
    print(query_result.to_string())
    # NOTE(review): expected table (59 rows) pinned verbatim to pandas'
    # to_string() output -- brittle across pandas versions.
    expected_result = """ toss_winner
0 Royal Challengers Bangalore
1 Rising Pune Supergiant
2 Kolkata Knight Riders
3 Kings XI Punjab
4 Royal Challengers Bangalore
5 Sunrisers Hyderabad
6 Mumbai Indians
7 Royal Challengers Bangalore
8 Rising Pune Supergiant
9 Mumbai Indians
10 Kolkata Knight Riders
11 Mumbai Indians
12 Gujarat Lions
13 Sunrisers Hyderabad
14 Delhi Daredevils
15 Mumbai Indians
16 Royal Challengers Bangalore
17 Delhi Daredevils
18 Kings XI Punjab
19 Gujarat Lions
20 Sunrisers Hyderabad
21 Mumbai Indians
22 Gujarat Lions
23 Delhi Daredevils
24 Rising Pune Supergiant
25 Gujarat Lions
26 Royal Challengers Bangalore
27 Mumbai Indians
28 Kolkata Knight Riders
29 Gujarat Lions
30 Kolkata Knight Riders
31 Kings XI Punjab
32 Royal Challengers Bangalore
33 Gujarat Lions
34 Kings XI Punjab
35 Kolkata Knight Riders
36 Royal Challengers Bangalore
37 Rising Pune Supergiant
38 Delhi Daredevils
39 Rising Pune Supergiant
40 Delhi Daredevils
41 Royal Challengers Bangalore
42 Sunrisers Hyderabad
43 Delhi Daredevils
44 Kolkata Knight Riders
45 Gujarat Lions
46 Mumbai Indians
47 Kolkata Knight Riders
48 Delhi Daredevils
49 Mumbai Indians
50 Delhi Daredevils
51 Sunrisers Hyderabad
52 Kolkata Knight Riders
53 Rising Pune Supergiant
54 Royal Challengers Bangalore
55 Mumbai Indians
56 Kolkata Knight Riders
57 Mumbai Indians
58 Mumbai Indians"""
    assert(expected_result == query_result.to_string())
# Run every scenario in order, echoing its description before executing it.
for _test in (test_1, test_2, test_3, test_4, test_5, test_6):
    print(_test.__doc__)
    _test()
print("Test cases completed")
5850ad2b9ae673cd78064e2045e2cd134727e09a | 17,759 | py | Python | sdk/python/pulumi_azure_native/documentdb/v20210415/cassandra_resource_cassandra_keyspace.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/documentdb/v20210415/cassandra_resource_cassandra_keyspace.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/documentdb/v20210415/cassandra_resource_cassandra_keyspace.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['CassandraResourceCassandraKeyspaceArgs', 'CassandraResourceCassandraKeyspace']


@pulumi.input_type
class CassandraResourceCassandraKeyspaceArgs:
    # NOTE: generated by the Pulumi SDK Generator -- keep hand edits to
    # comments only; regeneration will overwrite this file.
    def __init__(__self__, *,
                 account_name: pulumi.Input[str],
                 resource: pulumi.Input['CassandraKeyspaceResourceArgs'],
                 resource_group_name: pulumi.Input[str],
                 keyspace_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 options: Optional[pulumi.Input['CreateUpdateOptionsArgs']] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a CassandraResourceCassandraKeyspace resource.
        :param pulumi.Input[str] account_name: Cosmos DB database account name.
        :param pulumi.Input['CassandraKeyspaceResourceArgs'] resource: The standard JSON format of a Cassandra keyspace
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        :param pulumi.Input[str] keyspace_name: Cosmos DB keyspace name.
        :param pulumi.Input[str] location: The location of the resource group to which the resource belongs.
        :param pulumi.Input['CreateUpdateOptionsArgs'] options: A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
        """
        # Required inputs are always recorded; optional ones only when supplied.
        pulumi.set(__self__, "account_name", account_name)
        pulumi.set(__self__, "resource", resource)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        if keyspace_name is not None:
            pulumi.set(__self__, "keyspace_name", keyspace_name)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if options is not None:
            pulumi.set(__self__, "options", options)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> pulumi.Input[str]:
        """
        Cosmos DB database account name.
        """
        return pulumi.get(self, "account_name")

    @account_name.setter
    def account_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "account_name", value)

    @property
    @pulumi.getter
    def resource(self) -> pulumi.Input['CassandraKeyspaceResourceArgs']:
        """
        The standard JSON format of a Cassandra keyspace
        """
        return pulumi.get(self, "resource")

    @resource.setter
    def resource(self, value: pulumi.Input['CassandraKeyspaceResourceArgs']):
        pulumi.set(self, "resource", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group. The name is case insensitive.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="keyspaceName")
    def keyspace_name(self) -> Optional[pulumi.Input[str]]:
        """
        Cosmos DB keyspace name.
        """
        return pulumi.get(self, "keyspace_name")

    @keyspace_name.setter
    def keyspace_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "keyspace_name", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The location of the resource group to which the resource belongs.
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter
    def options(self) -> Optional[pulumi.Input['CreateUpdateOptionsArgs']]:
        """
        A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
        """
        return pulumi.get(self, "options")

    @options.setter
    def options(self, value: Optional[pulumi.Input['CreateUpdateOptionsArgs']]):
        pulumi.set(self, "options", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class CassandraResourceCassandraKeyspace(pulumi.CustomResource):
    # NOTE: generated by the Pulumi SDK Generator -- keep hand edits to
    # comments only; regeneration will overwrite this file.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 keyspace_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 options: Optional[pulumi.Input[pulumi.InputType['CreateUpdateOptionsArgs']]] = None,
                 resource: Optional[pulumi.Input[pulumi.InputType['CassandraKeyspaceResourceArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        An Azure Cosmos DB Cassandra keyspace.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_name: Cosmos DB database account name.
        :param pulumi.Input[str] keyspace_name: Cosmos DB keyspace name.
        :param pulumi.Input[str] location: The location of the resource group to which the resource belongs.
        :param pulumi.Input[pulumi.InputType['CreateUpdateOptionsArgs']] options: A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
        :param pulumi.Input[pulumi.InputType['CassandraKeyspaceResourceArgs']] resource: The standard JSON format of a Cassandra keyspace
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: CassandraResourceCassandraKeyspaceArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        An Azure Cosmos DB Cassandra keyspace.

        :param str resource_name: The name of the resource.
        :param CassandraResourceCassandraKeyspaceArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the (args-object) and (keyword-arguments) call forms.
        resource_args, opts = _utilities.get_resource_args_opts(CassandraResourceCassandraKeyspaceArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 keyspace_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 options: Optional[pulumi.Input[pulumi.InputType['CreateUpdateOptionsArgs']]] = None,
                 resource: Optional[pulumi.Input[pulumi.InputType['CassandraKeyspaceResourceArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # __props__ is only meaningful when rehydrating an existing resource
        # (opts.id set); otherwise the props are built from the inputs here.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = CassandraResourceCassandraKeyspaceArgs.__new__(CassandraResourceCassandraKeyspaceArgs)

            if account_name is None and not opts.urn:
                raise TypeError("Missing required property 'account_name'")
            __props__.__dict__["account_name"] = account_name
            __props__.__dict__["keyspace_name"] = keyspace_name
            __props__.__dict__["location"] = location
            __props__.__dict__["options"] = options
            if resource is None and not opts.urn:
                raise TypeError("Missing required property 'resource'")
            __props__.__dict__["resource"] = resource
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["tags"] = tags
            __props__.__dict__["name"] = None
            __props__.__dict__["type"] = None
        # Aliases cover earlier API versions and the legacy azure-nextgen
        # token, so existing stacks adopt this type without replacement.
        alias_opts = pulumi.ResourceOptions(aliases=[
            pulumi.Alias(type_="azure-nextgen:documentdb/v20210415:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-native:documentdb:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-nextgen:documentdb:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-native:documentdb/v20150401:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20150401:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-native:documentdb/v20150408:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20150408:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-native:documentdb/v20151106:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20151106:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-native:documentdb/v20160319:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20160319:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-native:documentdb/v20160331:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20160331:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-native:documentdb/v20190801:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20190801:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-native:documentdb/v20191212:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20191212:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-native:documentdb/v20200301:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20200301:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-native:documentdb/v20200401:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20200401:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-native:documentdb/v20200601preview:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20200601preview:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-native:documentdb/v20200901:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20200901:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-native:documentdb/v20210115:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20210115:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-native:documentdb/v20210301preview:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20210301preview:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-native:documentdb/v20210315:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20210315:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-native:documentdb/v20210401preview:CassandraResourceCassandraKeyspace"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20210401preview:CassandraResourceCassandraKeyspace")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(CassandraResourceCassandraKeyspace, __self__).__init__(
            'azure-native:documentdb/v20210415:CassandraResourceCassandraKeyspace',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'CassandraResourceCassandraKeyspace':
        """
        Get an existing CassandraResourceCassandraKeyspace resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = CassandraResourceCassandraKeyspaceArgs.__new__(CassandraResourceCassandraKeyspaceArgs)

        # All properties start as None; they are resolved from the live
        # resource's state when the lookup completes.
        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["options"] = None
        __props__.__dict__["resource"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        return CassandraResourceCassandraKeyspace(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        The location of the resource group to which the resource belongs.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the ARM resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def options(self) -> pulumi.Output[Optional['outputs.CassandraKeyspaceGetPropertiesResponseOptions']]:
        return pulumi.get(self, "options")

    @property
    @pulumi.getter
    def resource(self) -> pulumi.Output[Optional['outputs.CassandraKeyspaceGetPropertiesResponseResource']]:
        return pulumi.get(self, "resource")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of Azure resource.
        """
        return pulumi.get(self, "type")
| 61.237931 | 3,128 | 0.707134 |
eb994be7acf51860657a1d74f1ae1d5c02cb0e89 | 3,676 | py | Python | unifier/apps/core/admin.py | sosolidkk/manga-unifier | 4cca148affbb197b9284d46ef04c66d42d96c03a | [
"MIT"
] | 6 | 2021-03-25T14:55:36.000Z | 2021-05-25T15:12:41.000Z | unifier/apps/core/admin.py | sosolidkk/manga-unifier | 4cca148affbb197b9284d46ef04c66d42d96c03a | [
"MIT"
] | 6 | 2021-02-19T12:32:26.000Z | 2021-03-25T16:54:40.000Z | unifier/apps/core/admin.py | sosolidkk/manga-unifier | 4cca148affbb197b9284d46ef04c66d42d96c03a | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.admin.models import DELETION, LogEntry
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.urls import reverse
from django.utils.html import escape
from django.utils.safestring import mark_safe
from unifier.apps.core.models import Favorite, Manga, MangaChapter, Novel, NovelChapter, Platform, get_user_model
@admin.register(LogEntry)
class LogEntryAdmin(admin.ModelAdmin):
    """Read-only admin for Django's audit log (``LogEntry``).

    Add/change/delete are disabled -- entries are written by Django
    itself -- and only superusers may view the log.
    """

    date_hierarchy = "action_time"
    # Consistency fix: tuples, matching every other ModelAdmin in this module.
    list_filter = ("user", "content_type", "action_flag")
    search_fields = ("object_repr", "change_message")
    list_display = (
        "action_time",
        "user",
        "content_type",
        "object_link",
        "action_flag",
    )

    def has_add_permission(self, request):
        return False

    def has_change_permission(self, request, obj=None):
        return False

    def has_delete_permission(self, request, obj=None):
        return False

    def has_view_permission(self, request, obj=None):
        return request.user.is_superuser

    def object_link(self, obj):
        """Render the logged object as a link to its change page.

        Deleted objects have no change page, so they render as escaped text.
        """
        if obj.action_flag == DELETION:
            link = escape(obj.object_repr)
        else:
            ct = obj.content_type
            # Both the URL and the repr are escaped before being marked safe.
            link = "<a href='%s'>%s</a>" % (
                reverse("admin:%s_%s_change" % (ct.app_label, ct.model), args=[obj.object_id]),
                escape(obj.object_repr),
            )
        return mark_safe(link)

    object_link.admin_order_field = "object_repr"
    object_link.short_description = "object"
# Inline editor so chapters can be managed from the Novel admin page.
class NovelChapterInline(admin.TabularInline):
    model = NovelChapter
@admin.register(Manga)
class MangaAdmin(admin.ModelAdmin):
    """Admin list/search configuration for Manga entries."""

    list_display = (
        "title",
        "year",
        "author",
        "chapters_count",
        "rate",
        "cover_tag",
        "is_mature",
    )
    list_filter = (
        "created_at",
        "updated_at",
    )
    search_fields = (
        "title",
        "year",
    )
@admin.register(MangaChapter)
class MangaChapterAdmin(admin.ModelAdmin):
    """Admin for chapters, surfacing the parent manga's title in the list."""

    list_display = (
        "number",
        "title",
        "language",
        "get_manga_title",
    )
    list_filter = (
        "manga__title",
        "created_at",
        "updated_at",
    )
    search_fields = (
        "title",
        "manga__title",
    )

    def get_manga_title(self, obj):
        # Computed column: title of the related Manga.
        return obj.manga.title

    # Sort the computed column by the related manga; label it in the header.
    get_manga_title.admin_order_field = "manga"
    get_manga_title.short_description = "Manga Title"
@admin.register(Novel)
class NovelAdmin(admin.ModelAdmin):
    """Admin for novels; chapters are edited inline on the same page."""

    list_display = (
        "title",
        "year",
        "author",
        "chapters_count",
        "rate",
        "cover_tag",
        "is_mature",
    )
    list_filter = (
        "created_at",
        "updated_at",
        "year",
    )
    search_fields = (
        "title",
        "year",
    )

    inlines = (NovelChapterInline,)
@admin.register(NovelChapter)
class NovelChapterAdmin(admin.ModelAdmin):
    """Standalone admin for novel chapters."""

    list_display = (
        "number",
        "title",
        "language",
    )
    list_filter = (
        "created_at",
        "updated_at",
    )
    search_fields = ("title",)
@admin.register(Platform)
class PlatformAdmin(admin.ModelAdmin):
    """Admin for source platforms (name + URL)."""

    list_display = (
        "url",
        "name",
    )
    list_filter = (
        "created_at",
        "updated_at",
        "name",
    )
    search_fields = ("name",)
# Inline so a user's favorites are edited from the user admin page.
class FavoriteInline(admin.StackedInline):
    model = Favorite
    can_delete = False
    verbose_name_plural = "favorites"
# Extends Django's stock UserAdmin with the favorites inline.
class UserAdmin(BaseUserAdmin):
    inlines = (FavoriteInline,)
# Swap the default user admin for the customized one above.
admin.site.unregister(get_user_model())
admin.site.register(get_user_model(), UserAdmin)
| 22.414634 | 113 | 0.60691 |
d51f8496b8a4bb2c2d612403f479aa0b3b3c4f48 | 4,604 | py | Python | Lib/site-packages/OCC/StdFail.py | JWerbrouck/RWTH_M1_Projekt | 7ae63a2277361fa3273cf0677b297379482b8240 | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/OCC/StdFail.py | JWerbrouck/RWTH_M1_Projekt | 7ae63a2277361fa3273cf0677b297379482b8240 | [
"bzip2-1.0.6"
] | 1 | 2022-03-17T16:46:04.000Z | 2022-03-17T16:46:04.000Z | Lib/site-packages/OCC/StdFail.py | JWerbrouck/RWTH_M1_Projekt | 7ae63a2277361fa3273cf0677b297379482b8240 | [
"bzip2-1.0.6"
] | null | null | null | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.1
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
# SWIG-generated Python 2/3 compatibility shim: on Python 3 the removed
# `new.instancemethod` is replaced by the C helper from the extension module.
if version_info >= (3,0,0):
    new_instancemethod = lambda func, inst, cls: _StdFail.SWIG_PyInstanceMethod_New(func)
else:
    from new import instancemethod as new_instancemethod
# SWIG-generated loader for the `_StdFail` C extension. On Python >= 2.6 it
# locates the module next to this file via `imp` (deprecated/removed in
# modern Pythons); on failure -- or on older Pythons -- it falls back to a
# plain import.
if version_info >= (2,6,0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_StdFail', [dirname(__file__)])
        except ImportError:
            import _StdFail
            return _StdFail
        if fp is not None:
            try:
                _mod = imp.load_module('_StdFail', fp, pathname, description)
            finally:
                # Always release the file handle opened by find_module.
                fp.close()
            return _mod
    _StdFail = swig_import_helper()
    del swig_import_helper
else:
    import _StdFail
del version_info
# Alias `property` for the generated code below; guards ancient interpreters.
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # SWIG-generated attribute setter: routes writes through the class's
    # __swig_setmethods__ table; with static=1, unknown names are rejected.
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        # Only accept a real SWIG pointer object for the `this` slot.
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
# Dynamic variant: unknown attributes fall through to the instance __dict__.
def _swig_setattr(self,class_type,name,value):
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    # SWIG-generated attribute getter backed by __swig_getmethods__.
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Old/new-style class shim: on interpreters without `object`, fall back to a
# classic class and record which flavor is in use in `_newclass`.
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
def _swig_setattr_nondynamic_method(set):
    # Wrap a __setattr__ so that only existing attributes (plus `this`/
    # `thisown`) can be assigned; new attributes are rejected.
    def set_attr(self,name,value):
        if (name == "thisown"): return self.this.own(value)
        if hasattr(self,name) or (name == "this"):
            set(self,name,value)
        else:
            raise AttributeError("You cannot add attributes to %s" % self)
    return set_attr
class SwigPyIterator(object):
    """SWIG proxy for the C++ iterator type; instances come from the
    extension module, never from Python (the constructor raises)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _StdFail.delete_SwigPyIterator

    def __iter__(self): return self
# Bind the C-implemented iterator operations onto the proxy class, then
# register the proxy with the extension module (SWIG boilerplate).
SwigPyIterator.value = new_instancemethod(_StdFail.SwigPyIterator_value,None,SwigPyIterator)
SwigPyIterator.incr = new_instancemethod(_StdFail.SwigPyIterator_incr,None,SwigPyIterator)
SwigPyIterator.decr = new_instancemethod(_StdFail.SwigPyIterator_decr,None,SwigPyIterator)
SwigPyIterator.distance = new_instancemethod(_StdFail.SwigPyIterator_distance,None,SwigPyIterator)
SwigPyIterator.equal = new_instancemethod(_StdFail.SwigPyIterator_equal,None,SwigPyIterator)
SwigPyIterator.copy = new_instancemethod(_StdFail.SwigPyIterator_copy,None,SwigPyIterator)
SwigPyIterator.next = new_instancemethod(_StdFail.SwigPyIterator_next,None,SwigPyIterator)
SwigPyIterator.__next__ = new_instancemethod(_StdFail.SwigPyIterator___next__,None,SwigPyIterator)
SwigPyIterator.previous = new_instancemethod(_StdFail.SwigPyIterator_previous,None,SwigPyIterator)
SwigPyIterator.advance = new_instancemethod(_StdFail.SwigPyIterator_advance,None,SwigPyIterator)
SwigPyIterator.__eq__ = new_instancemethod(_StdFail.SwigPyIterator___eq__,None,SwigPyIterator)
SwigPyIterator.__ne__ = new_instancemethod(_StdFail.SwigPyIterator___ne__,None,SwigPyIterator)
SwigPyIterator.__iadd__ = new_instancemethod(_StdFail.SwigPyIterator___iadd__,None,SwigPyIterator)
SwigPyIterator.__isub__ = new_instancemethod(_StdFail.SwigPyIterator___isub__,None,SwigPyIterator)
SwigPyIterator.__add__ = new_instancemethod(_StdFail.SwigPyIterator___add__,None,SwigPyIterator)
SwigPyIterator.__sub__ = new_instancemethod(_StdFail.SwigPyIterator___sub__,None,SwigPyIterator)
SwigPyIterator_swigregister = _StdFail.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
| 40.743363 | 107 | 0.746525 |
5bcd0b290564bc701c47ceb3606536a76c4997ce | 236 | py | Python | artifacts/add_deepfry.py | gewenyu99/Patina | f7feece9242cc25b5a2decc49505f5fd2942975f | [
"MIT"
] | 1 | 2022-01-28T10:16:56.000Z | 2022-01-28T10:16:56.000Z | artifacts/add_deepfry.py | gewenyu99/Patina | f7feece9242cc25b5a2decc49505f5fd2942975f | [
"MIT"
] | null | null | null | artifacts/add_deepfry.py | gewenyu99/Patina | f7feece9242cc25b5a2decc49505f5fd2942975f | [
"MIT"
] | null | null | null | from PIL import Image
import deeppyer, asyncio
async def fry():
    """Load the thumbnail, deep-fry it, and write the result beside it."""
    image = Image.open('../mqdefault.jpg')
    image = await deeppyer.deepfry(image)
    image.save('./fried.jpg')
# Run the deep-fry coroutine to completion. Modernized: asyncio.run()
# creates and closes its own event loop, replacing the deprecated
# get_event_loop()/run_until_complete() pattern.
asyncio.run(fry())
| 21.454545 | 40 | 0.699153 |
27fd496ac9e553e521f720f23e6ca86d1be6ca0e | 117,287 | py | Python | fortnitepy/client.py | xMistt/fortnitepy | c64d72572e188a938e0b39a6d1fd1e8ee4842d31 | [
"MIT"
] | 2 | 2020-06-14T21:23:46.000Z | 2022-02-03T08:55:37.000Z | fortnitepy/client.py | xMistt/fortnitepy | c64d72572e188a938e0b39a6d1fd1e8ee4842d31 | [
"MIT"
] | null | null | null | fortnitepy/client.py | xMistt/fortnitepy | c64d72572e188a938e0b39a6d1fd1e8ee4842d31 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2019-2021 Terbau
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import datetime
import asyncio
import sys
import signal
import logging
import time
from aioxmpp import JID
from typing import Union, Optional, Any, Awaitable, Callable, Dict, List, Tuple
from .errors import (PartyError, HTTPException, NotFound, Forbidden,
DuplicateFriendship, FriendshipRequestAlreadySent,
MaxFriendshipsExceeded, InviteeMaxFriendshipsExceeded,
InviteeMaxFriendshipRequestsExceeded)
from .xmpp import XMPPClient
from .http import HTTPClient
from .user import (ClientUser, User, BlockedUser, SacSearchEntryUser,
UserSearchEntry)
from .friend import Friend, IncomingPendingFriend, OutgoingPendingFriend
from .enums import (Platform, Region, UserSearchPlatform, AwayStatus,
SeasonStartTimestamp, SeasonEndTimestamp,
BattlePassStat, StatsCollectionType)
from .party import (DefaultPartyConfig, DefaultPartyMemberConfig, ClientParty,
Party)
from .stats import StatsV2, StatsCollection, _StatsBase
from .store import Store
from .news import BattleRoyaleNewsPost
from .playlist import Playlist
from .presence import Presence
from .auth import RefreshTokenAuth
from .kairos import Avatar, get_random_default_avatar
from .typedefs import MaybeCoro, DatetimeOrTimestamp, StrOrInt
from .utils import LockEvent, MaybeLock
log = logging.getLogger(__name__)
# all credit for this function goes to discord.py.
def _cancel_tasks(loop: asyncio.AbstractEventLoop) -> None:
    """Cancel every still-pending task on *loop* and report any exceptions
    raised during shutdown to the loop's exception handler.
    """
    try:
        # Older Pythons expose this as a classmethod on Task; newer ones
        # moved it to the module-level asyncio.all_tasks().
        task_retriever = asyncio.Task.all_tasks
    except AttributeError:
        task_retriever = asyncio.all_tasks
    tasks = {t for t in task_retriever(loop=loop) if not t.done()}
    if not tasks:
        return
    log.info('Cleaning up after %d tasks.', len(tasks))
    for task in tasks:
        task.cancel()
    # Let every cancelled task actually finish cancelling before the loop
    # is torn down; return_exceptions keeps gather from aborting early.
    loop.run_until_complete(
        asyncio.gather(*tasks, return_exceptions=True)
    )
    log.info('All tasks finished cancelling.')
    for task in tasks:
        if task.cancelled():
            continue
        if task.exception() is not None:
            # Nothing can await these tasks anymore, so surface leftover
            # exceptions through the loop's exception handler instead.
            loop.call_exception_handler({
                'message': 'Unhandled exception during run shutdown.',
                'exception': task.exception(),
                'task': task
            })
def _cleanup_loop(loop: asyncio.AbstractEventLoop) -> None:
    """Cancel outstanding tasks on *loop*, shut down async generators and
    close the loop, even if task cancellation fails.
    """
    try:
        _cancel_tasks(loop)
        if sys.version_info >= (3, 6):
            # shutdown_asyncgens() only exists on Python 3.6+.
            loop.run_until_complete(loop.shutdown_asyncgens())
    finally:
        log.info('Closing the event loop.')
        loop.close()
async def _start_client(client: 'Client', *,
                        shutdown_on_error: bool = True,
                        after: Optional[MaybeCoro] = None,
                        error_after: Optional[MaybeCoro] = None,
                        ) -> None:
    """Start a single client and wait until it is either ready or failed.

    Parameters
    ----------
    client: :class:`Client`
        The client to start.
    shutdown_on_error: :class:`bool`
        If ``True`` a startup error is re-raised (prefixed with the client's
        auth identifier); otherwise it is passed to ``error_after`` or the
        loop's exception handler.
    after: Optional[Union[Callable, Awaitable]]
        Callback invoked with the client once it is ready.
    error_after: Optional[Union[Callable, Awaitable]]
        Callback invoked with the client and the exception when startup
        fails and ``shutdown_on_error`` is ``False``.
    """
    loop = asyncio.get_event_loop()

    if not isinstance(client, Client):
        raise TypeError('client must be an instance of fortnitepy.Client')

    async def starter():
        # Return (not raise) the exception so the result can be inspected
        # after the asyncio.wait() below.
        try:
            await client.start()
        except Exception as e:
            return e

    tasks = (
        loop.create_task(starter()),
        loop.create_task(client.wait_until_ready())
    )
    try:
        done, pending = await asyncio.wait(
            tasks,
            return_when=asyncio.FIRST_COMPLETED
        )
    except asyncio.CancelledError:
        for task in tasks:
            task.cancel()
    else:
        done_task = done.pop()
        e = done_task.result()
        if e is not None:
            # The client failed before becoming ready; make sure it is
            # fully shut down before reporting the error.
            await client.close()

            identifier = client.auth.identifier
            if shutdown_on_error:
                if e.args:
                    e.args = ('{0} - {1}'.format(identifier, e.args[0]),)
                else:
                    e.args = (identifier,)

                raise e
            else:
                if error_after is not None:
                    # BUGFIX: the coroutine check must be performed on
                    # error_after (the callback actually invoked here), not
                    # on after. Previously an async error_after paired with
                    # a sync after was called without being awaited.
                    if asyncio.iscoroutinefunction(error_after):
                        asyncio.ensure_future(error_after(client, e))
                    else:
                        error_after(client, e)
                    return

                message = ('An exception occured while running client '
                           '{0}'.format(identifier))
                return loop.call_exception_handler({
                    'message': message,
                    'exception': e,
                    'task': done_task
                })

        if after:
            if asyncio.iscoroutinefunction(after):
                asyncio.ensure_future(after(client))
            else:
                after(client)

        # Wait for the remaining task (usually the starter) to finish.
        await pending.pop()
def _before_event(callback):
    """Wrap *callback* so concurrent invocations execute it only once.

    The first caller runs the callback; every later caller simply waits
    until that first run has finished (successfully or not).
    """
    finished = asyncio.Event()
    started = False

    async def runner():
        nonlocal started
        if started:
            # Another invocation is already running the callback.
            await finished.wait()
            return
        started = True
        try:
            await callback()
        finally:
            finished.set()

    return runner
async def start_multiple(clients: List['Client'], *,
                         gap_timeout: float = 0.2,
                         shutdown_on_error: bool = True,
                         ready_callback: Optional[MaybeCoro] = None,
                         error_callback: Optional[MaybeCoro] = None,
                         all_ready_callback: Optional[MaybeCoro] = None,
                         before_start: Optional[Awaitable] = None,
                         before_close: Optional[Awaitable] = None
                         ) -> None:
    """|coro|

    Starts multiple clients at the same time.

    .. warning::

        This function is blocking and should be the last function to run.

    .. info::

        Due to throttling by epicgames on login, the clients are started
        with a 0.2 second gap. You can change this value with the gap_timeout
        keyword argument.

    Parameters
    ----------
    clients: List[:class:`Client`]
        A list of the clients you wish to start.
    gap_timeout: :class:`float`
        The time to sleep between starting clients. Defaults to ``0.2``.
    shutdown_on_error: :class:`bool`
        If the function should cancel all other start tasks if one of the
        tasks fails. You can catch the error by try excepting.
    ready_callback: Optional[Union[Callable[:class:`Client`], Awaitable[:class:`Client`]]]
        A callable/async callback taking a single parameter ``client``.
        The callback is called whenever a client is ready.
    error_callback: Optional[Union[Callable[:class:`Client`, Exception], Awaitable[:class:`Client`, Exception]]]
        A callable/async callback taking two parameters, :class:`Client`
        and an exception. The callback is called whenever a client fails
        logging in. The callback is not called if ``shutdown_on_error`` is
        ``True``.
    all_ready_callback: Optional[Union[Callable, Awaitable]]
        A callback/async callback that is called whenever all clients
        have finished logging in, regardless if one of the clients failed
        logging in. That means that the callback is always called when all
        clients are either logged in or raised an error.
    before_start: Optional[Awaitable]
        An async callback that is called when just before the clients are
        beginning to start. This must be a coroutine as all the clients
        wait to start until this callback is finished processing so you
        can do heavy start stuff like opening database connections, sessions
        etc.
    before_close: Optional[Awaitable]
        An async callback that is called when the clients are beginning to
        close. This must be a coroutine as all the clients wait to close until
        this callback is finished processing so you can do heavy close stuff
        like closing database connections, sessions etc.

    Raises
    ------
    AuthException
        Raised if invalid credentials in any form was passed or some
        other misc failure.
    HTTPException
        A request error occured while logging in.
    """  # noqa
    loop = asyncio.get_event_loop()

    async def waiter(client):
        # Resolves as soon as the client either becomes ready or closes,
        # whichever comes first; the losing waiter is cancelled.
        done, pending = await asyncio.wait(
            (client.wait_until_ready(), client.wait_until_closed()),
            return_when=asyncio.FIRST_COMPLETED
        )
        for task in pending:
            task.cancel()

    async def all_ready_callback_runner():
        tasks = [loop.create_task(waiter(client))
                 for client in clients]
        await asyncio.gather(*tasks)
        # If every client closed (instead of becoming ready), the run
        # failed and the "all ready" callback should not fire.
        if all(client.is_closed() for client in clients):
            return
        log.info('All clients started.')
        if all_ready_callback:
            if asyncio.iscoroutinefunction(all_ready_callback):
                asyncio.ensure_future(all_ready_callback())
            else:
                all_ready_callback()

    asyncio.ensure_future(all_ready_callback_runner())

    # Wrap the shared before_start/before_close callbacks so that only the
    # first client actually runs them; the rest just wait for completion.
    _before_start = _before_event(before_start)
    _before_close = _before_event(before_close)
    tasks = {}
    for i, client in enumerate(clients, 1):
        tasks[client] = loop.create_task(_start_client(
            client,
            shutdown_on_error=shutdown_on_error,
            after=ready_callback,
            error_after=error_callback
        ))
        if before_start is not None:
            client.add_event_handler('before_start', _before_start)
        if before_close is not None:
            client.add_event_handler('before_close', _before_close)
        # sleeping between starting to avoid throttling
        if i < len(clients):
            await asyncio.sleep(gap_timeout)
    log.debug('Starting all clients')
    return_when = (asyncio.FIRST_EXCEPTION
                   if shutdown_on_error
                   else asyncio.ALL_COMPLETED)
    done, pending = await asyncio.wait(
        list(tasks.values()),
        return_when=return_when
    )
    done_task = done.pop()
    # Pending tasks remaining here means we stopped on the first exception;
    # re-raise it so the caller can handle the failed startup.
    if pending and done_task.exception() is not None:
        raise done_task.exception()
async def close_multiple(clients: List['Client']) -> None:
    """|coro|

    Closes multiple clients at the same time by calling :meth:`Client.close()`
    on all of them.

    Parameters
    ----------
    clients: List[:class:`Client`]
        A list of the clients you wish to close. If a client is already closing
        or closed, it will get skipped without raising an error.
    """
    loop = asyncio.get_event_loop()

    # Skip clients that are already closing or fully closed.
    still_open = [
        c for c in clients if not c._closing and not c.is_closed()
    ]
    await asyncio.gather(
        *[loop.create_task(c.close()) for c in still_open]
    )
def run_multiple(clients: List['Client'], *,
                 gap_timeout: float = 0.2,
                 shutdown_on_error: bool = True,
                 ready_callback: Optional[MaybeCoro] = None,
                 error_callback: Optional[MaybeCoro] = None,
                 all_ready_callback: Optional[MaybeCoro] = None,
                 before_start: Optional[Awaitable] = None,
                 before_close: Optional[Awaitable] = None
                 ) -> None:
    """This function sets up a loop and then calls :func:`start_multiple()`
    for you. If you already have a running event loop, you should start
    the clients with :func:`start_multiple()`. On shutdown, all clients
    will be closed gracefully.

    .. warning::

        This function is blocking and should be the last function to run.

    .. info::

        Due to throttling by epicgames on login, the clients are started
        with a 0.2 second gap. You can change this value with the gap_timeout
        keyword argument.

    Parameters
    ----------
    clients: List[:class:`Client`]
        A list of the clients you wish to start.
    gap_timeout: :class:`float`
        The time to sleep between starting clients. Defaults to ``0.2``.
    shutdown_on_error: :class:`bool`
        If the function should cancel all other start tasks if one of the
        tasks fails. You can catch the error by try excepting.
    ready_callback: Optional[Union[Callable[:class:`Client`], Awaitable[:class:`Client`]]]
        A callable/async callback taking a single parameter ``client``.
        The callback is called whenever a client is ready.
    error_callback: Optional[Union[Callable[:class:`Client`, Exception], Awaitable[:class:`Client`, Exception]]]
        A callable/async callback taking two parameters, :class:`Client`
        and an exception. The callback is called whenever a client fails
        logging in. The callback is not called if ``shutdown_on_error`` is
        ``True``.
    all_ready_callback: Optional[Union[Callable, Awaitable]]
        A callback/async callback that is called whenever all clients
        have finished logging in, regardless if one of the clients failed
        logging in. That means that the callback is always called when all
        clients are either logged in or raised an error.
    before_start: Optional[Awaitable]
        An async callback that is called when just before the clients are
        beginning to start. This must be a coroutine as all the clients
        wait to start until this callback is finished processing so you
        can do heavy start stuff like opening database connections, sessions
        etc.
    before_close: Optional[Awaitable]
        An async callback that is called when the clients are beginning to
        close. This must be a coroutine as all the clients wait to close until
        this callback is finished processing so you can do heavy close stuff
        like closing database connections, sessions etc.

    Raises
    ------
    AuthException
        Raised if invalid credentials in any form was passed or some
        other misc failure.
    HTTPException
        A request error occured while logging in.
    """  # noqa
    loop = asyncio.get_event_loop()
    _closing = False
    _stopped = False

    def close(*args):
        nonlocal _closing

        def stopper(*argss):
            nonlocal _stopped
            if not _stopped:
                loop.stop()
                _stopped = True

        # Only start a graceful shutdown once; the loop is stopped when
        # close_multiple() has finished.
        if not _closing:
            _closing = True
            fut = asyncio.ensure_future(close_multiple(clients))
            fut.add_done_callback(stopper)

    try:
        # Not all platforms (e.g. Windows ProactorEventLoop) support signal
        # handlers; fall back to KeyboardInterrupt handling below.
        loop.add_signal_handler(signal.SIGINT, close)
        loop.add_signal_handler(signal.SIGTERM, close)
    except NotImplementedError:
        pass

    async def runner():
        await start_multiple(
            clients,
            gap_timeout=gap_timeout,
            shutdown_on_error=shutdown_on_error,
            ready_callback=ready_callback,
            error_callback=error_callback,
            all_ready_callback=all_ready_callback,
            before_start=before_start,
            before_close=before_close,
        )

    future = asyncio.ensure_future(runner())
    future.add_done_callback(close)
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        if not _stopped:
            loop.run_until_complete(close_multiple(clients))
    finally:
        future.remove_done_callback(close)
        log.info('Cleaning up loop')
        _cleanup_loop(loop)

    if not future.cancelled():
        return future.result()
class Client:
"""Represents the client connected to Fortnite and EpicGames' services.
Parameters
----------
auth: :class:`Auth`
The authentication method to use. You can read more about available authentication methods
:ref:`here <authentication>`.
loop: Optional[:class:`asyncio.AbstractEventLoop`]
The event loop to use for asynchronous operations.
connector: :class:`aiohttp.BaseConnector`
The connector to use for connection pooling.
ws_connector: :class:`aiohttp.BaseConnector`
The connector to use for websocket connection pooling. This could be
the same as the above connector.
status: :class:`str`
The status you want the client to send with its presence to friends.
Defaults to: ``Battle Royale Lobby - {party playercount} / {party max playercount}``
away: :class:`AwayStatus`
The away status the client should use for its presence. Defaults to
:attr:`AwayStatus.ONLINE`.
platform: :class:`.Platform`
The platform you want the client to display as its source.
Defaults to :attr:`Platform.WINDOWS`.
net_cl: :class:`str`
The current net cl used by the current Fortnite build. Named **netCL**
in official logs. Defaults to an empty string which is the recommended
usage as of ``v0.9.0`` since you then
won't need to update it when a new update is pushed by Fortnite.
party_version: :class:`int`
The party version the client should use. This value determines which version
should be able to join the party. If a user attempts to join the clients party
with a different party version than the client, then an error will be visible
saying something by the lines of "Their party of Fortnite is older/newer than
yours". If you experience this error I recommend incrementing the default set
value by one since the library in that case most likely has yet to be updated.
Defaults to ``3`` (As of November 3rd 2020).
default_party_config: :class:`DefaultPartyConfig`
The party configuration used when creating parties. If not specified,
the client will use the default values specified in the data class.
default_party_member_config: :class:`DefaultPartyMemberConfig`
The party member configuration used when creating parties. If not specified,
the client will use the default values specified in the data class.
http_retry_config: Optional[:class:`HTTPRetryConfig`]
The config to use for http retries.
build: :class:`str`
The build used by Fortnite.
Defaults to a valid but maybe outdated value.
os: :class:`str`
The os version string to use in the user agent.
Defaults to ``Windows/10.0.17134.1.768.64bit`` which is valid no
matter which platform you have set.
service_host: :class:`str`
The host used by Fortnite's XMPP services.
service_domain: :class:`str`
The domain used by Fortnite's XMPP services.
service_port: :class:`int`
The port used by Fortnite's XMPP services.
cache_users: :class:`bool`
Whether or not the library should cache :class:`User` objects. Disable
this if you are running a program with lots of users as this could
potentially take a big hit on the memory usage. Defaults to ``True``.
fetch_user_data_in_events: :class:`bool`
Whether or not user data should be fetched in event processing. Disabling
this might be useful for larger applications that deals with
possibly being rate limited on their ip. Defaults to ``True``.
.. warning::
Keep in mind that if this option is disabled, there is a big
chance that display names, external auths and more might be missing
or simply is ``None`` on objects deriving from :class:`User`. Keep in
mind that :attr:`User.id` always will be available. You can use
:meth:`User.fetch()` to update all missing attributes.
Attributes
----------
loop: :class:`asyncio.AbstractEventLoop`
The event loop that client implements.
user: :class:`ClientUser`
The user the client is logged in as.
party: :class:`ClientParty`
The party the client is currently connected to.
""" # noqa
    def __init__(self, auth, *,
                 loop: Optional[asyncio.AbstractEventLoop] = None,
                 **kwargs: Any) -> None:
        self.loop = loop or asyncio.get_event_loop()

        # --- user-configurable options (see class docstring) ---
        self.status = kwargs.get('status', 'Battle Royale Lobby - {party_size} / {party_max_size}')  # noqa
        self.away = kwargs.get('away', AwayStatus.ONLINE)
        self.avatar = kwargs.get('avatar', get_random_default_avatar())  # noqa
        self.platform = kwargs.get('platform', Platform.WINDOWS)
        self.net_cl = kwargs.get('net_cl', '')
        self.party_version = kwargs.get('party_version', 3)
        # Build id determines party compatibility across client versions.
        self.party_build_id = '1:{0.party_version}:{0.net_cl}'.format(self)
        self.default_party_config = kwargs.get('default_party_config', DefaultPartyConfig())  # noqa
        self.default_party_member_config = kwargs.get('default_party_member_config', DefaultPartyMemberConfig())  # noqa
        self.build = kwargs.get('build', '++Fortnite+Release-14.10-CL-14288110')  # noqa
        self.os = kwargs.get('os', 'Windows/10.0.17134.1.768.64bit')
        self.service_host = kwargs.get('xmpp_host', 'prod.ol.epicgames.com')
        self.service_domain = kwargs.get('xmpp_domain', 'xmpp-service-prod.ol.epicgames.com')  # noqa
        self.service_port = kwargs.get('xmpp_port', 5222)
        self.cache_users = kwargs.get('cache_users', True)
        self.fetch_user_data_in_events = kwargs.get('fetch_user_data_in_events', True)  # noqa
        self.kill_other_sessions = True
        self.accept_eula = True
        self.event_prefix = 'event_'

        # --- service clients ---
        self.auth = auth
        self.auth.initialize(self)
        self.http = HTTPClient(
            self,
            connector=kwargs.get('connector'),
            retry_config=kwargs.get('http_retry_config')
        )
        self.http.add_header('Accept-Language', 'en-EN')
        self.xmpp = XMPPClient(self, ws_connector=kwargs.get('ws_connector'))
        self.party = None

        # --- internal caches and state ---
        self._listeners = {}
        self._events = {}
        self._friends = {}
        self._pending_friends = {}
        self._users = {}
        self._blocked_users = {}
        self._presences = {}
        self._exception_future = self.loop.create_future()
        self._ready_event = asyncio.Event()
        self._closed_event = asyncio.Event()
        self._join_party_lock = LockEvent()
        self._internal_join_party_lock = LockEvent()
        self._reauth_lock = LockEvent()
        self._reauth_lock.failed = False
        self._refresh_task = None
        self._start_runner_task = None
        self._closed = False
        self._closing = False
        self._restarting = False
        self._first_start = True
        self._join_confirmation = False
        self._refresh_times = []

        self.setup_internal()
@staticmethod
def from_iso(iso: str) -> datetime.datetime:
"""Converts an iso formatted string to a
:class:`datetime.datetime` object
Parameters
----------
iso: :class:`str`:
The iso formatted string to convert to a datetime object.
Returns
-------
:class:`datetime.datetime`
"""
if isinstance(iso, datetime.datetime):
return iso
try:
return datetime.datetime.strptime(iso, '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
return datetime.datetime.strptime(iso, '%Y-%m-%dT%H:%M:%SZ')
@staticmethod
def to_iso(dt: datetime.datetime) -> str:
"""Converts a :class:`datetime.datetime`
object to an iso formatted string
Parameters
----------
dt: :class:`datetime.datetime`
The datetime object to convert to an iso formatted string.
Returns
-------
:class:`str`
"""
iso = dt.strftime('%Y-%m-%dT%H:%M:%S.%f')
# fortnite's services expect three digit precision on millis
return iso[:23] + 'Z'
    @property
    def default_party_config(self) -> DefaultPartyConfig:
        """:class:`DefaultPartyConfig`: The config used when the client
        creates new parties.
        """
        return self._default_party_config

    @default_party_config.setter
    def default_party_config(self, obj: DefaultPartyConfig) -> None:
        # The config needs a reference back to the client to do its work.
        obj._inject_client(self)
        self._default_party_config = obj
    @property
    def default_party_member_config(self) -> DefaultPartyMemberConfig:
        """:class:`DefaultPartyMemberConfig`: The config used for the
        client's own party member.
        """
        return self._default_party_member_config

    @default_party_member_config.setter
    def default_party_member_config(self, o: DefaultPartyMemberConfig) -> None:
        self._default_party_member_config = o
@property
def friends(self) -> List[Friend]:
"""List[:class:`Friend`]: A list of the clients friends."""
return list(self._friends.values())
@property
def pending_friends(self) -> List[Union[IncomingPendingFriend,
OutgoingPendingFriend]]:
"""List[Union[:class:`IncomingPendingFriend`,
:class:`OutgoingPendingFriend`]]: A list of all of the clients
pending friends.
.. note::
Pending friends can be both incoming (pending friend sent the
request to the bot) or outgoing (the bot sent the request to the
pending friend). You must check what kind of pending friend an
object is by their attributes ``incoming`` or ``outgoing``.
""" # noqa
return list(self._pending_friends.values())
@property
def incoming_pending_friends(self) -> List[IncomingPendingFriend]:
"""List[:class:`IncomingPendingFriend`]: A list of the clients
incoming pending friends.
"""
return [pf for pf in self._pending_friends.values() if pf.incoming]
@property
def outgoing_pending_friends(self) -> List[OutgoingPendingFriend]:
"""List[:class:`OutgoingPendingFriend`]: A list of the clients
outgoing pending friends.
"""
return [pf for pf in self._pending_friends.values() if pf.outgoing]
@property
def blocked_users(self) -> List[BlockedUser]:
"""List[:class:`BlockedUser`]: A list of the users client has
as blocked.
"""
return list(self._blocked_users.values())
@property
def presences(self) -> List[Presence]:
"""List[:class:`Presence`]: A list of the last presences from
currently online friends.
"""
return list(self._presences.values())
def _check_party_confirmation(self) -> None:
k = 'party_member_confirm'
val = k in self._events and len(self._events[k]) > 0
if val != self._join_confirmation:
self._join_confirmation = val
self.default_party_config.update({'join_confirmation': val})
def setup_internal(self) -> None:
logger = logging.getLogger('aioxmpp')
if logger.getEffectiveLevel() == 30:
logger.setLevel(level=logging.ERROR)
def register_methods(self) -> None:
methods = [func for func in dir(self) if callable(getattr(self, func))]
for method_name in methods:
if method_name.startswith(self.event_prefix):
event = method_name[len(self.event_prefix):]
func = getattr(self, method_name)
self.add_event_handler(event, func)
    def run(self) -> None:
        """This function starts the loop and then calls :meth:`start` for you.
        If you have passed an already running event loop to the client, you
        should start the client with :meth:`start`.

        .. warning::

            This function is blocking and should be the last function to run.

        Raises
        ------
        AuthException
            Raised if invalid credentials in any form was passed or some
            other misc failure.
        HTTPException
            A request error occured while logging in.
        """
        loop = self.loop
        _stopped = False

        def stopper(*args):
            nonlocal _stopped
            if not _stopped or not self._closing:
                loop.stop()
                _stopped = True

        async def runner():
            nonlocal _stopped
            try:
                await self.start()
            finally:
                # Make sure the client is logged out even if start() raised.
                if not self._closing and not self._closed:
                    await self.close()

        try:
            # Not all event loops support signal handlers (e.g. on Windows);
            # KeyboardInterrupt handling below covers that case.
            loop.add_signal_handler(signal.SIGINT, stopper)
            loop.add_signal_handler(signal.SIGTERM, stopper)
        except NotImplementedError:
            pass

        future = asyncio.ensure_future(runner())
        future.add_done_callback(stopper)
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            log.info('Terminating event loop.')
            _stopped = True
        finally:
            future.remove_done_callback(stopper)
            if not self._closing and not self._closed:
                log.info('Client not logged out when terminating loop. '
                         'Logging out now.')
                loop.run_until_complete(self.close())

            log.info('Cleaning up loop')
            _cleanup_loop(loop)

        if not future.cancelled():
            return future.result()

        # Somehow the exception from _exception_future is not always
        # raised so we might have to raise it here.
        try:
            exc = self._exception_future.exception()
        except (asyncio.CancelledError, asyncio.InvalidStateError):
            pass
        else:
            if exc is not None:
                raise exc
    async def start(self, dispatch_ready: bool = True) -> None:
        """|coro|

        Starts the client and logs into the specified user.

        .. warning::

            This functions is blocking and everything after the line calling
            this function will never run! If you are using this function
            instead of :meth:`run` you should always call it after everything
            else. When the client is ready it will dispatch
            :meth:`event_ready`.

        Parameters
        ----------
        dispatch_ready: :class:`bool`
            Whether or not the client should dispatch the ready event when
            ready.

        Raises
        ------
        AuthException
            Raised if invalid credentials in any form was passed or some
            other misc failure.
        HTTPException
            A request error occured while logging in.
        """
        # Only auto-register event_* methods once per client instance.
        if self._first_start:
            self.register_methods()
            self._first_start = False

        if dispatch_ready:
            await self.dispatch_and_wait_event('before_start')

        # When this start() was triggered by restart(), reuse the current
        # reauth priority so requests aren't queued behind the restart.
        _started_while_restarting = self._restarting
        pri = self._reauth_lock.priority if _started_while_restarting else 0

        self._closed_event.clear()
        self._check_party_confirmation()

        if self._closed:
            self.http.create_connection()
            self._closed = False

        ret = await self._login(priority=pri)
        if ret is False:
            return

        self._set_ready()
        if dispatch_ready:
            self.dispatch_event('ready')

        async def waiter(task):
            # Wait on the given task alongside _exception_future so that a
            # fatal error from anywhere in the client stops the runner.
            done, pending = await asyncio.wait(
                (task, self._exception_future),
                return_when=asyncio.FIRST_COMPLETED
            )
            try:
                exc = done.pop().exception()
            except asyncio.CancelledError:
                pass
            else:
                raise exc

        self._refresh_task = self.loop.create_task(
            self.auth.run_refresh_loop()
        )
        await waiter(self._refresh_task)

        if not _started_while_restarting and self._restarting:
            # Keep this original start() alive while a restart is replacing
            # the session underneath it.
            async def runner():
                while True:
                    await asyncio.sleep(1)

            self._start_runner_task = self.loop.create_task(runner())
            await waiter(self._start_runner_task)
    async def _login(self, priority: int = 0) -> None:
        """Perform the full login flow: authenticate, fetch account data,
        populate caches, accept the EULA if needed, connect XMPP and create
        the initial party. Returns ``False`` if authentication bailed out.
        """
        log.debug('Running authenticating')
        ret = await self.auth._authenticate(priority=priority)
        if ret is False:
            return False

        tasks = [
            self.http.account_get_by_user_id(
                self.auth.account_id,
                priority=priority
            ),
            self.http.account_graphql_get_clients_external_auths(
                priority=priority
            ),
            self.http.account_get_external_auths_by_id(
                self.auth.account_id,
                priority=priority
            ),
        ]
        data, ext_data, extra_ext_data, *_ = await asyncio.gather(*tasks)
        data['externalAuths'] = ext_data['myAccount']['externalAuths'] or []
        data['extraExternalAuths'] = extra_ext_data
        self.user = ClientUser(self, data)

        # Cache refreshing can run concurrently with the EULA check.
        state_fut = asyncio.ensure_future(
            self.refresh_caches(priority=priority)
        )

        if self.auth.eula_check_needed() and self.accept_eula:
            await self.auth.accept_eula(
                priority=priority
            )
            log.debug('EULA accepted')

        await state_fut

        await self.xmpp.run()
        log.debug('Connected to XMPP')

        await self.initialize_party(priority=priority)
        log.debug('Party created')
    async def _close(self, *,
                     close_http: bool = True,
                     dispatch_close: bool = True,
                     priority: int = 0) -> None:
        """Internal teardown: leave the party, close XMPP, kill auth
        sessions (unless restarting), clear caches and stop the refresh
        loop.

        NOTE(review): ``dispatch_close`` is currently unused in this body;
        event dispatching happens in :meth:`close` — confirm intended.
        """
        self._closing = True

        # Best effort: leaving the party or closing XMPP must not prevent
        # the rest of the logout from running.
        try:
            if self.party is not None:
                await self.party._leave(priority=priority)
        except Exception:
            pass

        try:
            await self.xmpp.close()
        except Exception:
            pass

        async def killer(token):
            if token is None:
                return
            try:
                await self.http.account_sessions_kill_token(token)
            except HTTPException:
                # All exchanged sessions should be killed when the original
                # session is killed, but this doesn't seem to be consistant.
                # The solution is to attempt to kill each token and then just
                # catch 401.
                pass

        if not self._restarting:
            tasks = (
                killer(getattr(self.auth, 'ios_access_token', None)),
                killer(getattr(self.auth, 'access_token', None)),
            )
            await asyncio.gather(*tasks)

        self._friends.clear()
        self._pending_friends.clear()
        self._users.clear()
        self._blocked_users.clear()
        self._presences.clear()
        self._ready_event.clear()

        if close_http:
            self._closed = True
            await self.http.close()

        if self.auth.refresh_loop_running():
            self._refresh_task.cancel()

        if not self._restarting:
            if (self._start_runner_task is not None
                    and not self._start_runner_task.cancelled()):
                self._start_runner_task.cancel()

        self._closing = False
        self._set_closed()
        log.debug('Successfully logged out')
    async def close(self, *,
                    close_http: bool = True,
                    dispatch_close: bool = True) -> None:
        """|coro|

        Logs the user out and closes running services.

        Parameters
        ----------
        close_http: :class:`bool`
            Whether or not to close the clients :class:`aiohttp.ClientSession`
            when logged out.
        dispatch_close: :class:`bool`
            Whether or not to dispatch the close event.

        Raises
        ------
        HTTPException
            An error occured while logging out.
        """
        # Let registered before_close/close handlers finish before the
        # actual teardown starts.
        if dispatch_close:
            await asyncio.gather(
                self.dispatch_and_wait_event('before_close'),
                self.dispatch_and_wait_event('close'),
            )
        await self._close(
            close_http=close_http,
            dispatch_close=dispatch_close
        )
    def is_closed(self) -> bool:
        """:class:`bool`: Whether the client is running or not."""
        return self._closed
    def can_restart(self) -> bool:
        """:class:`bool`: Whether the client can be restarted. Restarting
        requires an ios refresh token from the current auth method.
        """
        return hasattr(self.auth, 'ios_refresh_token')
    async def restart(self) -> None:
        """|coro|

        Restarts the client completely. All events received while this method
        runs are dispatched when it has finished.

        Raises
        ------
        AuthException
            Raised if invalid credentials in any form was passed or some
            other misc failure.
        HTTPException
            A request error occured while logging in.
        """
        # Bump the reauth priority so this restart's requests jump ahead of
        # queued work, and serialize concurrent restarts behind the lock.
        self._reauth_lock.priority += 1
        priority = self._reauth_lock.priority
        async with MaybeLock(self._reauth_lock):
            self._restarting = True

            self._refresh_times.append(time.time())
            ios_refresh_token = self.auth.ios_refresh_token

            # Replay friend/pending diffs once the new session is up.
            asyncio.ensure_future(self.recover_events())
            await self._close(
                close_http=False,
                dispatch_close=False,
                priority=priority
            )

            auth = RefreshTokenAuth(
                refresh_token=ios_refresh_token
            )
            auth.initialize(self)
            self.auth = auth

            async def runner():
                try:
                    await self.start(dispatch_ready=False)
                except Exception as e:
                    return e

            tasks = (
                self.loop.create_task(runner()),
                self.loop.create_task(self.wait_until_ready()),
            )
            d, p = await asyncio.wait(
                tasks,
                return_when=asyncio.FIRST_COMPLETED
            )

            done_task = d.pop()
            # runner() returns (not raises) the startup exception; re-raise
            # it here and cancel the ready waiter.
            if done_task.result() is not None:
                p.pop().cancel()
                raise done_task.result()

            self.dispatch_event('restart')
            self._restarting = False
    async def recover_events(self, *,
                             refresh_caches: bool = False,
                             wait_for_close: bool = True) -> None:
        """Diff the friend/pending caches across an XMPP reconnect and
        dispatch the add/remove/request events that were missed while the
        session was down.
        """
        if wait_for_close:
            await self.wait_for('xmpp_session_close')

        # Snapshot the caches before the new session repopulates them.
        pre_friends = self.friends
        pre_pending = self.pending_friends
        await self.wait_for('xmpp_session_establish')

        if refresh_caches:
            await self.refresh_caches()

        for friend in pre_friends:
            if friend not in self._friends.values():
                self.dispatch_event('friend_remove', friend)

        added_friends = []
        for friend in self._friends.values():
            if friend not in pre_friends:
                added_friends.append(friend)
                self.dispatch_event('friend_add', friend)

        for pending in pre_pending:
            # A pending friend that became a friend was accepted, not
            # aborted, so skip those.
            if (pending not in self._pending_friends.values()
                    and pending not in added_friends):
                self.dispatch_event('friend_request_abort', pending)

        for pending in self._pending_friends.values():
            if pending not in pre_pending:
                self.dispatch_event('friend_request', pending)
    def _set_ready(self) -> None:
        # Wakes up everything blocked in wait_until_ready().
        self._ready_event.set()
    def _set_closed(self) -> None:
        # Wakes up everything blocked in wait_until_closed().
        self._closed_event.set()
    def is_ready(self) -> bool:
        """Specifies if the internal state of the client is ready.

        Returns
        -------
        :class:`bool`
            ``True`` if the internal state is ready else ``False``
        """
        return self._ready_event.is_set()
    async def wait_until_ready(self) -> None:
        """|coro|

        Waits until the internal state of the client is ready.
        """
        await self._ready_event.wait()
    async def wait_until_closed(self) -> None:
        """|coro|

        Waits until the client is fully closed.
        """
        await self._closed_event.wait()
def construct_party(self, data: dict, *,
cls: Optional[ClientParty] = None) -> ClientParty:
clazz = cls or self.default_party_config.cls
return clazz(self, data)
    async def initialize_party(self, priority: int = 0) -> None:
        """Ensure the client starts with a fresh party: leave any party the
        account is still registered to from a previous session, then create
        a new one.
        """
        data = await self.http.party_lookup_user(
            self.user.id,
            priority=priority
        )
        if len(data['current']) > 0:
            party = self.construct_party(data['current'][0])
            await party._leave(priority=priority)
            log.debug('Left old party')
        await self._create_party(priority=priority)
    async def fetch_user_by_display_name(self, display_name, *,
                                         cache: bool = False,
                                         raw: bool = False
                                         ) -> Optional[User]:
        """|coro|

        Fetches a user from the passed display name. Aliased to
        ``fetch_profile_by_display_name()`` as well for legacy reasons.

        Parameters
        ----------
        display_name: :class:`str`
            The display name of the user you want to fetch the user for.
        cache: :class:`bool`
            If set to True it will try to get the user from the friends or
            user cache.

            .. note::

                Setting this parameter to False will make it an api call.

        raw: :class:`bool`
            If set to True it will return the data as you would get it from
            the api request.

            .. note::

                Setting raw to True does not work with cache set to True.

        Raises
        ------
        HTTPException
            An error occured while requesting the user.

        Returns
        -------
        Optional[:class:`User`]
            The user requested. If not found it will return ``None``.
        """
        if cache:
            # Case-insensitive lookup in the local user cache first.
            for u in self._users.values():
                try:
                    if u.display_name is not None:
                        if u.display_name.casefold() == display_name.casefold():  # noqa
                            return u
                except AttributeError:
                    pass
        res = await self.http.account_graphql_get_by_display_name(display_name)
        accounts = res['account']
        if len(accounts) == 0:
            return None
        # Prefer an account with an actual epic display name; among those,
        # pick the one with the most linked external auths.
        epic_accounts = [d for d in accounts if d['displayName'] is not None]
        if epic_accounts:
            account = max(epic_accounts, key=lambda d: len(d['externalAuths']))
        else:
            account = accounts[0]
        if raw:
            return account
        return self.store_user(account, try_cache=cache)

    fetch_profile_by_display_name = fetch_user_by_display_name
async def fetch_users_by_display_name(self, display_name, *,
raw: bool = False
) -> Optional[User]:
"""|coro|
Fetches all users including external users (accounts from other
platforms) that matches the given the display name. Aliased to
``fetch_profiles_by_display_name()`` as well for legacy reasons.
.. warning::
This function is not for requesting multiple users by multiple
display names. Use :meth:`Client.fetch_user()` for that.
Parameters
----------
display_name: :class:`str`
The display name of the users you want to get.
raw: :class:`bool`
If set to True it will return the data as you would get it from
the api request. *Defaults to ``False``*
Raises
------
HTTPException
An error occured while requesting the user.
Returns
-------
List[:class:`User`]
A list containing all payloads found for this user.
"""
res = await self.http.account_graphql_get_by_display_name(display_name)
return [User(self, account) for account in res['account']]
fetch_profiles_by_display_name = fetch_users_by_display_name
    async def fetch_user(self, user, *,
                         cache: bool = False,
                         raw: bool = False
                         ) -> Optional[User]:
        """|coro|
        Fetches a single user by the given id/displayname. Aliased to
        ``fetch_profile()`` as well for legacy reasons.
        Parameters
        ----------
        user: :class:`str`
            Id or display name
        cache: :class:`bool`
            If set to True it will try to get the user from the friends or
            user cache and fall back to an api request if not found.
            .. note::
                Setting this parameter to False will make it an api call.
        raw: :class:`bool`
            If set to True it will return the data as you would get it from
            the api request.
            .. note::
                Setting raw to True does not work with cache set to True.
        Raises
        ------
        HTTPException
            An error occured while requesting the user.
        Returns
        -------
        Optional[:class:`User`]
            The user requested. If not found it will return ``None``
        """
        try:
            # Delegate to the batched lookup; an empty result list means the
            # user was not found.
            data = await self.fetch_users((user,), cache=cache, raw=raw)
            return data[0]
        except IndexError:
            return None
    fetch_profile = fetch_user
async def fetch_users(self, users, *,
cache: bool = False,
raw: bool = False) -> List[User]:
"""|coro|
Fetches multiple users at once by the given ids/displaynames. Aliased
to ``fetch_profiles()`` as well for legacy reasons.
Parameters
----------
users: List[:class:`str`]
A list/tuple containing ids/displaynames.
cache: :class:`bool`
If set to True it will try to get the users from the friends or
user cache and fall back to an api request if not found.
.. note::
Setting this parameter to False will make it an api call.
raw: :class:`bool`
If set to True it will return the data as you would get it from
the api request.
.. note::
Setting raw to True does not work with cache set to True.
Raises
------
HTTPException
An error occured while requesting user information.
Returns
-------
List[:class:`User`]
Users requested. Only users that are found gets returned.
"""
if len(users) == 0:
return []
_users = []
new = []
tasks = []
def find_by_display_name(dn):
if cache:
for u in self._users.values():
try:
if u.display_name is not None:
if u.display_name.casefold() == dn.casefold():
_users.append(u)
return
except AttributeError:
pass
task = self.http.account_graphql_get_by_display_name(elem)
tasks.append(task)
for elem in users:
if self.is_display_name(elem):
find_by_display_name(elem)
else:
if cache:
p = self.get_user(elem)
if p:
if raw:
_users.append(p.get_raw())
else:
_users.append(p)
continue
new.append(elem)
if len(tasks) > 0:
pfs = await asyncio.gather(*tasks)
for p_data in pfs:
accounts = p_data['account']
for account_data in accounts:
if account_data['displayName'] is not None:
new.append(account_data['id'])
break
else:
for account_data in accounts:
if account_data['displayName'] is None:
new.append(account_data['id'])
break
chunk_tasks = []
chunks = [new[i:i + 100] for i in range(0, len(new), 100)]
for chunk in chunks:
task = self.http.account_graphql_get_multiple_by_user_id(chunk)
chunk_tasks.append(task)
if len(chunks) > 0:
d = await asyncio.gather(*chunk_tasks)
for results in d:
for result in results['accounts']:
if raw:
_users.append(result)
else:
u = self.store_user(result, try_cache=cache)
_users.append(u)
return _users
fetch_profiles = fetch_users
    async def fetch_user_by_email(self, email, *,
                                  cache: bool = False,
                                  raw: bool = False) -> Optional[User]:
        """|coro|
        Fetches a single user by the email. Aliased to
        ``fetch_profile_by_email()`` as well for legacy reasons.
        .. warning::
            Because of epicgames throttling policy, you can only do this
            request three times in a timespan of 600 seconds. If you were
            to do more than three requests in that timespan, a
            :exc:`HTTPException` would be raised.
        Parameters
        ----------
        email: :class:`str`
            The email of the account you are requesting.
        cache: :class:`bool`
            If set to True it will try to get the user from the friends or
            user cache and fall back to an api request if not found.
            .. note::
                This method does two api requests but with this set to False
                only one request will be done as long as the user is found in
                one of the caches.
        raw: :class:`bool`
            If set to True it will return the data as you would get it from
            the api request.
            .. note::
                Setting raw to True does not work with cache set to True.
        Raises
        ------
        HTTPException
            An error occured while requesting the user.
        Returns
        -------
        Optional[:class:`User`]
            The user requested. If not found it will return ``None``
        """
        try:
            res = await self.http.account_get_by_email(email)
        except HTTPException as e:
            # A missing account is signalled through an error code rather
            # than an empty payload.
            m = 'errors.com.epicgames.account.account_not_found'
            if e.message_code == m:
                return None
            raise
        # Request the account data through graphql since the one above returns
        # empty external auths payload.
        account_id = res['id']
        return await self.fetch_user(account_id, cache=cache, raw=raw)
    fetch_profile_by_email = fetch_user_by_email
    async def search_users(self, prefix: str,
                           platform: UserSearchPlatform
                           ) -> List[UserSearchEntry]:
        """|coro|
        Searches after users by a prefix and returns up to 100 matches.
        Aliased to ``search_profiles()`` as well for legacy reasons.
        Parameters
        ----------
        prefix: :class:`str`
            | The prefix you want to search by. The prefix is case insensitive.
            | Example: ``Tfue`` will return Tfue's user + up to 99 other
            users which have display names that start with or match exactly
            to ``Tfue`` like ``Tfue_Faze dequan``.
        platform: :class:`UserSearchPlatform`
            The platform you wish to search by.
            ..note::
                The platform is only important for prefix matches. All exact
                matches are returned regardless of which platform is
                specified.
        Raises
        ------
        HTTPException
            An error occured while requesting.
        Returns
        -------
        List[:class:`UserSearchEntry`]
            An ordered list of users that matched the prefix.
        """
        if not isinstance(platform, UserSearchPlatform):
            raise TypeError(
                'The platform passed must be a constant from '
                'fortnitepy.UserSearchPlatform'
            )
        res = await self.http.user_search_by_prefix(
            prefix,
            platform.value
        )
        # Resolve complete account payloads for all matched ids in one batch
        # and map them by id so the search endpoint's ordering is preserved.
        user_ids = [d['accountId'] for d in res]
        users = await self.fetch_users(user_ids, raw=True)
        lookup = {p['id']: p for p in users}
        entries = []
        for data in res:
            user_data = lookup.get(data['accountId'])
            if user_data is None:
                continue
            obj = UserSearchEntry(self, user_data, data)
            entries.append(obj)
        return entries
    search_profiles = search_users
    async def search_sac_by_slug(self, slug: str) -> List[SacSearchEntryUser]:
        """|coro|
        Searches for an owner of slug + retrieves owners of similar slugs.
        Parameters
        ----------
        slug: :class:`str`
            The slug (support a creator code) you wish to search for.
        Raises
        ------
        HTTPException
            An error occured while requesting fortnite's services.
        Returns
        -------
        List[:class:`SacSearchEntryUser`]
            An ordered list of users who matched the exact or slightly
            modified slug.
        """
        res = await self.http.payment_website_search_sac_by_slug(slug)
        # Resolve full account payloads so each entry can be built with the
        # complete user data, keeping the search endpoint's ordering.
        user_ids = [e['id'] for e in res]
        users = await self.fetch_users(
            list(user_ids),
            raw=True
        )
        lookup = {p['id']: p for p in users}
        entries = []
        for data in res:
            user_data = lookup.get(data['id'])
            if user_data is None:
                continue
            obj = SacSearchEntryUser(self, user_data, data)
            entries.append(obj)
        return entries
    async def refresh_caches(self, priority: int = 0) -> None:
        """|coro|
        Rebuilds the friend, pending friend and blocked user caches from the
        friends list, friends summary and last-online presence endpoints.
        Parameters
        ----------
        priority: :class:`int`
            Priority forwarded to the underlying HTTP requests.
        """
        self._friends.clear()
        self._pending_friends.clear()
        self._blocked_users.clear()
        # Fetch the friends list (incl. pending), the summary and the
        # last-online data concurrently.
        tasks = (
            self.http.friends_get_all(
                include_pending=True,
                priority=priority
            ),
            self.http.friends_get_summary(priority=priority),
            self.http.presence_get_last_online(priority=priority),
        )
        raw_friends, raw_summary, raw_presences = await asyncio.gather(*tasks)
        # Resolve account payloads for every friend and blocked id in
        # batches of at most 100 ids per request.
        ids = [r['accountId'] for r in raw_friends + raw_summary['blocklist']]
        chunks = [ids[i:i + 100] for i in range(0, len(ids), 100)]
        users = {}
        tasks = [
            self.http.account_graphql_get_multiple_by_user_id(
                chunk,
                priority=priority
            )
            for chunk in chunks
        ]
        if tasks:
            done = await asyncio.gather(*tasks)
        else:
            done = []
        for results in done:
            for user in results['accounts']:
                users[user['id']] = user
        # Store friends / pending friends, merging the friendship record
        # with the resolved account payload.
        for friend in raw_friends:
            try:
                data = users[friend['accountId']]
            except KeyError:
                continue
            if friend['status'] == 'ACCEPTED':
                self.store_friend({**friend, **data})
            elif friend['status'] == 'PENDING':
                if friend['direction'] == 'INBOUND':
                    self.store_incoming_pending_friend({**friend, **data})
                else:
                    self.store_outgoing_pending_friend({**friend, **data})
        for data in raw_summary['friends']:
            friend = self.get_friend(data['accountId'])
            if friend is not None:
                friend._update_summary(data)
        for user_id, data in raw_presences.items():
            friend = self.get_friend(user_id)
            if friend is not None:
                # A missing or empty entry means no last-online value is
                # known for this friend.
                try:
                    value = data[0]['last_online']
                except (IndexError, KeyError):
                    value = None
                friend._update_last_logout(
                    self.from_iso(value) if value is not None else None
                )
        for data in raw_summary['blocklist']:
            user = users.get(data['accountId'])
            if user is not None:
                self.store_blocked_user(user)
def store_user(self, data: dict, *, try_cache: bool = True) -> User:
try:
user_id = data.get(
'accountId',
data.get('id', data.get('account_id'))
)
if try_cache:
return self._users[user_id]
except KeyError:
pass
user = User(self, data)
if self.cache_users:
self._users[user.id] = user
return user
def get_user(self, user_id: str) -> Optional[User]:
"""Tries to get a user from the user cache by the given user id.
Parameters
----------
user_id: :class:`str`
The id of the user.
Returns
-------
Optional[:class:`User`]
The user if found, else ``None``
"""
user = self._users.get(user_id)
if user is None:
friend = self.get_friend(user_id)
if friend is not None:
user = User(self, friend.get_raw())
if self.cache_users:
self._users[user.id] = user
return user
def store_friend(self, data: dict, *,
summary: Optional[dict] = None,
try_cache: bool = True) -> Friend:
try:
user_id = data.get(
'accountId',
data.get('id', data.get('account_id'))
)
if try_cache:
return self._friends[user_id]
except KeyError:
pass
friend = Friend(self, data)
if summary is not None:
friend._update_summary(summary)
self._friends[friend.id] = friend
return friend
def get_friend(self, user_id: str) -> Optional[Friend]:
"""Tries to get a friend from the friend cache by the given user id.
Parameters
----------
user_id: :class:`str`
The id of the friend.
Returns
-------
Optional[:class:`Friend`]
The friend if found, else ``None``
"""
return self._friends.get(user_id)
def store_incoming_pending_friend(self, data: dict, *,
try_cache: bool = True
) -> IncomingPendingFriend:
try:
user_id = data.get(
'accountId',
data.get('id', data.get('account_id'))
)
if try_cache:
return self._pending_friends[user_id]
except KeyError:
pass
pending_friend = IncomingPendingFriend(self, data)
self._pending_friends[pending_friend.id] = pending_friend
return pending_friend
def store_outgoing_pending_friend(self, data: dict, *,
try_cache: bool = True
) -> OutgoingPendingFriend:
try:
user_id = data.get(
'accountId',
data.get('id', data.get('account_id'))
)
if try_cache:
return self._pending_friends[user_id]
except KeyError:
pass
pending_friend = OutgoingPendingFriend(self, data)
self._pending_friends[pending_friend.id] = pending_friend
return pending_friend
def get_pending_friend(self,
user_id: str
) -> Optional[Union[IncomingPendingFriend,
OutgoingPendingFriend]]:
"""Tries to get a pending friend from the pending friend cache by the
given user id.
Parameters
----------
user_id: :class:`str`
The id of the pending friend.
Returns
-------
Optional[Union[:class:`IncomingPendingFriend`,
:class:`OutgoingPendingFriend`]]
The pending friend if found, else ``None``
""" # noqa
return self._pending_friends.get(user_id)
def get_incoming_pending_friend(self,
user_id: str
) -> Optional[IncomingPendingFriend]:
"""Tries to get an incoming pending friend from the pending friends
cache by the given user id.
Parameters
----------
user_id: :class:`str`
The id of the incoming pending friend.
Returns
-------
Optional[:class:`IncomingPendingFriend`]
The incoming pending friend if found, else ``None``.
"""
pending_friend = self.get_pending_friend(user_id)
if pending_friend and pending_friend.incoming:
return pending_friend
def get_outgoing_pending_friend(self,
user_id: str
) -> Optional[OutgoingPendingFriend]:
"""Tries to get an outgoing pending friend from the pending friends
cache by the given user id.
Parameters
----------
user_id: :class:`str`
The id of the outgoing pending friend.
Returns
-------
Optional[:class:`OutgoingPendingFriend`]
The outgoing pending friend if found, else ``None``.
"""
pending_friend = self.get_pending_friend(user_id)
if pending_friend and pending_friend.outgoing:
return pending_friend
def store_blocked_user(self, data: dict, *,
try_cache: bool = True) -> BlockedUser:
try:
user_id = data.get(
'accountId',
data.get('id', data.get('account_id'))
)
if try_cache:
return self._blocked_users[user_id]
except KeyError:
pass
blocked_user = BlockedUser(self, data)
self._blocked_users[blocked_user.id] = blocked_user
return blocked_user
def get_blocked_user(self, user_id: str) -> Optional[BlockedUser]:
"""Tries to get a blocked user from the blocked users cache by the
given user id.
Parameters
----------
user_id: :class:`str`
The id of the blocked user.
Returns
-------
Optional[:class:`BlockedUser`]
The blocked user if found, else ``None``
"""
return self._blocked_users.get(user_id)
def get_presence(self, user_id: str) -> Optional[Presence]:
"""Tries to get the latest received presence from the presence cache.
Parameters
----------
user_id: :class:`str`
The id of the friend you want the last presence of.
Returns
-------
Optional[:class:`Presence`]
The presence if found, else ``None``
"""
return self._presences.get(user_id)
def has_friend(self, user_id: str) -> bool:
"""Checks if the client is friends with the given user id.
Parameters
----------
user_id: :class:`str`
The id of the user you want to check.
Returns
-------
:class:`bool`
``True`` if user is friends with the client else ``False``
"""
return self.get_friend(user_id) is not None
def is_pending(self, user_id: str) -> bool:
"""Checks if the given user id is a pending friend of the client.
Parameters
----------
user_id: :class:`str`
The id of the user you want to check.
Returns
-------
:class:`bool`
``True`` if user is a pending friend else ``False``
"""
return self.get_pending_friend(user_id) is not None
def is_blocked(self, user_id: str) -> bool:
"""Checks if the given user id is blocked by the client.
Parameters
----------
user_id: :class:`str`
The id of the user you want to check.
Returns
-------
:class:`bool`
``True`` if user is blocked else ``False``
"""
return self.get_blocked_user(user_id) is not None
async def fetch_blocklist(self) -> List[str]:
"""|coro|
Retrieves the blocklist with an api call.
Raises
------
HTTPException
An error occured while fetching blocklist.
Returns
-------
List[:class:`str`]
List of ids
"""
return await self.http.friends_get_blocklist()
async def block_user(self, user_id: str) -> None:
"""|coro|
Blocks a user by a given user id.
Parameters
----------
user_id: :class:`str`
The id of the user you want to block.
Raises
------
HTTPException
Something went wrong when trying to block this user.
"""
await self.http.friends_block(user_id)
async def unblock_user(self, user_id: str) -> None:
"""|coro|
Unblocks a user by a given user id.
Parameters
----------
user_id: :class:`str`
The id of the user you want to unblock
Raises
------
HTTPException
Something went wrong when trying to unblock this user.
"""
await self.http.friends_unblock(user_id)
def is_id(self, value: str) -> bool:
"""Simple function that finds out if a :class:`str` is a valid id
Parameters
----------
value: :class:`str`
The string you want to check.
Returns
-------
:class:`bool`
``True`` if string is valid else ``False``
"""
return isinstance(value, str) and len(value) > 16
def is_display_name(self, val: str) -> bool:
"""Simple function that finds out if a :class:`str` is a valid displayname
Parameters
----------
value: :class:`str`
The string you want to check.
Returns
-------
:class:`bool`
``True`` if string is valid else ``False``
"""
return isinstance(val, str) and 3 <= len(val) <= 16
    async def add_friend(self, user_id: str) -> None:
        """|coro|
        Sends a friend request to the specified user id.
        Parameters
        ----------
        user_id: :class:`str`
            The id of the user you want to add.
        Raises
        ------
        NotFound
            The specified user does not exist.
        DuplicateFriendship
            The client is already friends with this user.
        FriendshipRequestAlreadySent
            The client has already sent a friendship request that has not been
            handled yet by the user.
        MaxFriendshipsExceeded
            The client has hit the max amount of friendships a user can
            have at a time. For most accounts this limit is set to ``1000``
            but it could be higher for others.
        InviteeMaxFriendshipsExceeded
            The user you attempted to add has hit the max amount of friendships
            a user can have at a time.
        InviteeMaxFriendshipRequestsExceeded
            The user you attempted to add has hit the max amount of friendship
            requests a user can have at a time. This is usually ``700`` total
            requests.
        Forbidden
            The client is not allowed to send friendship requests to the user
            because of the users settings.
        HTTPException
            An error occured while requesting to add this friend.
        """
        try:
            await self.http.friends_add_or_accept(user_id)
        except HTTPException as exc:
            # Translate epic's string error codes into the library's
            # specific exception types; anything unrecognized is re-raised.
            m = 'errors.com.epicgames.friends.account_not_found'
            if exc.message_code == m:
                raise NotFound('The specified account does not exist.')
            m = 'errors.com.epicgames.friends.duplicate_friendship'
            if exc.message_code == m:
                raise DuplicateFriendship('This friendship already exists.')
            m = 'errors.com.epicgames.friends.friend_request_already_sent'
            if exc.message_code == m:
                raise FriendshipRequestAlreadySent(
                    'A friendship request already exists for this user.'
                )
            m = 'errors.com.epicgames.friends.inviter_friendships_limit_exceeded'  # noqa
            if exc.message_code == m:
                raise MaxFriendshipsExceeded(
                    'The client has hit the friendships limit.'
                )
            m = 'errors.com.epicgames.friends.invitee_friendships_limit_exceeded'  # noqa
            if exc.message_code == m:
                raise InviteeMaxFriendshipsExceeded(
                    'The user has hit the friendships limit.'
                )
            m = 'errors.com.epicgames.friends.incoming_friendships_limit_exceeded'  # noqa
            if exc.message_code == m:
                raise InviteeMaxFriendshipRequestsExceeded(
                    'The user has hit the incoming friendship requests limit.'
                )
            m = ('errors.com.epicgames.friends.'
                 'cannot_friend_due_to_target_settings')
            if exc.message_code == m:
                raise Forbidden('You cannot send friendship requests to '
                                'this user.')
            raise
    async def accept_friend(self, user_id: str) -> Friend:
        """|coro|
        .. warning::
            Do not use this method to send a friend request. It will then not
            return until the friend request has been accepted by the user.
        Accepts a request.
        Parameters
        ----------
        user_id: :class:`str`
            The id of the user you want to accept.
        Raises
        ------
        NotFound
            The specified user does not exist.
        DuplicateFriendship
            The client is already friends with this user.
        FriendshipRequestAlreadySent
            The client has already sent a friendship request that has not been
            handled yet by the user.
        Forbidden
            The client is not allowed to send friendship requests to the user
            because of the users settings.
        HTTPException
            An error occured while requesting to accept this friend.
        Returns
        -------
        :class:`Friend`
            Object of the friend you just added.
        """
        await self.add_friend(user_id)
        # Wait for the corresponding friend_add event so the constructed
        # Friend object can be returned to the caller.
        friend = await self.wait_for('friend_add',
                                     check=lambda f: f.id == user_id)
        return friend
async def remove_or_decline_friend(self, user_id: str) -> None:
"""|coro|
Removes a friend by the given id.
Parameters
----------
user_id: :class:`str`
The id of the friend you want to remove.
Raises
------
HTTPException
Something went wrong when trying to remove this friend.
"""
await self.http.friends_remove_or_decline(user_id)
async def dispatch_and_wait_event(self, event: str,
*args: Any,
**kwargs: Any) -> None:
coros = self._events.get(event, [])
tasks = [coro() for coro in coros]
if tasks:
await asyncio.wait(
tasks,
return_when=asyncio.ALL_COMPLETED
)
def _dispatcher(self, coro: Awaitable,
*args: Any,
**kwargs: Any) -> asyncio.Future:
return asyncio.ensure_future(coro(*args, **kwargs))
    def dispatch_event(self, event: str,
                       *args: Any,
                       **kwargs: Any) -> List[asyncio.Future]:
        """Resolves pending ``wait_for()`` futures for *event* and schedules
        all registered handlers, returning the scheduled handler tasks."""
        listeners = self._listeners.get(event)
        if listeners:
            # Each listener is a (future, check) pair added by wait_for().
            # Settle every future whose check passes and note its index so
            # it can be dropped afterwards.
            removed = []
            for i, (future, check) in enumerate(listeners):
                if future.cancelled():
                    removed.append(i)
                    continue
                try:
                    result = check(*args)
                except Exception as e:
                    future.set_exception(e)
                    removed.append(i)
                else:
                    if result:
                        # The future result mirrors the dispatch arguments:
                        # no args -> None, one arg -> the arg itself,
                        # several -> a tuple of them.
                        if len(args) == 0:
                            future.set_result(None)
                        elif len(args) == 1:
                            future.set_result(args[0])
                        else:
                            future.set_result(args)
                        removed.append(i)
            # Delete in reverse index order so earlier indexes stay valid.
            if len(removed) == len(listeners):
                self._listeners.pop(event)
            else:
                for idx in reversed(removed):
                    del listeners[idx]
        # Schedule persistent handlers registered via add_event_handler().
        tasks = []
        if event in self._events:
            for coro in self._events[event]:
                task = self._dispatcher(coro, *args, **kwargs)
                tasks.append(task)
        return tasks
    def wait_for(self, event: str, *,
                 check: Callable = None,
                 timeout: Optional[int] = None) -> Any:
        """|coro|
        Waits for an event to be dispatch.
        In case the event returns more than one arguments, a tuple is passed
        containing the arguments.
        Examples
        --------
        This example waits for the author of a :class:`FriendMessage` to say
        hello.: ::
            @client.event
            async def event_friend_message(message):
                await message.reply('Say hello!')
                def check_function(m):
                    return m.author.id == message.author.id
                msg = await client.wait_for('message', check=check_function, timeout=60)
                await msg.reply('Hello {0.author.display_name}!'.format(msg))
        This example waits for the the leader of a party to promote the bot
        after joining and then sets a new custom key: ::
            @client.event
            async def event_party_member_join(member):
                # checks if the member that joined is the UserClient
                if member.id != client.user.id:
                    return
                def check(m):
                    return m.id == client.user.id
                try:
                    await client.wait_for('party_member_promote', check=check, timeout=120)
                except asyncio.TimeoutError:
                    await member.party.send('You took too long to promote me!')
                await member.party.set_custom_key('my_custom_key_123')
        Parameters
        ----------
        event: :class:`str`
            The name of the event.
            .. note::
                | The name of the event must be **without** the ``event_``
                prefix.
                |
                | Wrong = ``event_friend_message``.
                | Correct = ``friend_message``.
        check: Optional[Callable]
            A predicate to check what to wait for.
            Defaults to a predicate that always returns ``True``. This means
            it will return the first result unless you pass another predicate.
        timeout: :class:`int`
            How many seconds to wait for before asyncio.TimeoutError is raised.
            *Defaults to ``None`` which means it will wait forever.*
        Raises
        ------
        asyncio.TimeoutError
            No event was retrieved in the time you specified.
        Returns
        -------
        Any
            Returns arguments based on the event you are waiting for. An event
            might return no arguments, one argument or a tuple of arguments.
            Check the :ref:`event reference <fortnitepy-events-api>` for more
            information about the returning arguments.
        """ # noqa
        future = self.loop.create_future()
        if check is None:
            # Without an explicit predicate, the first dispatch of the
            # event resolves the future.
            def _check(*args):
                return True
            check = _check
        # Normalize the event name by stripping the event prefix.
        ev = (event.lower()).replace(self.event_prefix, '')
        try:
            listeners = self._listeners[ev]
        except KeyError:
            listeners = []
            self._listeners[ev] = listeners
        # dispatch_event() consumes this (future, check) pair.
        listeners.append((future, check))
        return asyncio.wait_for(future, timeout)
def _event_has_handler(self, event: str) -> bool:
handlers = self._events.get(event.lower())
return handlers is not None and len(handlers) > 0
def _event_has_destination(self, event):
if event in self._listeners:
return True
elif self._event_has_handler(event):
return True
return False
def add_event_handler(self, event: str, coro: Awaitable[Any]) -> None:
"""Registers a coroutine as an event handler. You can register as many
coroutines as you want to a single event.
Parameters
----------
event: :class:`str`
The name of the event you want to register this coro for.
coro: :ref:`coroutine <coroutine>`
The coroutine to function as the handler for the specified event.
Raises
------
TypeError
The function passed to coro is not a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError('event registered must be a coroutine function')
if event.startswith(self.event_prefix):
event = event[len(self.event_prefix):]
if event not in self._events:
self._events[event] = []
self._events[event].append(coro)
def remove_event_handler(self, event: str, coro: Awaitable) -> None:
"""Removes a coroutine as an event handler.
Parameters
----------
event: :class:`str`
The name of the event you want to remove this coro for.
coro: :ref:`coroutine <coroutine>`
The coroutine that already functions as a handler for the
specified event.
"""
if event not in self._events:
return
self._events[event] = [c for c in self._events[event] if c != coro]
    def event(self,
              event_or_coro: Union[str, Awaitable[Any]] = None) -> Awaitable:
        """A decorator to register an event.
        .. note::
            You do not need to decorate events in a subclass of :class:`Client`
            but the function names of event handlers must follow this format
            ``event_<event>``.
        Usage: ::
            @client.event
            async def event_friend_message(message):
                await message.reply('Thanks for your message!')
            @client.event('friend_message')
            async def my_message_handler(message):
                await message.reply('Thanks for your message!')
        Raises
        ------
        TypeError
            The decorated function is not a coroutine.
        TypeError
            Event is not specified as argument or function name with event
            prefix.
        """
        # True for bare usage (@client.event); False when called with an
        # explicit event name (@client.event('friend_message')).
        is_coro = callable(event_or_coro)
        def pred(coro):
            if isinstance(coro, staticmethod):
                coro = coro.__func__
            if not asyncio.iscoroutinefunction(coro):
                raise TypeError('the decorated function must be a coroutine')
            if is_coro or event_or_coro is None:
                # Bare usage: derive the event name from the function name.
                if not coro.__name__.startswith(self.event_prefix):
                    raise TypeError('non specified events must follow '
                                    'this function name format: '
                                    '"{}<event>"'.format(self.event_prefix))
                name = coro.__name__[len(self.event_prefix):]
            else:
                name = event_or_coro
            self.add_event_handler(name, coro)
            log.debug('{} has been registered as a handler for the '
                      'event {}'.format(coro.__name__, name))
            return coro
        return pred(event_or_coro) if is_coro else pred
def _process_stats_times(self, start_time: Optional[DatetimeOrTimestamp] = None, # noqa
end_time: Optional[DatetimeOrTimestamp] = None
) -> Tuple[Optional[int], Optional[int]]:
epoch = datetime.datetime.utcfromtimestamp(0)
if isinstance(start_time, datetime.datetime):
start_time = int((start_time - epoch).total_seconds())
elif isinstance(start_time, SeasonStartTimestamp):
start_time = start_time.value
if isinstance(end_time, datetime.datetime):
end_time = int((end_time - epoch).total_seconds())
elif isinstance(end_time, SeasonEndTimestamp):
end_time = end_time.value
return start_time, end_time
    async def fetch_br_stats(self, user_id: str, *,
                             start_time: Optional[DatetimeOrTimestamp] = None,
                             end_time: Optional[DatetimeOrTimestamp] = None
                             ) -> StatsV2:
        """|coro|
        Gets Battle Royale stats the specified user.
        Parameters
        ----------
        user_id: :class:`str`
            The id of the user you want to fetch stats for.
        start_time: Optional[Union[:class:`int`, :class:`datetime.datetime`, :class:`SeasonStartTimestamp`]]
            The UTC start time of the time period to get stats from.
            *Must be seconds since epoch, :class:`datetime.datetime` or a constant from SeasonEndTimestamp*
            *Defaults to None*
        end_time: Optional[Union[:class:`int`, :class:`datetime.datetime`, :class:`SeasonEndTimestamp`]]
            The UTC end time of the time period to get stats from.
            *Must be seconds since epoch, :class:`datetime.datetime` or a constant from SeasonEndTimestamp*
            *Defaults to None*
        Raises
        ------
        Forbidden
            | The user has chosen to be hidden from public stats by disabling
            the fortnite setting below.
            | ``Settings`` -> ``Account and Privacy`` -> ``Show on career
            leaderboard``
        HTTPException
            An error occured while requesting.
        Returns
        -------
        :class:`StatsV2`
            An object representing the stats for this user. If the user was
            not found ``None`` is returned.
        """ # noqa
        start_time, end_time = self._process_stats_times(start_time, end_time)
        # Resolve the user object and the stats payload concurrently.
        tasks = [
            self.fetch_user(user_id, cache=True),
            self.http.stats_get_v2(
                user_id,
                start_time=start_time,
                end_time=end_time
            )
        ]
        results = await asyncio.gather(*tasks)
        # An empty-string stats payload means the user has opted out of
        # public leaderboards.
        if results[1] == '':
            raise Forbidden('This user has chosen to be hidden '
                            'from public stats.')
        return StatsV2(*results) if results[0] is not None else None
async def _multiple_stats_chunk_requester(self, user_ids: List[str], stats: List[str], *, # noqa
collection: Optional[str] = None,
start_time: Optional[DatetimeOrTimestamp] = None, # noqa
end_time: Optional[DatetimeOrTimestamp] = None # noqa
) -> List[dict]:
chunks = [user_ids[i:i+51] for i in range(0, len(user_ids), 51)]
tasks = []
for chunk in chunks:
tasks.append(self.http.stats_get_multiple_v2(
chunk,
stats,
category=collection,
start_time=start_time,
end_time=end_time
))
results = await asyncio.gather(*tasks)
return [item for sub in results for item in sub]
    async def _fetch_multiple_br_stats(self, cls: _StatsBase,
                                       user_ids: List[str],
                                       stats: List[str],
                                       *,
                                       collection: Optional[str] = None, # noqa
                                       start_time: Optional[DatetimeOrTimestamp] = None, # noqa
                                       end_time: Optional[DatetimeOrTimestamp] = None, # noqa
                                       ) -> Dict[str, StatsV2]:
        """|coro|
        Shared implementation for the public multiple-stats fetchers: resolves
        the users and their stats payloads, then maps account id to an
        instance of *cls* (or ``None`` when the user could not be resolved).
        """
        start_time, end_time = self._process_stats_times(start_time, end_time)
        tasks = [
            self.fetch_users(user_ids, cache=True),
            self._multiple_stats_chunk_requester(
                user_ids,
                stats,
                collection=collection,
                start_time=start_time,
                end_time=end_time
            )
        ]
        results = await asyncio.gather(*tasks)
        # NOTE(review): asyncio.gather() preserves task order, so results[0]
        # should already be the user list; this swap looks purely defensive
        # (raw dict payloads ending up first) — confirm it is still needed.
        if len(results[0]) > 0 and isinstance(results[0][0], dict):
            results = results[::-1]
        res = {}
        for udata in results[1]:
            # Pair each stats payload with its resolved user; unknown ids
            # map to None.
            r = [x for x in results[0] if x.id == udata['accountId']]
            user = r[0] if len(r) != 0 else None
            res[udata['accountId']] = (cls(user, udata)
                                       if user is not None else None)
        return res
async def fetch_multiple_br_stats(self, user_ids: List[str],
stats: List[str],
*,
start_time: Optional[DatetimeOrTimestamp] = None, # noqa
end_time: Optional[DatetimeOrTimestamp] = None # noqa
) -> Dict[str, Optional[StatsV2]]:
"""|coro|
Gets Battle Royale stats for multiple users at the same time.
.. note::
This function is not the same as doing :meth:`fetch_br_stats` for
multiple users. The expected return for this function would not be
all the stats for the specified users but rather the stats you
specify.
Example usage: ::
async def stat_function():
stats = [
fortnitepy.StatsV2.create_stat('placetop1', fortnitepy.V2Input.KEYBOARDANDMOUSE, 'defaultsolo'),
fortnitepy.StatsV2.create_stat('kills', fortnitepy.V2Input.KEYBOARDANDMOUSE, 'defaultsolo'),
fortnitepy.StatsV2.create_stat('matchesplayed', fortnitepy.V2Input.KEYBOARDANDMOUSE, 'defaultsolo')
]
# get the users and create a list of their ids.
users = await self.fetch_users(['Ninja', 'DrLupo'])
user_ids = [u.id for u in users] + ['NonValidUserIdForTesting']
data = await self.fetch_multiple_br_stats(user_ids=user_ids, stats=stats)
for id, res in data.items():
if res is not None:
print('ID: {0} | Stats: {1}'.format(id, res.get_stats()))
else:
print('ID: {0} not found.'.format(id))
# Example output:
# ID: 463ca9d604524ce38071f512baa9cd70 | Stats: {'keyboardmouse': {'defaultsolo': {'wins': 759, 'kills': 28093, 'matchesplayed': 6438}}}
# ID: 3900c5958e4b4553907b2b32e86e03f8 | Stats: {'keyboardmouse': {'defaultsolo': {'wins': 1763, 'kills': 41375, 'matchesplayed': 7944}}}
# ID: 4735ce9132924caf8a5b17789b40f79c | Stats: {'keyboardmouse': {'defaultsolo': {'wins': 1888, 'kills': 40784, 'matchesplayed': 5775}}}
# ID: NonValidUserIdForTesting not found.
Parameters
----------
user_ids: List[:class:`str`]
A list of ids you are requesting the stats for.
stats: List[:class:`str`]
A list of stats to get for the users. Use
:meth:`StatsV2.create_stat` to create the stats.
Example: ::
[
fortnitepy.StatsV2.create_stat('placetop1', fortnitepy.V2Input.KEYBOARDANDMOUSE, 'defaultsolo'),
fortnitepy.StatsV2.create_stat('kills', fortnitepy.V2Input.KEYBOARDANDMOUSE, 'defaultsolo'),
fortnitepy.StatsV2.create_stat('matchesplayed', fortnitepy.V2Input.KEYBOARDANDMOUSE, 'defaultsolo')
]
start_time: Optional[Union[:class:`int`, :class:`datetime.datetime`, :class:`SeasonStartTimestamp`]]
The UTC start time of the time period to get stats from.
*Must be seconds since epoch, :class:`datetime.datetime` or a constant from SeasonEndTimestamp*
*Defaults to None*
end_time: Optional[Union[:class:`int`, :class:`datetime.datetime`, :class:`SeasonEndTimestamp`]]
The UTC end time of the time period to get stats from.
*Must be seconds since epoch, :class:`datetime.datetime` or a constant from SeasonEndTimestamp*
*Defaults to None*
Raises
------
HTTPException
An error occured while requesting.
Returns
-------
Dict[:class:`str`, Optional[:class:`StatsV2`]]
A mapping where :class:`StatsV2` is bound to its owners id. If a
userid was not found then the value bound to that userid will be
``None``.
.. note::
If a users stats is missing in the returned mapping it means
that the user has opted out of public leaderboards and that
the client therefore does not have permissions to requests
their stats.
""" # noqa
res = await self._fetch_multiple_br_stats(
cls=StatsV2,
user_ids=user_ids,
stats=stats,
start_time=start_time,
end_time=end_time,
)
return res
async def fetch_multiple_br_stats_collections(self, user_ids: List[str],
collection: Optional[StatsCollectionType] = None, # noqa
*,
start_time: Optional[DatetimeOrTimestamp] = None, # noqa
end_time: Optional[DatetimeOrTimestamp] = None # noqa
) -> Dict[str, Optional[StatsCollection]]: # noqa
"""|coro|
Gets Battle Royale stats collections for multiple users at the same time.
Parameters
----------
user_ids: List[:class:`str`]
A list of ids you are requesting the stats for.
collection: :class:`StatsCollectionType`
The collection to receive. Collections are predefined
stats that it attempts to request.
start_time: Optional[Union[:class:`int`, :class:`datetime.datetime`, :class:`SeasonStartTimestamp`]]
The UTC start time of the time period to get stats from.
*Must be seconds since epoch, :class:`datetime.datetime` or a constant from SeasonEndTimestamp*
*Defaults to None*
end_time: Optional[Union[:class:`int`, :class:`datetime.datetime`, :class:`SeasonEndTimestamp`]]
The UTC end time of the time period to get stats from.
*Must be seconds since epoch, :class:`datetime.datetime` or a constant from SeasonEndTimestamp*
*Defaults to None*
Raises
------
HTTPException
An error occured while requesting.
Returns
-------
Dict[:class:`str`, Optional[:class:`StatsCollection`]]
A mapping where :class:`StatsCollection` is bound to its owners id. If a
userid was not found then the value bound to that userid will be
``None``.
.. note::
If a users stats is missing in the returned mapping it means
that the user has opted out of public leaderboards and that
the client therefore does not have permissions to requests
their stats.
""" # noqa
res = await self._fetch_multiple_br_stats(
cls=StatsCollection,
user_ids=user_ids,
stats=[],
collection=collection.value,
start_time=start_time,
end_time=end_time,
)
return res
async def fetch_multiple_battlepass_levels(self,
users: List[str],
season: int,
*,
start_time: Optional[DatetimeOrTimestamp] = None, # noqa
end_time: Optional[DatetimeOrTimestamp] = None # noqa
) -> Dict[str, float]:
"""|coro|
Fetches multiple users battlepass level.
Parameters
----------
users: List[:class:`str`]
List of user ids.
season: :class:`int`
The season number to request the battlepass levels for.
.. warning::
If you are requesting the previous season and the new season has not been
added to the library yet (check :class:`SeasonStartTimestamp`), you have to
manually include the previous seasons end timestamp in epoch seconds.
start_time: Optional[Union[:class:`int`, :class:`datetime.datetime`, :class:`SeasonStartTimestamp`]]
The UTC start time of the window to get the battlepass level from.
*Must be seconds since epoch, :class:`datetime.datetime` or a constant from SeasonEndTimestamp*
*Defaults to None*
end_time: Optional[Union[:class:`int`, :class:`datetime.datetime`, :class:`SeasonEndTimestamp`]]
The UTC end time of the window to get the battlepass level from.
*Must be seconds since epoch, :class:`datetime.datetime` or a constant from SeasonEndTimestamp*
*Defaults to None*
Raises
------
HTTPException
An error occured while requesting.
Returns
-------
Dict[:class:`str`, Optional[:class:`float`]]
Users battlepass level mapped to their account id. Returns ``None``
if no battlepass level was found. If a user has career board set
to private, he/she will not appear in the result. Therefore you
should never expect a user to be included.
.. note::
The decimals are the percent progress to the next level.
E.g. ``208.63`` -> ``Level 208 and 63% on the way to 209.``
.. note::
If a users battlepass level is missing in the returned mapping it means
that the user has opted out of public leaderboards and that
the client therefore does not have permissions to requests
their stats.
""" # noqa
start_time, end_time = self._process_stats_times(start_time, end_time)
if end_time is not None:
e = getattr(SeasonStartTimestamp, 'SEASON_{}'.format(season), None)
if e is not None and end_time < e.value:
raise ValueError(
'end_time can\'t be lower than the seasons start timestamp'
)
e = getattr(BattlePassStat, 'SEASON_{}'.format(season), None)
if e is not None:
info = e.value
stats = info[0] if isinstance(info[0], tuple) else (info[0],)
end_time = end_time if end_time is not None else info[1]
else:
stats = ('s{0}_social_bp_level'.format(season),)
data = await self._multiple_stats_chunk_requester(
users,
stats,
start_time=start_time,
end_time=end_time
)
def get_stat(user_data):
for stat in stats:
value = user_data.get(stat)
if value is not None:
return value / 100
return {e['accountId']: get_stat(e['stats']) for e in data}
async def fetch_battlepass_level(self, user_id: str, *,
season: int,
start_time: Optional[DatetimeOrTimestamp] = None, # noqa
end_time: Optional[DatetimeOrTimestamp] = None # noqa
) -> float:
"""|coro|
Fetches a users battlepass level.
Parameters
----------
user_id: :class:`str`
The user id to fetch the battlepass level for.
season: :class:`int`
The season number to request the battlepass level for.
.. warning::
If you are requesting the previous season and the new season has not been
added to the library yet (check :class:`SeasonStartTimestamp`), you have to
manually include the previous seasons end timestamp in epoch seconds.
start_time: Optional[Union[:class:`int`, :class:`datetime.datetime`, :class:`SeasonStartTimestamp`]]
The UTC start time of the window to get the battlepass level from.
*Must be seconds since epoch, :class:`datetime.datetime` or a constant from SeasonEndTimestamp*
*Defaults to None*
end_time: Optional[Union[:class:`int`, :class:`datetime.datetime`, :class:`SeasonEndTimestamp`]]
The UTC end time of the window to get the battlepass level from.
*Must be seconds since epoch, :class:`datetime.datetime` or a constant from SeasonEndTimestamp*
*Defaults to None*
Raises
------
Forbidden
User has private career board.
HTTPException
An error occured while requesting.
Returns
-------
Optional[:class:`float`]
The users battlepass level. ``None`` is returned if the user has
not played any real matches this season.
.. note::
The decimals are the percent progress to the next level.
E.g. ``208.63`` -> ``Level 208 and 63% on the way to 209.``
""" # noqa
data = await self.fetch_multiple_battlepass_levels(
(user_id,),
season=season,
start_time=start_time,
end_time=end_time
)
if user_id not in data:
raise Forbidden('User has private career board.')
return data[user_id]
async def fetch_leaderboard(self, stat: str) -> List[Dict[str, StrOrInt]]:
"""|coro|
Fetches the leaderboard for a stat.
.. warning::
For some weird reason, the only valid stat you can pass is
one with ``placetop1`` (``wins`` is also accepted).
Example usage: ::
async def get_leaderboard():
stat = fortnitepy.StatsV2.create_stat(
'wins',
fortnitepy.V2Input.KEYBOARDANDMOUSE,
'defaultsquad'
)
data = await client.fetch_leaderboard(stat)
for placement, entry in enumerate(data):
print('[{0}] Id: {1} | Wins: {2}'.format(
placement, entry['account'], entry['value']))
Parameters
----------
stat: :class:`str`
The stat you are requesting the leaderboard entries for. You can
use :meth:`StatsV2.create_stat` to create this string.
Raises
------
ValueError
You passed an invalid/non-accepted stat argument.
HTTPException
An error occured when requesting.
Returns
-------
List[Dict[:class:`str`, Union[:class:`str`, :class:`int`]]]
List of dictionaries containing entry data. Example return: ::
{
'account': '4480a7397f824fe4b407077fb9397fbb',
'value': 5082
}
"""
data = await self.http.stats_get_leaderboard_v2(stat)
if len(data['entries']) == 0:
raise ValueError('{0} is not a valid stat'.format(stat))
return data['entries']
    async def _reconnect_to_party(self):
        """Rejoin the pre-disconnect party if still possible, else create
        a fresh one.
        """
        now = datetime.datetime.utcnow()
        secs = (now - self.xmpp._last_disconnected_at).total_seconds()
        # Offline longer than the member offline TTL: the party service has
        # already dropped us, so rejoining would fail anyway.
        if secs >= self.default_party_member_config.offline_ttl:
            return await self._create_party()
        data = await self.http.party_lookup_user(
            self.user.id
        )
        if data['current']:
            party_data = data['current'][0]
            async with self._join_party_lock:
                try:
                    await self._join_party(
                        party_data,
                        event='party_member_reconnect'
                    )
                except Exception:
                    # Rejoin failed; fall back to a brand-new party. The join
                    # lock is already held here, so don't re-acquire it.
                    await self._create_party(acquire=False)
                    raise
        else:
            # The service says we're not in any party.
            await self._create_party()
    async def _create_party(self,
                            config: Optional[dict] = None,
                            acquire: bool = True,
                            priority: int = 0) -> ClientParty:
        """Create a fresh party for the client, leaving any stale party the
        service still believes we belong to and retrying the creation.
        """
        # Skip taking the join lock if an auth refresh holds it, or if the
        # caller (who already holds it) asked us not to acquire.
        aquiring = not self.auth._refresh_lock.locked() and acquire
        try:
            if aquiring:
                await self._join_party_lock.acquire()
            if isinstance(config, dict):
                cf = {**self.default_party_config.config, **config}
            else:
                cf = self.default_party_config.config
            while True:
                try:
                    data = await self.http.party_create(
                        cf,
                        priority=priority
                    )
                    break
                except HTTPException as exc:
                    # Any error other than "already in a party" is fatal.
                    if exc.message_code != ('errors.com.epicgames.social.'
                                            'party.user_has_party'):
                        raise
                    # Leave the stale party, then loop and retry creation.
                    data = await self.http.party_lookup_user(
                        self.user.id,
                        priority=priority
                    )
                    try:
                        await self.http.party_leave(
                            data['current'][0]['id'],
                            priority=priority
                        )
                    except HTTPException as e:
                        # Already gone on the service side is fine.
                        m = ('errors.com.epicgames.social.'
                             'party.party_not_found')
                        if e.message_code != m:
                            raise
                    await self.xmpp.leave_muc()
            config = {**cf, **data['config']}
            party = self.construct_party(data)
            await party._update_members(
                members=data['members'],
                priority=priority
            )
            self.party = party
            tasks = [
                self.loop.create_task(party.join_chat()),
            ]
            # Wait until party metadata is populated before editing it.
            await party.meta.meta_ready_event.wait()
            updated, deleted, cfg1 = party.meta.set_privacy(config['privacy'])
            edit_updated, edit_deleted, cfg2 = await party._edit(
                *party._default_config.meta
            )
            # Filter out urn:epic:* properties that was set in party create
            # payload.
            default_schema = {
                k: v for k, v in party.meta.schema.items()
                if k.startswith('Default:')
            }
            tasks.append(party.patch(
                updated={
                    **default_schema,
                    **updated,
                    **edit_updated,
                    **party.construct_squad_assignments(),
                    **party.meta.set_voicechat_implementation('EOSVoiceChat')
                },
                deleted=[*deleted, *edit_deleted],
                priority=priority,
                config={**cfg1, **cfg2},
            ))
            await asyncio.gather(*tasks)
            return party
        finally:
            if aquiring:
                self._join_party_lock.release()
    def is_creating_party(self) -> bool:
        """Whether a party create/join is currently in progress (the
        internal join lock is held).
        """
        return self._join_party_lock.locked()
    async def wait_until_party_ready(self) -> None:
        """Wait until any in-progress party create/join has completed.

        NOTE(review): this relies on ``_join_party_lock`` exposing a
        ``wait()`` coroutine (a plain ``asyncio.Lock`` has none) — confirm
        against the lock's definition elsewhere in the file.
        """
        await self._join_party_lock.wait()
async def fetch_party(self, party_id: str) -> Party:
"""|coro|
Fetches a party by its id.
Parameters
----------
party_id: :class:`str`
The id of the party.
Raises
------
Forbidden
You are not allowed to look up this party.
Returns
-------
Optional[:class:`Party`]
The party that was fetched. ``None`` if not found.
"""
try:
data = await self.http.party_lookup(party_id)
except HTTPException as exc:
m = 'errors.com.epicgames.social.party.party_not_found'
if exc.message_code == m:
return None
m = 'errors.com.epicgames.social.party.party_query_forbidden'
if exc.message_code == m:
raise Forbidden('You are not allowed to lookup this party.')
raise
party = Party(self, data)
await party._update_members(members=data['members'])
return party
    async def _join_party(self, party_data: dict, *,
                          event: str = 'party_member_join') -> ClientParty:
        """Low-level join helper: sends the join request, then waits for
        the matching member event before returning the joined party.
        """
        async with self._internal_join_party_lock:
            party = self.construct_party(party_data)
            await party._update_members(party_data['members'])
            self.party = party
            def check(m):
                # Only accept the event for *our* member in *this* party.
                if m.id != self.user.id:
                    return False
                if party.id != m.party.id:
                    return False
                return True
            # Start listening before the join request so the event can't
            # slip past between request and wait.
            future = asyncio.ensure_future(
                self.wait_for(event, check=check, timeout=5),
            )
            try:
                await self.http.party_join_request(party.id)
            except HTTPException as e:
                # Stop waiting for an event that will never arrive.
                if not future.cancelled():
                    future.cancel()
                m = 'errors.com.epicgames.social.party.party_join_forbidden'  # noqa
                if e.message_code == m:
                    raise Forbidden(
                        'You are not allowed to join this party.'
                    )
                raise
            # Re-fetch the party for authoritative post-join state.
            party_data = await self.http.party_lookup(party.id)
            party = self.construct_party(party_data)
            self.party = party
            asyncio.ensure_future(party.join_chat())
            await party._update_members(party_data['members'])
            try:
                await future
            except asyncio.TimeoutError:
                raise asyncio.TimeoutError('Party join timed out.')
            return party
    async def join_party(self, party_id: str) -> ClientParty:
        """|coro|
        Joins a party by the party id.
        Parameters
        ----------
        party_id: :class:`str`
            The id of the party you wish to join.
        Raises
        ------
        .. warning::
            Because the client has to leave its current party before joining
            a new one, a new party is created if some of these errors are
            raised. Most of the time though this is not the case and the client
            will remain in its current party.
        PartyError
            You are already a member of this party.
        NotFound
            The party was not found.
        Forbidden
            You are not allowed to join this party because it's private
            and you have not been a part of it before.
            .. note::
                If you have been a part of the party before but got
                kicked, you are ineligible to join this party and this
                error is raised.
        HTTPException
            An error occurred when requesting to join the party.
        Returns
        -------
        :class:`ClientParty`
            The party that was just joined.
        """
        async with self._join_party_lock:
            if party_id == self.party.id:
                raise PartyError('You are already a member of this party.')
            # Look the party up *before* leaving the current one so lookup
            # failures leave the client's party membership untouched.
            try:
                party_data = await self.http.party_lookup(party_id)
            except HTTPException as e:
                m = 'errors.com.epicgames.social.party.party_not_found'
                if e.message_code == m:
                    raise NotFound(
                        'Could not find a party with the id {0}'.format(
                            party_id
                        )
                    )
                m = 'errors.com.epicgames.social.party.party_query_forbidden'  # noqa
                if e.message_code == m:
                    raise Forbidden(
                        'You are not allowed to join this party.'
                    )
                raise
            try:
                await self.party._leave()
                party = await self._join_party(party_data)
                return party
            except Exception:
                # Join failed after leaving the old party; create a new one
                # so the client is never left without a party.
                await self._create_party(acquire=False)
                raise
async def set_presence(self, status: str, *,
away: AwayStatus = AwayStatus.ONLINE) -> None:
"""|coro|
Sends and sets the status. This status message will override all other
presence statuses including party presence status.
Parameters
----------
status: :class:`str`
The status you want to set.
away: :class:`AwayStatus`
The away status to use. Defaults to :attr:`AwayStatus.ONLINE`.
Raises
------
TypeError
The status you tried to set were not a str.
"""
if not isinstance(status, str):
raise TypeError('status must be a str')
self.status = status
self.away = away
await self.xmpp.send_presence(
status=status,
show=away.value
)
async def send_presence(self, status: Union[str, dict], *,
away: AwayStatus = AwayStatus.ONLINE,
to: Optional[JID] = None) -> None:
"""|coro|
Sends this status to all or one single friend.
Parameters
----------
status: Union[:class:`str`, :class:`dict`]
The status message in :class:`str` or full status in :class:`dict`.
away: :class:`AwayStatus`
The away status to use. Defaults to :attr:`AwayStatus.ONLINE`.
to: Optional[:class:`aioxmpp.JID`]
The JID of the user that should receive this status.
*Defaults to None which means it will send to all friends.*
Raises
------
TypeError
Status was an invalid type.
"""
await self.xmpp.send_presence(
status=status,
show=away.value,
to=to
)
def set_avatar(self, avatar: Avatar) -> None:
"""Sets the client's avatar and updates it for all friends.
Parameters
----------
avatar: :class:`Avatar`
The avatar to set.
"""
self.avatar = avatar
self.party.update_presence()
async def fetch_lightswitch_status(self,
service_id: str = 'Fortnite') -> bool:
"""|coro|
Fetches the lightswitch status of an epicgames service.
Parameters
----------
service_id: :class:`str`
The service id to check status for.
Raises
------
ValueError
The returned data was empty. Most likely because service_id is not
valid.
HTTPException
An error occured when requesting.
Returns
-------
:class:`bool`
``True`` if service is up else ``False``
"""
status = await self.http.lightswitch_get_status(service_id=service_id)
if len(status) == 0:
raise ValueError('emtpy lightswitch response')
return True if status[0].get('status') == 'UP' else False
async def fetch_item_shop(self) -> Store:
"""|coro|
Fetches the current item shop.
Example: ::
# fetches all CIDs (character ids) of of the current item shop.
async def get_current_item_shop_cids():
store = await client.fetch_item_shop()
cids = []
for item in store.featured_items + store.daily_items:
for grant in item.grants:
if grant['type'] == 'AthenaCharacter':
cids.append(grant['asset'])
return cids
Raises
------
HTTPException
An error occured when requesting.
Returns
-------
:class:`Store`
Object representing the data from the current item shop.
"""
data = await self.http.fortnite_get_store_catalog()
return Store(self, data)
async def fetch_br_news(self) -> List[BattleRoyaleNewsPost]:
"""|coro|
Fetches news for the Battle Royale gamemode.
Raises
------
HTTPException
An error occured when requesting.
Returns
-------
:class:`list`
List[:class:`BattleRoyaleNewsPost`]
"""
data = await self.http.fortnitecontent_get()
res = []
msg = data['battleroyalenews']['news'].get('message')
if msg is not None:
res.append(BattleRoyaleNewsPost(msg))
else:
msgs = data['battleroyalenews']['news']['messages']
for msg in msgs:
res.append(BattleRoyaleNewsPost(msg))
return res
async def fetch_br_playlists(self) -> List[Playlist]:
"""|coro|
Fetches all playlists registered on Fortnite. This includes all
previous gamemodes that is no longer active.
Raises
------
HTTPException
An error occured while requesting.
Returns
-------
List[:class:`Playlist`]
List containing all playlists registered on Fortnite.
"""
data = await self.http.fortnitecontent_get()
raw = data['playlistinformation']['playlist_info']['playlists']
playlists = []
for playlist in raw:
try:
p = Playlist(playlist)
playlists.append(p)
except KeyError:
pass
return playlists
async def fetch_active_ltms(self, region: Region) -> List[str]:
"""|coro|
Fetches active LTMs for a specific region.
Parameters
----------
region: :class:`Region`
The region to request active LTMs for.
Raises
------
HTTPException
An error occured while requesting.
Returns
-------
List[:class:`str`]
List of internal playlist names. Returns an empty list of none
LTMs are for the specified region.
"""
data = await self.http.fortnite_get_timeline()
states = data['channels']['client-matchmaking']['states']
region_data = states[len(states) - 1]['state']['region'].get(
region.value, {})
return region_data.get('eventFlagsForcedOn', [])
| 34.97972 | 149 | 0.559047 |
bcadac953ab2e9cc0287be2846abe4a8d6051cc7 | 4,058 | py | Python | tests/components/homekit/test_aidmanager.py | marioedani/homeassistant-core | 2bc5db857ec6aa605ea7ff363654db2109f7cec4 | [
"Apache-2.0"
] | 1 | 2019-11-04T12:18:19.000Z | 2019-11-04T12:18:19.000Z | tests/components/homekit/test_aidmanager.py | marioedani/homeassistant-core | 2bc5db857ec6aa605ea7ff363654db2109f7cec4 | [
"Apache-2.0"
] | 3 | 2021-09-08T03:31:36.000Z | 2022-03-12T01:00:03.000Z | tests/components/homekit/test_aidmanager.py | marioedani/homeassistant-core | 2bc5db857ec6aa605ea7ff363654db2109f7cec4 | [
"Apache-2.0"
] | 1 | 2020-06-01T12:43:03.000Z | 2020-06-01T12:43:03.000Z | """Tests for the HomeKit AID manager."""
from asynctest import patch
import pytest
from homeassistant.components.homekit.aidmanager import (
AccessoryAidStorage,
get_system_unique_id,
)
from homeassistant.helpers import device_registry
from tests.common import MockConfigEntry, mock_device_registry, mock_registry
@pytest.fixture
def device_reg(hass):
    """Return an empty, loaded device registry mock bound to *hass*."""
    return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
    """Return an empty, loaded entity registry mock bound to *hass*."""
    return mock_registry(hass)
async def test_aid_generation(hass, device_reg, entity_reg):
    """Test that AIDs are generated deterministically and survive deletion.

    The same entity must always map to the same AID, and re-allocating
    after deleting the stored AIDs must reproduce the original values.
    """
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    light_ent = entity_reg.async_get_or_create(
        "light", "device", "unique_id", device_id=device_entry.id
    )
    light_ent2 = entity_reg.async_get_or_create(
        "light", "device", "other_unique_id", device_id=device_entry.id
    )
    remote_ent = entity_reg.async_get_or_create(
        "remote", "device", "unique_id", device_id=device_entry.id
    )
    hass.states.async_set(light_ent.entity_id, "on")
    hass.states.async_set(light_ent2.entity_id, "on")
    hass.states.async_set(remote_ent.entity_id, "on")
    # Entity with no registry entry / unique id must still get a stable AID.
    hass.states.async_set("remote.has_no_unique_id", "on")
    with patch(
        "homeassistant.components.homekit.aidmanager.AccessoryAidStorage.async_schedule_save"
    ):
        aid_storage = AccessoryAidStorage(hass)
    await aid_storage.async_initialize()
    # Repeated allocation must be idempotent: same entity -> same AID.
    for _ in range(0, 2):
        assert (
            aid_storage.get_or_allocate_aid_for_entity_id(light_ent.entity_id)
            == 1692141785
        )
        assert (
            aid_storage.get_or_allocate_aid_for_entity_id(light_ent2.entity_id)
            == 2732133210
        )
        assert (
            aid_storage.get_or_allocate_aid_for_entity_id(remote_ent.entity_id)
            == 1867188557
        )
        assert (
            aid_storage.get_or_allocate_aid_for_entity_id("remote.has_no_unique_id")
            == 1872038229
        )
    # Delete the stored AIDs (including a nonexistent key, which must be a
    # no-op) and verify re-allocation reproduces identical values.
    aid_storage.delete_aid(get_system_unique_id(light_ent))
    aid_storage.delete_aid(get_system_unique_id(light_ent2))
    aid_storage.delete_aid(get_system_unique_id(remote_ent))
    aid_storage.delete_aid("non-existant-one")
    for _ in range(0, 2):
        assert (
            aid_storage.get_or_allocate_aid_for_entity_id(light_ent.entity_id)
            == 1692141785
        )
        assert (
            aid_storage.get_or_allocate_aid_for_entity_id(light_ent2.entity_id)
            == 2732133210
        )
        assert (
            aid_storage.get_or_allocate_aid_for_entity_id(remote_ent.entity_id)
            == 1867188557
        )
        assert (
            aid_storage.get_or_allocate_aid_for_entity_id("remote.has_no_unique_id")
            == 1872038229
        )
async def test_aid_adler32_collision(hass, device_reg, entity_reg):
    """Test that allocated AIDs stay unique even when the underlying hash
    collides across many similar unique ids.
    """
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    with patch(
        "homeassistant.components.homekit.aidmanager.AccessoryAidStorage.async_schedule_save"
    ):
        aid_storage = AccessoryAidStorage(hass)
    await aid_storage.async_initialize()
    seen_aids = set()
    # 202 sequential unique ids are enough to force hash collisions that
    # the AID manager must resolve to distinct AIDs.
    for unique_id in range(0, 202):
        ent = entity_reg.async_get_or_create(
            "light", "device", unique_id, device_id=device_entry.id
        )
        hass.states.async_set(ent.entity_id, "on")
        aid = aid_storage.get_or_allocate_aid_for_entity_id(ent.entity_id)
        assert aid not in seen_aids
        seen_aids.add(aid)
| 33.53719 | 93 | 0.689009 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.