input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1,
remaining_budget / lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
return lowest
def func_2868949534bb4b1e9a04051fdde991e8():
infile = open('codejam/test_files/Y13R5P1/A.in')
cases = int(infile.readline())
for cc in xrange(cases):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1,
remaining_budget / lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
return larger
def func_800e0609b09b4b6b8bc983e503465154():
infile = open('codejam/test_files/Y13R5P1/A.in')
cases = int(infile.readline())
for cc in xrange(cases):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1,
remaining_budget / lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
return needed_budget
def func_83762b7b93da4316a3aab32ef573f470():
infile = open('codejam/test_files/Y13R5P1/A.in')
cases = int(infile.readline())
for cc in xrange(cases):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1,
remaining_budget / lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
return exclude
def func_54db9053ea104d67a67de804bb2df77c():
infile = open('codejam/test_files/Y13R5P1/A.in')
cases = int(infile.readline())
for cc in xrange(cases):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1,
remaining_budget / lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
return next_larger
def func_a9d5a2789dd44d878d1db75963f878e1():
infile = open('codejam/test_files/Y13R5P1/A.in')
cases = int(infile.readline())
for cc in xrange(cases):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1,
remaining_budget / lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
return ret
def func_e593bc4a0b794e2aa7edf31ef735a7c9():
infile = open('codejam/test_files/Y13R5P1/A.in')
cases = int(infile.readline())
for cc in xrange(cases):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = | |
<gh_stars>0
#!/usr/bin/env python
import os
import sys
import argparse
import math
import subprocess
global MAKEBLASTDB
global TBLASTN
global CDHIT
global MAFFT
ALPHABET = {'A','C','G','T'}
DEG_CODE = {
'A': 'A',
'C': 'C',
'G': 'G',
'T': 'T',
'AG': 'R',
'CT': 'Y',
'CG': 'S',
'AT': 'W',
'GT': 'K',
'AC': 'M',
'CGT': 'B',
'AGT': 'D',
'ACT': 'H',
'ACG': 'V',
'ACGT': 'N',
'R': 'AG',
'Y': 'CT',
'S': 'CG',
'W': 'AT',
'K': 'GT',
'M': 'AC',
'B': 'CGT',
'D': 'AGT',
'H': 'ACT',
'V': 'ACG',
'N': 'ACGT'
}
RC_CODE = {
'A': 'T',
'C': 'G',
'T': 'A',
'G': 'C',
'R': 'Y',
'Y': 'R',
'S': 'S',
'W': 'W',
'K': 'M',
'M': 'K',
'B': 'V',
'D': 'H',
'H': 'D',
'V': 'B',
'N': 'N'
}
##TODO: better handling of Ns?
def shannonEntropy(seq):
seq_nogap = seq.replace('-','')
H = 0
for letter in ALPHABET:
p = seq_nogap.count(letter) * 1.0 / len(seq_nogap)
#print p
if p > 0:
H = H + (p * math.log(p, 4))
if H != 0: H = H * -1
return(H)
def scanEntropy(faFile, outPrefix, output, PRIMER_NAMES, kmer):
seqNamesAll = []
seqNamesNoPrimers = []
seqs = []
cols = ""
curSeq = ""
fa = open(faFile, 'r')
##read and organize data
while True:
line = fa.readline().strip()
if line == "":
if curSeq != "":
curSeq = curSeq.upper()
##add previous sequence
seqs.append(curSeq)
if seqNamesAll[-1] not in PRIMER_NAMES:
if len(cols) == 0: cols = list(curSeq)
else:
for i in range(len(curSeq)):
cols[i] = cols[i] + curSeq[i]
break
if line.startswith(">"):
if curSeq != "":
curSeq = curSeq.upper()
##add previous sequence
seqs.append(curSeq)
if seqNamesAll[-1] not in PRIMER_NAMES:
if len(cols) == 0: cols = list(curSeq)
else:
for i in range(len(curSeq)):
cols[i] = cols[i] + curSeq[i]
curSeq = ""
name = line[1:]
seqNamesAll.append(name)
if name not in PRIMER_NAMES: seqNamesNoPrimers.append(name)
else:
curSeq = curSeq + line
seqStats = open(outPrefix+".05.sequence_statistics.txt", 'w')
seqStats.write("#sequence\tprimer\tnumMismatches\n")
##get primer statistics
coordToPrimer = {}
primerToCoord = {}
for i in range(len(seqNamesAll)):
if seqNamesAll[i] in PRIMER_NAMES:
primerName = seqNamesAll[i]
primerSeq = seqs[i]
for j in range(len(primerSeq)):
if primerSeq[j] != "-":
primerToCoord[primerName] = j
if j in coordToPrimer:
coordToPrimer[j] = coordToPrimer[j]+", "+primerName
else:
coordToPrimer[j] = primerName
break
seqToMismatch = {}
entropy = 0.0
count = 1
conSeq = ""
primerLength = 0
for j in range(len(primerSeq)):
if primerSeq[j] != "-":
primerLength += 1
##some primer statistics##
entropy = entropy + shannonEntropy(cols[j])
count = count * len(DEG_CODE[primerSeq[j]])
conSeq = conSeq + primerSeq[j]
for x in range(len(cols[j])):
okayLetters = DEG_CODE[primerSeq[j]]
if cols[j][x] != okayLetters and cols[j][x] not in okayLetters:
if seqNamesNoPrimers[x] in seqToMismatch:
seqToMismatch[seqNamesNoPrimers[x]] = seqToMismatch[seqNamesNoPrimers[x]] + 1
else:
seqToMismatch[seqNamesNoPrimers[x]] = 1
print "%s\tcoordinate: %d\tentropy: %0.2f\tnumPrimers:%d\t%s" % (primerName, (primerToCoord[primerName]+1), entropy/primerLength, count, conSeq)
print "%s captures:" % primerName
histMismatches = {}
maxMismatch = 1
for seq, mismatch in seqToMismatch.iteritems():
seqStats.write("%s\t%s\t%d\n" % (seq, seqNamesAll[i], mismatch))
#print seq, mismatch
if mismatch > maxMismatch: maxMismatch = mismatch
if mismatch in histMismatches:
histMismatches[mismatch] = histMismatches[mismatch] + 1
else:
histMismatches[mismatch] = 1
for seq in seqNamesNoPrimers:
if seq not in seqToMismatch:
seqStats.write("%s\t%s\t0\n" % (seq, seqNamesAll[i]))
#print histMismatches
print "\t%d sequences with 0 mismatches" % (len(seqNamesNoPrimers) - len(seqToMismatch))
for n in range(1,maxMismatch+1):
if n in histMismatches:
print "\t%d sequences with %d mismatch(es)" % (histMismatches[n], n)
else:
print "\t0 sequences with %d mismatch(es)" %n
print ""
seqStats.close()
##get column statistics
entropy = []
gap = []
consensus = []
count = []
for j in range(len(cols)):
entropy.append(shannonEntropy(cols[j]))
gap.append(cols[j].count("-"))
noGaps = cols[j].replace("-", "")
count.append(len(set(noGaps)))
uniq = "".join(sorted(set(noGaps)))
##get rid of any degenerate bases
uniqNodeg = ""
for base in uniq:
if base not in {"A", "C", "T", "G"}:
uniqNodeg += DEG_CODE[base]
else:
uniqNodeg += base
uniqFinal = "".join(sorted(set(uniqNodeg)))
consensus.append(DEG_CODE[uniqFinal])
outfile = open(output, 'w')
outfile.write("#coordinate\tentropy\tnumGaps\tnumPrimers\tconsensusSequence\tprimerName\n")
for j in range(len(cols)-kmer):
cumEntropy = 0.0
cumGaps = 0
cumCount = 1
conSeq = ""
for k in range(kmer):
cumEntropy += entropy[j+k]
cumGaps += gap[j+k]
cumCount = cumCount * count[j+k]
conSeq += consensus[j+k]
if j in coordToPrimer:
outfile.write("%d\t%.2f\t%d\t%d\t%s\t%s\n" %(j+1, cumEntropy/kmer, cumGaps, cumCount, conSeq, coordToPrimer[j]))
else:
outfile.write("%d\t%.2f\t%d\t%s\t%s\n" % (j+1, cumEntropy/kmer, cumGaps, cumCount, conSeq))
def filterVirusDb(virusDb, refProtein, outPrefix, refProtOnly):
alignStats = {}
#find length of refProtein
refProt = open(refProtein, 'r')
numEntries = 0
refLength = 0
while True:
line = refProt.readline()
if line == "": break
if line.startswith(">"):
numEntries += 1
else:
refLength += len(line.strip())
if numEntries > 1:
exit("Error: refProtein contains more than one reference protein sequence")
CMD = "%s -dbtype nucl -in %s" % (MAKEBLASTDB, virusDb)
print CMD
os.system(CMD)
CMD = "\n%s -query %s -db %s -outfmt 6 -out %s.01.tblastn.txt -max_target_seqs 9999999" % (TBLASTN, refProtein, virusDb, outPrefix)
print CMD
os.system(CMD)
results = open("%s.01.tblastn.txt" % outPrefix)
keep = set()
while True:
line = results.readline()
if line == "": break
line = line.split()
refName = line[0].strip()
seqName = line[1].strip()
length = int(line[3].strip())
if float(line[10].strip()) > 0.05: continue
if length > (0.8 * refLength):
keep.add(seqName)
##if refProtOnly flag is on, keep track of alignment statistics
start = int(line[8].strip())
end = int(line[9].strip())
if seqName in alignStats:
curStats = alignStats[seqName]
if start < curStats[0]: curStats[0] = start
if end > curStats[1]: curStats[1] = end
else:
curStats = [start,end]
alignStats[seqName] = curStats
#print alignStats
##Make filtered fasta
virusFile = open(virusDb, 'r')
virusDbCount = 0
filtVirusDb = {}
seq = ""
for line in virusFile.readlines():
if line.startswith(">"):
virusDbCount += 1
##add previous sequence to virusDb
if seq != "" and name in keep: filtVirusDb[name] = seq
name = line[1:].split()[0].strip()
seq = ""
else:
seq += line.strip()
##add last entry
if name in keep: filtVirusDb[name] = seq
filtVirusFile = open(outPrefix+".01.homologous.fa", 'w')
for name,seq in filtVirusDb.iteritems():
filtVirusFile.write(">"+name+"\n")
if refProtOnly:
curStats = alignStats[name]
start = curStats[0]-201
if start < 0: start = 0
end = curStats[1]+200
if end > len(seq): end = len(seq)
filtVirusFile.write(seq[start:end]+"\n")
else:
filtVirusFile.write(seq+"\n")
filtVirusFile.close()
#print filtVirusDb
print("\nKeeping %d sequences with homology to %s out of total of %d sequences. Results written to %s.01.homologous.fa\n" % (len(keep), refName, virusDbCount, outPrefix))
return(outPrefix+".01.homologous.fa")
def filterN(fasta, outPrefix, filtN):
fastaFile = open(fasta, 'r')
statsFile = open(outPrefix+".02.filtN.stats.txt", 'w')
filtFile = open(outPrefix+".02.filtN.fa", 'w')
stats = {}
length = 0
numN = 0
percN = 0
name = ""
seq = ""
numTotal = 0
numKeep = 0
for line in fastaFile.readlines():
if line.startswith(">"):
numTotal += 1
##keep track of entry before
if name != "":
percN = (numN * 1.0) / length
stats[name] = [length, numN, percN, seq]
statsFile.write("%s\t%d\t%d\t%0.2f\n" % (name.split()[0].strip(), length, numN, percN))
if percN <= filtN:
filtFile.write(">%s\n%s\n" % (name, seq))
numKeep += 1
##reset variables
name = line[1:].strip()
length = 0
numN = 0
seq = ""
else:
length += len(line.strip())
numN += line.upper().count('N')
seq += line.strip()
#add last entry
percN = (numN * 1.0) / length
stats[name] = [length, numN, percN, seq]
statsFile.write("%s\t%d\t%d\t%0.2f\n" % (name, length, numN, percN))
if percN <= filtN:
filtFile.write(">%s\n%s\n" % (name, seq))
numKeep += 1
filtFile.close()
statsFile.close()
print "\tKeeping %d out of %d records with N content <= %0.2f" % (numKeep, numTotal, filtN)
def rc(seq):
newSeq = ""
for i in range(len(seq),0,-1):
newSeq += RC_CODE[seq[(i-1)]]
#print newSeq
return newSeq
def main():
parser = argparse.ArgumentParser(description='assessPrimers')
parser.add_argument('virusDb', type=str, help='fasta file of all viral sequences (NUCLEOTIDE)')
parser.add_argument('f_primers', type=str, help='fasta file of forward primer sequences (5\'->3\' NUCLEOTIDE)')
parser.add_argument('r_primers', type=str, help='fasta file of reverse primer sequences (5\'->3\' NUCLEOTIDE)')
parser.add_argument('outPrefix', type=str, help='prefix for output files')
parser.add_argument('--refProtein', type=str, help='fasta file of reference protein sequence (AMINO ACID)')
parser.add_argument('--overwrite', action='store_true', help='by default, this software will use intermediate files if they exist rather than re-run steps. set this flag to overwrite existing intermediate files.')
parser.add_argument('--blastDir', type=str, help='path to folder with BLAST tools. Specifically need makeblastdb and tblastn.')
parser.add_argument('--cdhit', type=str, help='path to cd-hit-est')
parser.add_argument('--mafft', type=str, help='path to mafft')
parser.add_argument('--filtN', type=float, default=0.05, help='threshold for filtering out sequences with N content higher than filtN. Default is 0.05')
parser.add_argument('--refProtOnly', action='store_true', help='If a refProtein is provided, setting this flag will extract only the portion of sequences that align to the refProtein.')
parser.add_argument('--idCutoff', type=float, default=0.75, help='sequences with >idCutoff similarity will be collapsed into one representative sequence. default = 0.9')
parser.add_argument('--singleMSA', action='store_true', help='flag to align all primers together in a single multiple sequence alignment. default is to align each primer pair separately.')
parser.add_argument('--kmer', type=int, default = 20, help="length of kmer to calculate entropy for")
args = parser.parse_args()
global MAKEBLASTDB
global TBLASTN
global CDHIT
global MAFFT
#TODO: check that all software is installed
MAKEBLASTDB = 'makeblastdb'
TBLASTN = 'tblastn'
CDHIT = 'cd-hit'
MAFFT = 'mafft'
if args.blastDir is not None:
MAKEBLASTDB = args.blastDir+"/makeblastdb"
TBLASTN = args.blastDir+"/tblastn"
if args.cdhit is not None: CDHIT = args.cdhit
if args.mafft is not None: MAFFT = args.mafft
F_PRIMERS = []
R_PRIMERS = []
PRIMER_NAMES = []
#TODO: Make sure primers have no illegal basepairs
#STORE PRIMER INFORMATION
f_primers = open(args.f_primers, 'r')
name = ""
seq = ""
for line in f_primers.readlines():
if line[0] == ">":
if name != "":
F_PRIMERS.append([name,seq])
name = ""
seq = ""
name = line[1:].strip()
PRIMER_NAMES.append(name)
else:
seq += line.strip()
#add last entry
F_PRIMERS.append([name, seq])
f_primers.close()
#print F_PRIMERS
#STORE PRIMER INFORMATION
r_primers = open(args.r_primers, 'r')
name = ""
seq = ""
for line in r_primers.readlines():
#print line
if line[0] == ">":
if name != "":
R_PRIMERS.append([name, rc(seq)])
name = ""
seq = ""
name = line[1:].strip()
PRIMER_NAMES.append(name)
else:
seq += line.strip()
#print "seq: "+seq
#add last entry
R_PRIMERS.append([name,rc(seq)])
r_primers.close()
print "\n***************************************************"
print "STEP 1: FILTER FOR SEQUENCES WITH HOMOLOGY TO REFERENCE PROTEIN\n"
if args.refProtein is not None:
if args.overwrite or not os.path.exists(args.outPrefix+".01.homologous.fa"):
virusDbFilt = filterVirusDb(args.virusDb, args.refProtein, args.outPrefix, args.refProtOnly)
else:
print "%s.01.homologous.fa found! Skipping step..." % args.outPrefix
virusDbFilt = args.outPrefix+".01.homologous.fa"
else:
print "--refProtein not used. Skipping step..."
virusDbFilt = args.virusDb
print "\n***************************************************"
print "STEP 2: REMOVE SEQUENCES WITH HIGH N CONTENT\n"
if args.overwrite or not os.path.exists(args.outPrefix+".02.filtN.fa"):
filterN(virusDbFilt, args.outPrefix, args.filtN)
else:
print "%s.02.filtN.fa found! Skipping step..." % args.outPrefix
print "\n***************************************************"
print "STEP 3: CLUSTER SEQUENCES WITH >%0.2f SIMILARITY. This step may take a while.\n" % args.idCutoff
if args.overwrite or not os.path.exists(args.outPrefix+".03.cd-hit.fa"):
CMD = "%s -i %s -o %s -c %0.2f" % (CDHIT, args.outPrefix+".02.filtN.fa", args.outPrefix+".03.cd-hit.fa", args.idCutoff)
print CMD
os.system(CMD)
else:
print "%s.03.cd-hit.fa found! Skipping step..." % args.outPrefix
print "\n****************************************************"
print "STEP 4: MAKE MULTIPLE SEQUENCE ALIGNMENT."
if args.singleMSA:
print "Aligning all primers in a single multiple sequence alignment.\n"
if args.overwrite or not os.path.exists(args.outPrefix+".04.mafft.all_primers.fa"):
os.system("cp "+args.outPrefix+".03.cd-hit.fa "+args.outPrefix+".03.cd-hit.all_primers.fa")
cdhit_primers = open(args.outPrefix+".03.cd-hit.all_primers.fa", 'a')
for i in range(len(F_PRIMERS)):
cdhit_primers.write(">%s\n%s\n" % (F_PRIMERS[i][0], F_PRIMERS[i][1]))
cdhit_primers.write(">%s\n%s\n" % (R_PRIMERS[i][0], R_PRIMERS[i][1]))
cdhit_primers.close()
CMD = "%s --maxiterate 1000 --localpair %s > %s" % (MAFFT, args.outPrefix+".03.cd-hit.all_primers.fa", args.outPrefix+".04.mafft.all_primers.fa")
print CMD
os.system(CMD)
else:
print "%s.04.mafft.all_primers.fa found! Skipping step..." % args.outPrefix
else:
print "This step may take a while. If you are testing multiple primers, you can speed up this step by aligning all primers together with the --singleMSA option.\n"
for i in range(len(F_PRIMERS)):
curNo = str((i+1))
if | |
0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x01, 0xF8, 0x00, # OOOOOO
0x06, 0x07, 0x00, # OO OOO
0x18, 0x01, 0xC0, # OO OOO
0x30, 0x00, 0xE0, # OO OOO
0x20, 0x00, 0x70, # O OOO
0x40, 0x00, 0x38, # O OOO
0x40, 0x00, 0x3C, # O OOOO
0x80, 0x00, 0x1C, # O OOO
0x80, 0x00, 0x1E, # O OOOO
0x80, 0x00, 0x1E, # O OOOO
0x80, 0x00, 0x1E, # O OOOO
0x80, 0x00, 0x1E, # O OOOO
0xC0, 0x00, 0x3E, # OO OOOOO
0x40, 0x00, 0x3E, # O OOOOO
0x60, 0x00, 0x7C, # OO OOOOO
0x30, 0x00, 0xFC, # OO OOOOOO
0x18, 0x01, 0xF8, # OO OOOOOO
0x0E, 0x07, 0xF8, # OOO OOOOOOOO
0x07, 0xFF, 0xF0, # OOOOOOOOOOOOOOO
0x01, 0xFF, 0xC0, # OOOOOOOOOOO
0x00, 0x3F, 0x00, # OOOOOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @8000 'n' (17 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0xFF, 0xFF, 0x80, # OOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0x80, # OOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0x80, # OOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0x80, # OOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0x80, # OOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0x80, # OOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0x80, # OOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0x80, # OOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0x80, # OOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0x80, # OOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0x80, # OOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0x80, # OOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0x80, # OOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0x80, # OOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0x80, # OOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0x80, # OOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0x80, # OOOOOOOOOOOOOOOOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @8096 'o' (21 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0xFF, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOOO
0xE0, 0x00, 0x38, # OOO OOO
0xE0, 0x00, 0x38, # OOO OOO
0xE0, 0x00, 0x38, # OOO OOO
0xE0, 0x00, 0x38, # OOO OOO
0xE0, 0x00, 0x38, # OOO OOO
0xE0, 0x00, 0x38, # OOO OOO
0xE0, 0x00, 0x38, # OOO OOO
0xE0, 0x00, 0x38, # OOO OOO
0xE0, 0x00, 0x38, # OOO OOO
0xE0, 0x00, 0x38, # OOO OOO
0xE0, 0x00, 0x38, # OOO OOO
0xE0, 0x00, 0x38, # OOO OOO
0xE0, 0x00, 0x38, # OOO OOO
0xE0, 0x00, 0x38, # OOO OOO
0xE0, 0x00, 0x38, # OOO OOO
0xFF, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @8192 'p' (21 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0xFF, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOOO
0xF0, 0x00, 0x78, # OOOO OOOO
0xF0, 0x00, 0x78, # OOOO OOOO
0xF0, 0x00, 0x78, # OOOO OOOO
0xF0, 0x00, 0x78, # OOOO OOOO
0xF0, 0x00, 0x78, # OOOO OOOO
0xF0, 0x00, 0x78, # OOOO OOOO
0xF0, 0x00, 0x78, # OOOO OOOO
0xF0, 0x00, 0x78, # OOOO OOOO
0xF0, 0x00, 0x78, # OOOO OOOO
0xF0, 0x00, 0x78, # OOOO OOOO
0xF0, 0x00, 0x78, # OOOO OOOO
0xF0, 0x00, 0x78, # OOOO OOOO
0xF0, 0x00, 0x78, # OOOO OOOO
0xFF, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @8288 'q' (21 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0xFF, 0xFF, 0xE0, # OOOOOOOOOOOOOOOOOOO
0x80, 0x00, 0x70, # O OOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0xFF, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOOO
0x7F, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOO
0x3F, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @8384 'r' (21 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x3F, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOO
0x7F, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0xF8, # OOOOOOOOOOOOOOOOOOOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x78, # O OOOO
0x80, 0x00, 0x70, # O OOO
0xFF, 0xFF, 0xE0, # OOOOOOOOOOOOOOOOOOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @8480 's' (8 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x18, # OO
0x3C, # OOOO
0x3C, # OOOO
0x7E, # OOOOOO
0xFF, # OOOOOOOO
0xFF, # OOOOOOOO
0x7E, # OOOOOO
0x3C, # OOOO
0x3C, # OOOO
0x18, # OO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @8512 't' (17 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x80, 0x00, # O
0x01, 0xC0, 0x00, # OOO
0x01, 0xC0, 0x00, # OOO
0x03, 0xE0, 0x00, # OOOOO
0x07, 0xF0, 0x00, # OOOOOOO
0x07, 0xF0, 0x00, # OOOOOOO
0x0F, 0xF8, 0x00, # OOOOOOOOO
0x1F, 0xFC, 0x00, # OOOOOOOOOOO
0x3F, 0xFE, 0x00, # OOOOOOOOOOOOO
0x3F, 0xFE, 0x00, # OOOOOOOOOOOOO
0x7F, 0xFF, 0x00, # OOOOOOOOOOOOOOO
0xFF, 0xFF, 0x80, # OOOOOOOOOOOOOOOOO
0xFF, 0xFF, 0x80, | |
<reponame>askingalot/amy
import re
from dal import autocomplete
import django_filters
from django.db.models import Q
from django.forms import widgets
from django_countries import Countries
from workshops.forms import bootstrap_helper_filter, SIDEBAR_DAL_WIDTH
from workshops.models import (
Event,
Organization,
Person,
Airport,
EventRequest,
Tag,
Task,
Award,
InvoiceRequest,
EventSubmission,
DCSelfOrganizedEventRequest,
TrainingRequest,
Membership,
)
EMPTY_SELECTION = (None, '---------')
class AllCountriesFilter(django_filters.ChoiceFilter):
@property
def field(self):
qs = self.model._default_manager.distinct()
qs = qs.order_by(self.name).values_list(self.name, flat=True)
choices = [o for o in qs if o]
countries = Countries()
countries.only = choices
self.extra['choices'] = list(countries)
self.extra['choices'].insert(0, EMPTY_SELECTION)
return super().field
class ForeignKeyAllValuesFilter(django_filters.ChoiceFilter):
def __init__(self, model, *args, **kwargs):
self.lookup_model = model
super().__init__(*args, **kwargs)
@property
def field(self):
name = self.name
model = self.lookup_model
qs1 = self.model._default_manager.distinct()
qs1 = qs1.order_by(name).values_list(name, flat=True)
qs2 = model.objects.filter(pk__in=qs1)
self.extra['choices'] = [(o.pk, str(o)) for o in qs2]
self.extra['choices'].insert(0, EMPTY_SELECTION)
return super().field
class EventStateFilter(django_filters.ChoiceFilter):
def filter(self, qs, value):
if isinstance(value, django_filters.fields.Lookup):
value = value.value
# no filtering
if value in ([], (), {}, None, '', 'all'):
return qs
# no need to check if value exists in self.extra['choices'] because
# validation is done by django_filters
try:
return getattr(qs, value)()
except AttributeError:
return qs
class AMYFilterSet(django_filters.FilterSet):
"""
This base class sets FormHelper.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Set default FormHelper
self.form.helper = bootstrap_helper_filter
class EventFilter(AMYFilterSet):
assigned_to = ForeignKeyAllValuesFilter(Person)
host = ForeignKeyAllValuesFilter(Organization)
administrator = ForeignKeyAllValuesFilter(Organization)
STATUS_CHOICES = [
('', 'All'),
('active', 'Active'),
('past_events', 'Past'),
('ongoing_events', 'Ongoing'),
('upcoming_events', 'Upcoming'),
('unpublished_events', 'Unpublished'),
('published_events', 'Published'),
('uninvoiced_events', 'Uninvoiced'),
('metadata_changed', 'Detected changes in metadata'),
]
state = EventStateFilter(choices=STATUS_CHOICES, label='Status')
invoice_status = django_filters.ChoiceFilter(
choices=(EMPTY_SELECTION, ) + Event.INVOICED_CHOICES,
)
country = AllCountriesFilter()
class Meta:
model = Event
fields = [
'assigned_to',
'tags',
'host',
'administrator',
'invoice_status',
'completed',
'country',
]
order_by = ['-slug', 'slug', 'start', '-start', 'end', '-end']
def filter_active_eventrequest(qs, name, value):
if value == 'true':
return qs.filter(active=True)
elif value == 'false':
return qs.filter(active=False)
return qs
class EventRequestFilter(AMYFilterSet):
    """Sidebar filter set for the event-request list view."""
    assigned_to = ForeignKeyAllValuesFilter(Person)
    country = AllCountriesFilter()
    # tri-state open/closed filter handled by filter_active_eventrequest
    active = django_filters.ChoiceFilter(
        choices=(('all', 'All'), ('true', 'Open'), ('false', 'Closed')),
        label='Status', method=filter_active_eventrequest,
        widget=widgets.RadioSelect,
    )
    workshop_type = django_filters.ChoiceFilter(
        choices=(('', 'All'), ('swc', 'Software-Carpentry'),
                 ('dc', 'Data-Carpentry')),
        label='Workshop type',
        widget=widgets.RadioSelect,
    )
    class Meta:
        model = EventRequest
        fields = [
            'assigned_to',
            'workshop_type',
            'active',
            'country',
        ]
        order_by = ['-created_at', 'created_at']
class OrganizationFilter(AMYFilterSet):
    """Sidebar filter set for the organization list view."""
    country = AllCountriesFilter()
    # spans the Organization -> Membership relation; matches any
    # membership the organization ever had
    membership__variant = django_filters.MultipleChoiceFilter(
        label='Memberships (current or past)',
        choices=Membership.MEMBERSHIP_CHOICES,
    )
    class Meta:
        model = Organization
        fields = [
            'country',
        ]
        order_by = ['fullname', '-fullname', 'domain', '-domain', ]
def filter_taught_workshops(queryset, name, values):
    """Limit Persons to only instructors from events with specific tags.

    Kept as a module-level function because django-filters cannot resolve
    `method='filter_taught_workshops'` given as a string on this
    FilterSet; it would try to call the string itself and fail.
    """
    if not values:
        return queryset
    narrowed = queryset.filter(task__role__name='instructor',
                               task__event__tags__in=values)
    return narrowed.distinct()
class MembershipFilter(AMYFilterSet):
    """Sidebar filter set for the membership list view."""
    organization_name = django_filters.CharFilter(
        label='Organization name',
        name='organization__fullname',
        lookup_expr='icontains',
    )
    # model choices prefixed with an empty "Any" option
    MEMBERSHIP_CHOICES = (('', 'Any'),) + Membership.MEMBERSHIP_CHOICES
    variant = django_filters.ChoiceFilter(choices=MEMBERSHIP_CHOICES)
    CONTRIBUTION_CHOICES = (('', 'Any'),) + Membership.CONTRIBUTION_CHOICES
    contribution_type = django_filters.ChoiceFilter(choices=CONTRIBUTION_CHOICES)
    class Meta:
        model = Membership
        fields = [
            'organization_name',
            'variant',
            'contribution_type',
        ]
class PersonFilter(AMYFilterSet):
    """Sidebar filter set for the person list view."""
    taught_workshops = django_filters.ModelMultipleChoiceFilter(
        queryset=Tag.objects.all(), label='Taught at workshops of type',
        method=filter_taught_workshops,
    )
    class Meta:
        model = Person
        fields = [
            'badges', 'taught_workshops',
        ]
        order_by = ["lastname", "-lastname", "firstname", "-firstname",
                    "email", "-email"]
    def get_order_by(self, order_value):
        """Map the pseudo-fields '(-)firstname'/'(-)lastname' onto the
        real name columns; defer anything else to the parent."""
        if order_value == 'firstname':
            return ['personal', 'middle', 'family']
        elif order_value == '-firstname':
            return ['-personal', '-middle', '-family']
        elif order_value == 'lastname':
            return ['family', 'middle', 'personal']
        elif order_value == '-lastname':
            return ['-family', '-middle', '-personal']
        return super().get_order_by(order_value)
def filter_all_persons(queryset, name, all_persons):
    """Filter only trainees when all_persons==False.

    A trainee is a person with a 'learner' task at a TTT-tagged event.
    """
    if not all_persons:
        queryset = queryset.filter(
            task__role__name='learner',
            task__event__tags__name='TTT').distinct()
    return queryset
def filter_trainees_by_trainee_name_or_email(queryset, name, value):
    """Narrow `queryset` to people matching every token of `value`.

    `value` is split on whitespace; each token must appear
    (case-insensitively) in the personal name, family name or email.
    Falsy input disables the filter.
    """
    if not value:
        return queryset
    # BUG FIX: raw string — '\s' is an invalid escape sequence in a plain
    # literal (SyntaxWarning/future error on modern Pythons)
    tokens = re.split(r'\s+', value)
    for token in tokens:
        queryset = queryset.filter(Q(personal__icontains=token) |
                                   Q(family__icontains=token) |
                                   Q(email__icontains=token))
    return queryset
def filter_trainees_by_unevaluated_homework_presence(queryset, name, flag):
    """When `flag` is truthy, keep only trainees with at least one
    not-yet-evaluated ('n') training-progress entry."""
    if not flag:
        return queryset
    return queryset.filter(trainingprogress__state='n').distinct()
def filter_trainees_by_training_request_presence(queryset, name, flag):
    """Tri-state filter on training-request submission.

    None -> unchanged; True -> only people who submitted a training
    request; anything else -> only people who did not.
    """
    if flag is None:
        return queryset
    if flag is True:
        return queryset.filter(trainingrequest__isnull=False).distinct()
    return queryset.filter(trainingrequest__isnull=True)
def filter_trainees_by_instructor_status(queryset, name, choice):
    """Filter people by their SWC/DC instructor badge combination.

    '' leaves the queryset untouched; 'eligible' selects people who could
    be certified but currently hold neither badge; 'no' selects people
    with neither badge.
    """
    if choice == '':
        return queryset
    if choice == 'swc-and-dc':
        return queryset.filter(is_swc_instructor=True, is_dc_instructor=True)
    if choice == 'swc-or-dc':
        return queryset.filter(Q(is_swc_instructor=True) |
                               Q(is_dc_instructor=True))
    if choice == 'swc':
        return queryset.filter(is_swc_instructor=True)
    if choice == 'dc':
        return queryset.filter(is_dc_instructor=True)
    if choice == 'eligible':
        # Instructor-eligible but without any badge. Kept as
        # Q()-expressions so the condition is easy to tweak later.
        return queryset.filter(
            Q(instructor_eligible=True) &
            (Q(is_swc_instructor=False) & Q(is_dc_instructor=False))
        )
    # choice == 'no'
    return queryset.filter(is_swc_instructor=False, is_dc_instructor=False)
def filter_trainees_by_training(queryset, name, training):
    """Keep only learners of the given training event; `None` disables
    the filter."""
    if training is None:
        return queryset
    narrowed = queryset.filter(task__role__name='learner',
                               task__event=training)
    return narrowed.distinct()
class TraineeFilter(AMYFilterSet):
    """Sidebar filter set for the trainee list view; most filters
    delegate to the module-level filter_trainees_* helpers."""
    search = django_filters.CharFilter(
        method=filter_trainees_by_trainee_name_or_email,
        label='Name or Email')
    all_persons = django_filters.BooleanFilter(
        label='Include all people, not only trainees',
        method=filter_all_persons,
        widget=widgets.CheckboxInput)
    homework = django_filters.BooleanFilter(
        label='Only trainees with unevaluated homework',
        widget=widgets.CheckboxInput,
        method=filter_trainees_by_unevaluated_homework_presence,
    )
    training_request = django_filters.BooleanFilter(
        label='Is training request present?',
        method=filter_trainees_by_training_request_presence,
    )
    is_instructor = django_filters.ChoiceFilter(
        label='Is SWC/DC instructor?',
        method=filter_trainees_by_instructor_status,
        choices=[
            ('', 'Unknown'),
            ('swc-and-dc', 'Both SWC and DC'),
            ('swc-or-dc', 'SWC or DC '),
            ('swc', 'SWC instructor'),
            ('dc', 'DC instructor'),
            ('eligible', 'No, but eligible to be certified'),
            ('no', 'No'),
        ]
    )
    training = django_filters.ModelChoiceFilter(
        queryset=Event.objects.ttt(),
        method=filter_trainees_by_training,
        label='Training',
        widget=autocomplete.ModelSelect2(
            url='ttt-event-lookup',
            attrs=SIDEBAR_DAL_WIDTH,
        ),
    )
    class Meta:
        model = Person
        fields = [
            'search',
            'all_persons',
            'homework',
            'is_instructor',
            'training',
        ]
        order_by = ["-last_login", "lastname", "-lastname", "firstname", "-firstname",
                    "email", "-email"]
    def get_order_by(self, order_value):
        """Map '(-)firstname'/'(-)lastname' pseudo-fields onto the real
        name columns; defer anything else to the parent."""
        if order_value == 'firstname':
            return ['personal', 'middle', 'family']
        elif order_value == '-firstname':
            return ['-personal', '-middle', '-family']
        elif order_value == 'lastname':
            return ['family', 'middle', 'personal']
        elif order_value == '-lastname':
            return ['-family', '-middle', '-personal']
        else:
            return super().get_order_by(order_value)
def filter_matched(queryset, name, choice):
    """Filter training requests by their match status.

    '' -> unchanged; 'u' -> unmatched (no person); 'p' -> matched to a
    person who is not a TTT learner; anything else ('t') -> matched to a
    person who is a TTT learner.
    """
    if choice == '':
        return queryset
    elif choice == 'u':  # unmatched
        return queryset.filter(person=None)
    elif choice == 'p':  # matched trainee, unmatched training
        return queryset.filter(person__isnull=False)\
                       .exclude(person__task__role__name='learner',
                                person__task__event__tags__name='TTT')\
                       .distinct()
    else:  # choice == 't' <==> matched trainee and training
        return queryset.filter(person__task__role__name='learner',
                               person__task__event__tags__name='TTT')\
                       .distinct()
def filter_by_person(queryset, name, value):
    """Narrow training requests by a free-text person search.

    `value` is split on whitespace; every token must match (case-
    insensitively) a name or email field, either on the request itself or
    on the matched Person. An empty string disables the filter.
    """
    if value == '':
        return queryset
    # BUG FIX: raw string — '\s' is an invalid escape sequence in a plain
    # literal (SyntaxWarning/future error on modern Pythons)
    tokens = re.split(r'\s+', value)
    # Each token must match one of the name/email fields of the request
    # or of the matched person.
    for token in tokens:
        queryset = queryset.filter(
            Q(personal__icontains=token) |
            Q(middle__icontains=token) |
            Q(family__icontains=token) |
            Q(email__icontains=token) |
            Q(person__personal__icontains=token) |
            Q(person__middle__icontains=token) |
            Q(person__family__icontains=token) |
            Q(person__email__icontains=token)
        )
    return queryset
def filter_affiliation(queryset, name, affiliation):
    """Case-insensitive affiliation search over both the request and the
    matched person; an empty string disables the filter."""
    if affiliation == '':
        return queryset
    matches = queryset.filter(
        Q(affiliation__icontains=affiliation) |
        Q(person__affiliation__icontains=affiliation))
    return matches.distinct()
def filter_training_requests_by_state(queryset, name, choice):
    """Filter training requests by state.

    The special choice 'no_d' excludes state 'd' (per the filter's
    choices, this shows pending-or-accepted requests); any other choice
    filters on that exact state.
    """
    if choice == 'no_d':
        return queryset.exclude(state='d')
    return queryset.filter(state=choice)
class TrainingRequestFilter(AMYFilterSet):
    """Sidebar filter set for the training-request list view."""
    search = django_filters.CharFilter(
        label='Name or Email',
        method=filter_by_person,
    )
    group_name = django_filters.CharFilter(
        name='group_name',
        lookup_expr='icontains',
        label='Group')
    state = django_filters.ChoiceFilter(
        label='State',
        choices=[('no_d', 'Pending or accepted')] + TrainingRequest.STATES,
        method=filter_training_requests_by_state,
    )
    matched = django_filters.ChoiceFilter(
        label='Is Matched?',
        choices=(
            ('', 'Unknown'),
            ('u', 'Unmatched'),
            ('p', 'Matched trainee, unmatched training'),
            ('t', 'Matched trainee and training'),
        ),
        method=filter_matched,
    )
    affiliation = django_filters.CharFilter(
        method=filter_affiliation,
    )
    location = django_filters.CharFilter(lookup_expr='icontains')
    class Meta:
        model = TrainingRequest
        fields = [
            'search',
            'group_name',
            'state',
            'matched',
            'affiliation',
            'location',
        ]
        order_by = ['created_at',
                    '-created_at',
                    'trainee firstname',
                    '-trainee firstname',
                    'trainee lastname',
                    '-trainee lastname']
    def get_order_by(self, order_value):
        """Map the human-readable 'trainee (first|last)name' pseudo-fields
        onto the real name columns; defer anything else to the parent."""
        if order_value == 'trainee firstname':
            return ['personal', 'family']
        elif order_value == '-trainee firstname':
            return ['-personal', '-family']
        elif order_value == 'trainee lastname':
            return ['family', 'personal']
        elif order_value == '-trainee lastname':
            return ['-family', '-personal']
        else:
            return super().get_order_by(order_value)
class TaskFilter(AMYFilterSet):
    """Sidebar filter set for the task list view."""
    event = django_filters.ModelChoiceFilter(
        queryset=Event.objects.all(),
        label='Event',
        widget=autocomplete.ModelSelect2(
            url='event-lookup',
            attrs=SIDEBAR_DAL_WIDTH,
        ),
    )
    class Meta:
        model = Task
        fields = [
            'event',
            # can't filter on person because person's name contains 3 fields:
            # person.personal, person.middle, person.family
            # 'person',
            'role',
        ]
        # [lookup, label] pairs for the ordering dropdown
        order_by = [
            ['event__slug', 'Event'],
            ['-event__slug', 'Event (descending)'],
            ['person__family', 'Person'],
            ['-person__family', 'Person (descending)'],
            ['role', 'Role'],
            ['-role', 'Role (descending)'],
        ]
class AirportFilter(AMYFilterSet):
    """Sidebar filter set for the airport list view."""
    fullname = django_filters.CharFilter(lookup_expr='icontains')
    class Meta:
        model = Airport
        fields = [
            'fullname',
        ]
        order_by = ["iata", "-iata", "fullname", "-fullname"]
class BadgeAwardsFilter(AMYFilterSet):
    """Filter set for awards of a given badge, bounded by award date."""
    awarded_after = django_filters.DateFilter(name='awarded',
                                              lookup_expr='gte')
    awarded_before = django_filters.DateFilter(name='awarded',
                                               lookup_expr='lte')
    class Meta:
        model = Award
        fields = (
            'awarded_after', 'awarded_before', 'event',
        )
        order_by = [
            '-awarded', 'awarded', '-person__family',
            'person__family',
        ]
class InvoiceRequestFilter(AMYFilterSet):
    """Sidebar filter set for the invoice-request list view."""
    # model choices prefixed with an empty "All" option
    STATUS_CHOICES = (('', 'All'),) + InvoiceRequest.STATUS_CHOICES
    status = django_filters.ChoiceFilter(
        choices=STATUS_CHOICES,
    )
    organization = django_filters.ModelChoiceFilter(
        queryset=Organization.objects.all(),
        label='Organization',
        widget=autocomplete.ModelSelect2(
            url='organization-lookup',
            attrs=SIDEBAR_DAL_WIDTH,
        ),
    )
    class Meta:
        model = InvoiceRequest
        fields = [
            'status',
            'organization',
        ]
        order_by = [
            '-event__slug', 'event__slug',
            'organization__domain', '-organization__domain',
        ]
def filter_active_eventsubmission(qs, name, | |
# repo: HybridRobotics/car-racing
import numpy as np
import sympy as sp
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.animation as anim
from utils import base, racing_env
from system import vehicle_dynamics
from matplotlib import animation
from utils.constants import *
import pickle
# off-board controller
class PIDTracking(base.PIDTracking):
    """Off-board PID tracking controller; thin wrapper over the base
    implementation."""

    def __init__(self, vt=0.6, eyt=0.0):
        super().__init__(vt, eyt)
class MPCTracking(base.MPCTracking):
    """Off-board MPC tracking controller; thin wrapper over the base
    implementation."""

    def __init__(self, mpc_lti_param, system_param):
        super().__init__(mpc_lti_param, system_param)
class MPCCBFRacing(base.MPCCBFRacing):
    """Off-board MPC-CBF racing controller."""

    def __init__(self, mpc_cbf_param, system_param):
        super().__init__(mpc_cbf_param, system_param)
        # off-board simulation: no real-time interface
        self.realtime_flag = False
class LMPCRacingGame(base.LMPCRacingGame):
    """Off-board LMPC racing-game controller."""

    def __init__(self, lmpc_param, racing_game_param=None, system_param=None):
        super().__init__(
            lmpc_param,
            racing_game_param=racing_game_param,
            system_param=system_param,
        )
        # off-board simulation: no real-time interface
        self.realtime_flag = False
# off-board dynamic model
class DynamicBicycleModel(base.DynamicBicycleModel):
    """Off-board dynamic bicycle model with a zero-input state estimator."""

    def __init__(self, name=None, param=None, xcurv=None, xglob=None, system_param=None):
        base.DynamicBicycleModel.__init__(self, name=name, param=param, system_param=system_param)

    # in this estimation, the vehicle is assumed to move with input equal to zero
    def get_estimation(self, xglob, xcurv):
        """Propagate (xglob, xcurv) one timestep forward assuming zero input.

        Returns the estimated (xcurv_est, xglob_est) pair. Indices 4 and 5
        appear to be the two position components (longitudinal/lateral in
        curvilinear frame, x/y in global frame) — inferred from usage in the
        plotting code; confirm against `vehicle_dynamics`.
        """
        curv = racing_env.get_curvature(self.lap_length, self.point_and_tangent, xcurv[4])
        xcurv_est = np.zeros((X_DIM,))
        xglob_est = np.zeros((X_DIM,))
        xcurv_est[0:3] = xcurv[0:3]
        xcurv_est[3] = xcurv[3] + self.timestep * (
            xcurv[2]
            - (xcurv[0] * np.cos(xcurv[3]) - xcurv[1] * np.sin(xcurv[3]))
            / (1 - curv * xcurv[5])
            * curv
        )
        xcurv_est[4] = xcurv[4] + self.timestep * (
            (xcurv[0] * np.cos(xcurv[3]) - xcurv[1] * np.sin(xcurv[3])) / (1 - curv * xcurv[5])
        )
        xcurv_est[5] = xcurv[5] + self.timestep * (
            xcurv[0] * np.sin(xcurv[3]) + xcurv[1] * np.cos(xcurv[3])
        )
        xglob_est[0:3] = xglob[0:3]
        xglob_est[3] = xglob[3] + self.timestep * (xglob[2])
        xglob_est[4] = xglob[4] + self.timestep * (
            xglob[0] * np.cos(xglob[3]) - xglob[1] * np.sin(xglob[3])
        )
        # BUG FIX: this second update previously wrote to xglob_est[4]
        # (based on xglob[4]) again, overwriting the cos-term and leaving
        # xglob_est[5] at zero; the sin-term belongs to index 5, mirroring
        # the xcurv_est[5] update above.
        xglob_est[5] = xglob[5] + self.timestep * (
            xglob[0] * np.sin(xglob[3]) + xglob[1] * np.cos(xglob[3])
        )
        return xcurv_est, xglob_est

    # get prediction for mpc-cbf controller
    def get_trajectory_nsteps(self, n):
        """Roll the zero-input estimator forward `n` steps and return the
        stacked (X_DIM, n) curvilinear and global trajectories."""
        xcurv_nsteps = np.zeros((X_DIM, n))
        xglob_nsteps = np.zeros((X_DIM, n))
        for index in range(n):
            if index == 0:
                xcurv_est, xglob_est = self.get_estimation(self.xglob, self.xcurv)
            else:
                xcurv_est, xglob_est = self.get_estimation(
                    xglob_nsteps[:, index - 1], xcurv_nsteps[:, index - 1]
                )
            # wrap the longitudinal position back into [0, lap_length)
            while xcurv_est[4] > self.lap_length:
                xcurv_est[4] = xcurv_est[4] - self.lap_length
            xcurv_nsteps[:, index] = xcurv_est
            xglob_nsteps[:, index] = xglob_est
        return xcurv_nsteps, xglob_nsteps
class NoDynamicsModel(base.NoDynamicsModel):
    """Off-board placeholder vehicle with no dynamics of its own."""

    def __init__(self, name=None, param=None, xcurv=None, xglob=None):
        super().__init__(name=name, param=param)
# off-board simulator
class CarRacingSim(base.CarRacingSim):
def __init__(self):
base.CarRacingSim.__init__(self)
self.ax = None
self.fig = None
def add_vehicle(self, vehicle):
self.vehicles[vehicle.name] = vehicle
self.vehicles[vehicle.name].set_track(self.track)
self.vehicles[vehicle.name].set_timestep(self.timestep)
    def sim(
        self,
        sim_time=50.0,
        one_lap=False,
        one_lap_name=None,
        animating_flag=False,
    ):
        """Advance every vehicle for `sim_time` seconds in `timestep`
        increments; with `one_lap`, stop as soon as the vehicle named
        `one_lap_name` finishes its current lap.

        NOTE(review): `animating_flag` is currently unused.
        """
        if one_lap == True:
            current_lap = self.vehicles[one_lap_name].laps
        for i in range(0, int(sim_time / self.timestep)):
            for name in self.vehicles:
                # update system state
                self.vehicles[name].forward_one_step(self.vehicles[name].realtime_flag)
            # stop once the watched vehicle has completed one more lap
            if (one_lap == True) and (self.vehicles[one_lap_name].laps > current_lap):
                print("lap completed")
                break
    def plot_state(self, name):
        """Plot vx, vy, heading error and lateral error over time for the
        named vehicle, stitching the per-lap histories together."""
        laps = self.vehicles[name].laps
        # one row per recorded timestep over all laps; +1 accommodates the
        # final sample of the current (possibly partial) lap
        time = np.zeros(int(round(self.vehicles[name].time / self.timestep)) + 1)
        traj = np.zeros((int(round(self.vehicles[name].time / self.timestep)) + 1, X_DIM))
        counter = 0
        # completed laps: copy time stamps and curvilinear states
        for i in range(0, laps):
            for j in range(
                0,
                int(
                    round(
                        (self.vehicles[name].times[i][-1] - self.vehicles[name].times[i][0])
                        / self.timestep
                    )
                ),
            ):
                time[counter] = self.vehicles[name].times[i][j]
                traj[counter, :] = self.vehicles[name].xcurvs[i][j][:]
                counter = counter + 1
        # current (possibly partial) lap, including its final sample
        for i in range(
            0,
            int(
                round(
                    (self.vehicles[name].lap_times[-1] - self.vehicles[name].lap_times[0])
                    / self.timestep
                )
            )
            + 1,
        ):
            time[counter] = self.vehicles[name].lap_times[i]
            traj[counter, :] = self.vehicles[name].lap_xcurvs[i][:]
            counter = counter + 1
        # state indices: 0 = vx, 1 = vy, 3 = heading error, 5 = lateral error
        fig, axs = plt.subplots(4)
        axs[0].plot(time, traj[:, 0], "-o", linewidth=1, markersize=1)
        axs[0].set_xlabel("time [s]", fontsize=14)
        axs[0].set_ylabel("$v_x$ [m/s]", fontsize=14)
        axs[1].plot(time, traj[:, 1], "-o", linewidth=1, markersize=1)
        axs[1].set_xlabel("time [s]", fontsize=14)
        axs[1].set_ylabel("$v_y$ [m/s]", fontsize=14)
        axs[2].plot(time, traj[:, 3], "-o", linewidth=1, markersize=1)
        axs[2].set_xlabel("time [s]", fontsize=14)
        axs[2].set_ylabel("$e_{\psi}$ [rad]", fontsize=14)
        axs[3].plot(time, traj[:, 5], "-o", linewidth=1, markersize=1)
        axs[3].set_xlabel("time [s]", fontsize=14)
        axs[3].set_ylabel("$e_y$ [m]", fontsize=14)
        plt.show()
def plot_states(self):
for name in self.vehicles:
self.plot_state(name)
plt.show()
def plot_input(self, name):
laps = self.vehicles[name].laps
time = np.zeros(int(round(self.vehicles[name].time / self.timestep)))
u = np.zeros((int(round(self.vehicles[name].time / self.timestep)), 2))
counter = 0
for i in range(0, laps):
for j in range(
0,
int(
round(
(self.vehicles[name].times[i][-1] - self.vehicles[name].times[i][0])
/ self.timestep
)
),
):
time[counter] = self.vehicles[name].times[i][j]
u[counter, :] = self.vehicles[name].inputs[i][j][:]
counter = counter + 1
for i in range(
0,
int(
round(
(self.vehicles[name].lap_times[-1] - self.vehicles[name].lap_times[0])
/ self.timestep
)
),
):
time[counter] = self.vehicles[name].lap_times[i]
u[counter, :] = self.vehicles[name].lap_inputs[i][:]
counter = counter + 1
fig, axs = plt.subplots(2)
axs[0].plot(time, u[:, 0], "-o", linewidth=1, markersize=1)
axs[0].set_xlabel("time [s]", fontsize=14)
axs[0].set_ylabel("$/delta$ [rad]", fontsize=14)
axs[1].plot(time, u[:, 1], "-o", linewidth=1, markersize=1)
axs[1].set_xlabel("time [s]", fontsize=14)
axs[1].set_ylabel("$a$ [m/s^2]", fontsize=14)
plt.show()
def plot_inputs(self):
for name in self.vehicles:
self.plot_input(name)
plt.show()
    def plot_simulation(self):
        """Draw the track and overlay each vehicle's global x/y trajectory
        (state indices 4 and 5)."""
        fig, ax = plt.subplots()
        # plotting racing track
        self.track.plot_track(ax)
        # plot trajectories
        for name in self.vehicles:
            laps = self.vehicles[name].laps
            # one row per timestep; +1 for the final sample of the current lap
            trajglob = np.zeros((int(round(self.vehicles[name].time / self.timestep)) + 1, X_DIM))
            counter = 0
            # completed laps
            for i in range(0, laps):
                for j in range(
                    0,
                    int(
                        round(
                            (self.vehicles[name].times[i][-1] - self.vehicles[name].times[i][0])
                            / self.timestep
                        )
                    ),
                ):
                    trajglob[counter, :] = self.vehicles[name].xglobs[i][j][:]
                    counter = counter + 1
            # current (possibly partial) lap, including its final sample
            for i in range(
                0,
                int(
                    round(
                        (self.vehicles[name].lap_times[-1] - self.vehicles[name].lap_times[0])
                        / self.timestep
                    )
                )
                + 1,
            ):
                trajglob[counter, :] = self.vehicles[name].lap_xglobs[i][:]
                counter = counter + 1
            ax.plot(trajglob[:, 4], trajglob[:, 5])
        plt.show()
def animate(
self, filename="untitled", ani_time=400, lap_number=None, racing_game=False, imagemagick=False
):
num_veh = len(self.vehicles) - 1
if racing_game:
fig = plt.figure(figsize=(10, 4))
ax = fig.add_axes([0.05, 0.07, 0.56, 0.9])
ax_1 = fig.add_axes([0.63, 0.07, 0.36, 0.9])
ax_1.set_xticks([])
ax_1.set_yticks([])
self.track.plot_track(ax_1, center_line=False)
patches_vehicles_1 = {}
patches_vehicles_lmpc_prediction = []
patches_vehicles_mpc_cbf_prediction = []
(lmpc_prediciton_line,) = ax.plot([], [])
(mpc_cbf_prediction_line,) = ax.plot([], [])
vehicles_interest = []
all_local_spline = []
all_local_traj = []
(local_line,) = ax_1.plot([], [])
(local_spline,) = ax_1.plot([], [])
for jj in range(num_veh + 1):
(local_spline_1,) = ax_1.plot([], [])
(local_traj_1,) = ax_1.plot([], [])
all_local_spline.append(local_spline_1)
all_local_traj.append(local_traj_1)
horizon_planner = self.vehicles["ego"].ctrl_policy.racing_game_param.num_horizon_planner
local_traj_xglob = np.zeros((ani_time, horizon_planner + 1, X_DIM))
local_spline_xglob = np.zeros((ani_time, horizon_planner + 1, X_DIM))
mpc_cbf_prediction = np.zeros((ani_time, 10 + 1, X_DIM))
lmpc_prediction = np.zeros((ani_time, 12 + 1, X_DIM))
all_local_traj_xglob = []
all_local_spline_xglob = []
else:
fig, ax = plt.subplots()
# plotting racing track
self.track.plot_track(ax, center_line=False)
# plot vehicles
vertex_directions = np.array([[1.0, 1.0], [1.0, -1.0], [-1.0, -1.0], [-1.0, 1.0]])
patches_vehicles = {}
trajglobs = {}
lap_number = self.vehicles["ego"].laps
sim_time = (
int(
round(
(
self.vehicles["ego"].times[lap_number - 1][-1]
- self.vehicles["ego"].times[lap_number - 1][0]
)
/ self.vehicles["ego"].timestep
)
)
+ 1
)
if ani_time > sim_time:
ani_time = sim_time
for name in self.vehicles:
if name == "ego":
face_color = "red"
else:
face_color = "blue"
edge_color = "None"
patches_vehicle = patches.Polygon(
vertex_directions,
alpha=1.0,
closed=True,
fc=face_color,
ec="None",
zorder=10,
linewidth=2,
)
if racing_game:
patches_vehicle_1 = patches.Polygon(
vertex_directions,
alpha=1.0,
closed=True,
fc=face_color,
ec="None",
zorder=10,
linewidth=2,
)
if name == "ego":
for jjjj in range(0, 6 + 1):
patch_lmpc = patches.Polygon(
vertex_directions,
alpha=1.0 - jjjj * 0.15,
closed=True,
fc="None",
zorder=10,
linewidth=2,
)
patches_vehicles_lmpc_prediction.append(patch_lmpc)
ax.add_patch(patches_vehicles_lmpc_prediction[jjjj])
for iiii in range(0, 5 + 1):
patch_mpc_cbf = patches.Polygon(
vertex_directions,
alpha=1.0 - iiii * 0.15,
closed=True,
fc="None",
zorder=10,
linewidth=2,
)
patches_vehicles_mpc_cbf_prediction.append(patch_mpc_cbf)
ax.add_patch(patches_vehicles_mpc_cbf_prediction[iiii])
if name == "ego":
if racing_game:
pass
else:
ax.add_patch(patches_vehicle)
else:
ax.add_patch(patches_vehicle)
if racing_game:
ax_1.add_patch(patches_vehicle_1)
ax_1.add_line(local_line)
ax_1.add_line(local_spline)
ax.add_line(lmpc_prediciton_line)
ax.add_line(mpc_cbf_prediction_line)
for jj in range(num_veh + 1):
ax_1.add_line(all_local_spline[jj])
ax_1.add_line(all_local_traj[jj])
ax_1.axis("equal")
patches_vehicles_1[name] = patches_vehicle_1
ax.axis("equal")
patches_vehicles[name] = patches_vehicle
counter = 0
trajglob = np.zeros((ani_time, X_DIM))
for j in range(ani_time):
trajglob[ani_time - 1 - counter, :] = self.vehicles[name].xglob_log[-1 - j][:]
if racing_game:
if name == "ego":
if self.vehicles[name].local_trajs[-1 - j] is None:
local_traj_xglob[ani_time - 1 - counter, :, :] = np.zeros(
(horizon_planner + 1, X_DIM)
)
mpc_cbf_prediction[ani_time - 1 - counter, :, :] = np.zeros(
(10 + 1, X_DIM)
)
lmpc_prediction[ani_time - 1 - counter, :, :] = self.vehicles[
name
].lmpc_prediction[-1 - j][:, :]
else:
local_traj_xglob[ani_time - 1 - counter, :, :] = self.vehicles[
name
].local_trajs[-1 - j][:, :]
mpc_cbf_prediction[ani_time - 1 - counter, :, :] = self.vehicles[
name
].mpc_cbf_prediction[-1 - j][:, :]
lmpc_prediction[ani_time - 1 - counter, :, :] = np.zeros(
(12 + 1, X_DIM)
)
if self.vehicles[name].vehicles_interest[-1 - j] is None:
vehicles_interest.insert(0, None)
all_local_traj_xglob.insert(0, None)
all_local_spline_xglob.insert(0, None)
else:
vehicles_interest.insert(
0,
self.vehicles[name].vehicles_interest[-1 - j],
)
all_local_traj_xglob.insert(
0, self.vehicles[name].all_local_trajs[-1 - j][:, :, :]
)
all_local_spline_xglob.insert(
0, self.vehicles[name].all_splines[-1 - j][:, :, :]
)
if self.vehicles[name].splines[-1 - j] is None:
local_spline_xglob[ani_time - 1 - counter, :, :] = np.zeros(
(horizon_planner + 1, X_DIM)
)
else:
local_spline_xglob[ani_time - 1 - counter, :, :] = self.vehicles[
name
].splines[-1 - j][:, :]
counter = counter + 1
trajglobs[name] = trajglob
def update(i):
if racing_game:
ax_1.set_xlim([trajglobs["ego"][i - 1, 4] - 2, trajglobs["ego"][i - 1, 4] + 2])
ax_1.set_ylim([trajglobs["ego"][i - 1, 5] - 2, trajglobs["ego"][i - 1, 5] + 2])
for name in patches_vehicles:
x, y = trajglobs[name][i - 1, 4], trajglobs[name][i - 1, 5]
psi = trajglobs[name][i - 1, 3]
l = self.vehicles[name].param.length / 2
w = self.vehicles[name].param.width / 2
| |
"""Run unit tests.
Use this to run tests and understand how tasks.py works.
Setup::
mkdir -p test-data/input
mkdir -p test-data/output
mysql -u root -p
CREATE DATABASE testdb;
    CREATE USER 'testusr'@'localhost' IDENTIFIED BY '<your-password>';
GRANT ALL PRIVILEGES ON testdb.* TO 'testusr'@'%';
Run tests::
pytest test_combine.py -s
Notes:
* this will create sample csv, xls and xlsx files
* test_combine_() test the main combine function
"""
from d6tstack.combine_csv import *
from d6tstack.sniffer import CSVSniffer
import d6tstack.utils
import math
import pandas as pd
# import pyarrow as pa
# import pyarrow.parquet as pq
import ntpath
import shutil
import dask.dataframe as dd
import sqlalchemy
import pytest
# file-name prefixes for the generated sample input/output files
cfg_fname_base_in = 'test-data/input/test-data-'
cfg_fname_base_out_dir = 'test-data/output'
cfg_fname_base_out = cfg_fname_base_out_dir+'/test-data-'
# sqlalchemy connection-string template for the sqlite test databases
cnxn_string = 'sqlite:///test-data/db/{}.db'
#************************************************************
# fixtures
#************************************************************
class DebugLogger(object):
    """No-op logger satisfying the combiner's logging interface."""
    def __init__(self, event):
        pass
    def send_log(self, msg, status):
        pass
    def send(self, data):
        pass
# shared no-op logger instance handed to the combiner
logger = DebugLogger('combiner')
# sample data
def create_files_df_clean():
    """Return three sample monthly DataFrames (Jan/Feb/Mar 2011) with the
    same columns: date, sales, cost, profit."""
    starts = ('1/1/2011', '2/1/2011', '3/1/2011')
    sales = (100, 200, 300)
    costs = (-80, -90, -100)
    frames = [
        pd.DataFrame({
            'date': pd.date_range(start, periods=10),
            'sales': s,
            'cost': c,
            'profit': s + c,  # cost is negative, so profit = sales + cost
        })
        for start, s, c in zip(starts, sales, costs)
    ]
    return tuple(frames)
def create_files_df_clean_combine():
    """Concatenate the three clean sample frames and cast every value to
    str (the combiner's output dtype)."""
    combined = pd.concat(create_files_df_clean())
    return combined.astype(str)
def create_files_df_clean_combine_with_filename(fname_list):
    """Combine the sample frames, tagging each row with its source file's
    basename, and cast all values to str."""
    frames = create_files_df_clean()
    for frame, fname in zip(frames, fname_list):
        frame['filename'] = os.path.basename(fname)
    combined = pd.concat(frames)
    return combined.astype(str)
def create_files_df_colmismatch_combine(cfg_col_common, allstr=True):
    """Combine the sample frames after adding a 'profit2' column to the
    third one.

    `cfg_col_common` selects an inner join (common columns only);
    `allstr` casts the result to str.
    """
    df1, df2, df3 = create_files_df_clean()
    df3['profit2'] = df3['profit'] * 2
    join = 'inner' if cfg_col_common else 'outer'
    df_all = pd.concat([df1, df2, df3], join=join)
    if allstr:
        df_all = df_all.astype(str)
    return df_all
def check_df_colmismatch_combine(dfg, is_common=False, convert_date=True):
    """Assert that `dfg` equals the expected combined mismatch frame
    (ignoring filepath/filename columns and row order); return True.

    Raises AssertionError when the frames differ.
    """
    # BUG FIX: drop(columns=...) replaces the positional-axis call
    # drop([...], 1), which was deprecated and removed in pandas 2.0
    dfg = dfg.drop(columns=['filepath', 'filename']).sort_values('date').reset_index(drop=True)
    if convert_date:
        dfg['date'] = pd.to_datetime(dfg['date'], format='%Y-%m-%d')
    dfchk = create_files_df_colmismatch_combine(is_common, False).reset_index(drop=True)[dfg.columns]
    assert dfg.equals(dfchk)
    return True
def create_files_df_colmismatch_combine2(cfg_col_common):
    """Combine the sample frames after adding 15 extra profit columns
    ('profit0'..'profit14') to the third one; cast all values to str."""
    df1, df2, df3 = create_files_df_clean()
    for i in range(15):
        df3['profit' + str(i)] = df3['profit'] * 2
    join = 'inner' if cfg_col_common else 'outer'
    return pd.concat([df1, df2, df3], join=join).astype(str)
# csv standard
@pytest.fixture(scope="module")
def create_files_csv():
    """Write the three clean sample frames as CSVs; return their paths."""
    cfg_fname = cfg_fname_base_in + 'input-csv-clean-%s.csv'
    fnames = [cfg_fname % month for month in ('jan', 'feb', 'mar')]
    for frame, fname in zip(create_files_df_clean(), fnames):
        frame.to_csv(fname, index=False)
    return fnames
@pytest.fixture(scope="module")
def create_files_csv_colmismatch():
    """CSVs where the March file carries an extra 'profit2' column."""
    df1, df2, df3 = create_files_df_clean()
    df3['profit2'] = df3['profit'] * 2
    cfg_fname = cfg_fname_base_in + 'input-csv-colmismatch-%s.csv'
    fnames = [cfg_fname % month for month in ('jan', 'feb', 'mar')]
    for frame, fname in zip((df1, df2, df3), fnames):
        frame.to_csv(fname, index=False)
    return fnames
@pytest.fixture(scope="module")
def create_files_csv_colmismatch2():
    """CSVs where the March file carries 15 extra profit columns."""
    df1, df2, df3 = create_files_df_clean()
    for i in range(15):
        df3['profit' + str(i)] = df3['profit'] * 2
    cfg_fname = cfg_fname_base_in + 'input-csv-colmismatch2-%s.csv'
    fnames = [cfg_fname % month for month in ('jan', 'feb', 'mar')]
    for frame, fname in zip((df1, df2, df3), fnames):
        frame.to_csv(fname, index=False)
    return fnames
@pytest.fixture(scope="module")
def create_files_csv_colreorder():
    """CSVs where the March file has 'cost' and 'profit' swapped."""
    df1, df2, df3 = create_files_df_clean()
    cfg_col = ['date', 'sales', 'cost', 'profit']
    cfg_col2 = ['date', 'sales', 'profit', 'cost']
    cfg_fname = cfg_fname_base_in + 'input-csv-reorder-%s.csv'
    fnames = [cfg_fname % month for month in ('jan', 'feb', 'mar')]
    column_sets = (cfg_col, cfg_col, cfg_col2)
    for frame, cols, fname in zip((df1, df2, df3), column_sets, fnames):
        frame[cols].to_csv(fname, index=False)
    return fnames
@pytest.fixture(scope="module")
def create_files_csv_noheader():
    """Header-less CSV versions of the clean sample frames."""
    cfg_fname = cfg_fname_base_in + 'input-noheader-csv-%s.csv'
    fnames = [cfg_fname % month for month in ('jan', 'feb', 'mar')]
    for frame, fname in zip(create_files_df_clean(), fnames):
        frame.to_csv(fname, index=False, header=False)
    return fnames
@pytest.fixture(scope="module")
def create_files_csv_col_renamed():
    """CSVs where the March file has 'sales' renamed to 'revenue'."""
    df1, df2, df3 = create_files_df_clean()
    df3 = df3.rename(columns={'sales': 'revenue'})
    cfg_col = ['date', 'sales', 'profit', 'cost']
    cfg_col2 = ['date', 'revenue', 'profit', 'cost']
    cfg_fname = cfg_fname_base_in + 'input-csv-renamed-%s.csv'
    fnames = [cfg_fname % month for month in ('jan', 'feb', 'mar')]
    column_sets = (cfg_col, cfg_col, cfg_col2)
    for frame, cols, fname in zip((df1, df2, df3), column_sets, fnames):
        frame[cols].to_csv(fname, index=False)
    return fnames
def create_files_csv_dirty(cfg_sep=",", cfg_header=True):
    """Write one sample CSV with configurable separator/header presence;
    return its path."""
    df1, _, _ = create_files_df_clean()
    fname = cfg_fname_base_in + 'debug.csv'
    df1.to_csv(fname, index=False, sep=cfg_sep, header=cfg_header)
    return fname
# excel single-tab
def create_files_xls_single_helper(cfg_fname):
    """Write the three sample frames as single-sheet Excel files named by
    the `cfg_fname` template; return their paths."""
    fnames = [cfg_fname % month for month in ('jan', 'feb', 'mar')]
    for frame, fname in zip(create_files_df_clean(), fnames):
        frame.to_excel(fname, index=False)
    return fnames
@pytest.fixture(scope="module")
def create_files_xls_single():
    # legacy .xls flavour of the single-sheet sample files
    return create_files_xls_single_helper(cfg_fname_base_in+'input-xls-sing-%s.xls')
@pytest.fixture(scope="module")
def create_files_xlsx_single():
    # .xlsx flavour of the single-sheet sample files
    return create_files_xls_single_helper(cfg_fname_base_in+'input-xls-sing-%s.xlsx')
def write_file_xls(dfg, fname, startrow=0, startcol=0):
    """Write `dfg` to `fname` twice, as sheets 'Sheet1' and 'Sheet2',
    optionally offset by `startrow`/`startcol`.

    BUG FIX: `ExcelWriter.save()` was deprecated and removed in pandas
    2.0; the context manager closes (and saves) the file instead.
    """
    with pd.ExcelWriter(fname) as writer:
        dfg.to_excel(writer, sheet_name='Sheet1', index=False, startrow=startrow, startcol=startcol)
        dfg.to_excel(writer, sheet_name='Sheet2', index=False, startrow=startrow, startcol=startcol)
# excel multi-tab
def create_files_xls_multiple_helper(cfg_fname):
    """Write the three sample frames as two-sheet Excel files named by the
    `cfg_fname` template; return their paths."""
    fnames = [cfg_fname % month for month in ('jan', 'feb', 'mar')]
    for frame, fname in zip(create_files_df_clean(), fnames):
        write_file_xls(frame, fname)
    return fnames
@pytest.fixture(scope="module")
def create_files_xls_multiple():
    # legacy .xls flavour of the two-sheet sample files
    return create_files_xls_multiple_helper(cfg_fname_base_in+'input-xls-mult-%s.xls')
@pytest.fixture(scope="module")
def create_files_xlsx_multiple():
    # .xlsx flavour of the two-sheet sample files
    return create_files_xls_multiple_helper(cfg_fname_base_in+'input-xls-mult-%s.xlsx')
#************************************************************
# tests - helpers
#************************************************************
def test_file_extensions_get():
    """file_extensions_get returns each file's extension, in order."""
    for ext in ('.csv', '.xls'):
        fname_list = ['a' + ext, 'b' + ext]
        assert file_extensions_get(fname_list) == [ext, ext]
def test_file_extensions_all_equal():
    """Uniform extension lists pass the all-equal check; mixed ones fail."""
    assert file_extensions_all_equal(['.csv'] * 2)
    assert file_extensions_all_equal(['.xls'] * 2)
    assert not file_extensions_all_equal(['.csv', '.xls'])
def test_file_extensions_valid():
    """Supported extensions validate; unsupported ones are rejected."""
    assert file_extensions_valid(['.csv'] * 2)
    assert file_extensions_valid(['.xls'] * 2)
    assert not file_extensions_valid(['.exe', '.xls'])
#************************************************************
#************************************************************
# scan header
#************************************************************
#************************************************************
def test_csv_sniff(create_files_csv, create_files_csv_colmismatch, create_files_csv_colreorder):
    """Column sniffing detects clean, mismatched and reordered headers."""
    # an empty file list is rejected at construction time
    with pytest.raises(ValueError) as e:
        c = CombinerCSV([])
    # clean: all files share the same columns in the same order
    combiner = CombinerCSV(fname_list=create_files_csv)
    combiner.sniff_columns()
    assert combiner.is_all_equal()
    assert combiner.is_column_present().all().all()
    assert combiner.sniff_results['columns_all'] == ['date', 'sales', 'cost', 'profit']
    assert combiner.sniff_results['columns_common'] == combiner.sniff_results['columns_all']
    assert combiner.sniff_results['columns_unique'] == []
    # extra column: one file adds 'profit2', so it is present but not common
    combiner = CombinerCSV(fname_list=create_files_csv_colmismatch)
    combiner.sniff_columns()
    assert not combiner.is_all_equal()
    assert not combiner.is_column_present().all().all()
    assert combiner.is_column_present().all().values.tolist()==[True, True, True, True, False]
    assert combiner.sniff_results['columns_all'] == ['date', 'sales', 'cost', 'profit', 'profit2']
    assert combiner.sniff_results['columns_common'] == ['date', 'sales', 'cost', 'profit']
    assert combiner.is_column_present_common().columns.tolist() == ['date', 'sales', 'cost', 'profit']
    assert combiner.sniff_results['columns_unique'] == ['profit2']
    assert combiner.is_column_present_unique().columns.tolist() == ['profit2']
    # mixed order: same columns, but 'profit' sits at a different position per file
    combiner = CombinerCSV(fname_list=create_files_csv_colreorder)
    combiner.sniff_columns()
    assert not combiner.is_all_equal()
    assert combiner.sniff_results['df_columns_order']['profit'].values.tolist() == [3, 3, 2]
def test_csv_selectrename(create_files_csv, create_files_csv_colmismatch):
    """columns_rename / columns_select previews behave as documented."""
    # rename: preview_rename shows only columns that actually get renamed
    df = CombinerCSV(fname_list=create_files_csv).preview_rename()
    assert df.empty
    # renaming a column that exists in no file is a no-op
    df = CombinerCSV(fname_list=create_files_csv, columns_rename={'notthere':'nan'}).preview_rename()
    assert df.empty
    df = CombinerCSV(fname_list=create_files_csv, columns_rename={'cost':'cost2'}).preview_rename()
    assert df.columns.tolist()==['cost']
    assert df['cost'].unique().tolist()==['cost2']
    # NaN marks files that don't contain the renamed column
    df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_rename={'profit2':'profit3'}).preview_rename()
    assert df.columns.tolist()==['profit2']
    assert df['profit2'].unique().tolist()==[np.nan, 'profit3']
    # select: preview_select lists the columns the combine would keep
    l = CombinerCSV(fname_list=create_files_csv).preview_select()
    assert l == ['date', 'sales', 'cost', 'profit']
    l2 = CombinerCSV(fname_list=create_files_csv, columns_select_common=True).preview_select()
    assert l2==l
    l = CombinerCSV(fname_list=create_files_csv, columns_select=['date', 'sales', 'cost']).preview_select()
    assert l == ['date', 'sales', 'cost']
    l = CombinerCSV(fname_list=create_files_csv_colmismatch).preview_select()
    assert l == ['date', 'sales', 'cost', 'profit', 'profit2']
    l = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select_common=True).preview_select()
    assert l == ['date', 'sales', 'cost', 'profit']
    # rename+select: selection may reference either the old or the new name
    l = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit2'], columns_rename={'profit2':'profit3'}).preview_select()
    assert l==['date', 'profit3']
    l = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit3'], columns_rename={'profit2':'profit3'}).preview_select()
    assert l==['date', 'profit3']
def test_to_pandas(create_files_csv, create_files_csv_colmismatch, create_files_csv_colreorder):
    """to_pandas combines files; mismatched columns are NaN-filled or dropped."""
    df = CombinerCSV(fname_list=create_files_csv).to_pandas()
    assert df.shape == (30, 6)
    # extra column appears with NaN for files that lack it
    df = CombinerCSV(fname_list=create_files_csv_colmismatch).to_pandas()
    assert df.shape == (30, 6+1)
    assert df['profit2'].isnull().unique().tolist() == [True, False]
    # columns_select_common drops the non-common column
    df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select_common=True).to_pandas()
    assert df.shape == (30, 6)
    assert 'profit2' not in df.columns
    # rename+select: works whether selection uses the old or the new name
    df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit2'], columns_rename={'profit2':'profit3'}, add_filename=False).to_pandas()
    assert df.shape == (30, 2)
    assert 'profit3' in df.columns and not 'profit2' in df.columns
    df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit3'], columns_rename={'profit2':'profit3'}, add_filename=False).to_pandas()
    assert df.shape == (30, 2)
    assert 'profit3' in df.columns and not 'profit2' in df.columns
def test_combinepreview(create_files_csv_colmismatch):
    """combine_preview reads a few rows per file and honors apply_after_read."""
    df = CombinerCSV(fname_list=create_files_csv_colmismatch).combine_preview()
    # 3 files x 3 preview rows; 'date' stays a plain object dtype here
    assert df.shape == (9, 6+1)
    assert df.dtypes.tolist() == [np.dtype('O'), np.dtype('int64'), np.dtype('int64'), np.dtype('int64'), np.dtype('float64'), np.dtype('O'), np.dtype('O')]
    def apply(dfg):
        # post-read hook: parse the date column to datetime64
        dfg['date'] = pd.to_datetime(dfg['date'], format='%Y-%m-%d')
        return dfg
    df = CombinerCSV(fname_list=create_files_csv_colmismatch, apply_after_read=apply).combine_preview()
    assert df.shape == (9, 6+1)
    assert df.dtypes.tolist() == [np.dtype('<M8[ns]'), np.dtype('int64'), np.dtype('int64'), np.dtype('int64'), np.dtype('float64'), np.dtype('O'), np.dtype('O')]
def test_tocsv(create_files_csv_colmismatch):
    """to_csv_combine / to_csv_align write combined and per-file aligned CSVs."""
    fname = 'test-data/output/combined.csv'
    fnameout = CombinerCSV(fname_list=create_files_csv_colmismatch).to_csv_combine(filename=fname)
    assert fname == fnameout
    df = pd.read_csv(fname)
    # keep a copy to compare against the dask round-trip below
    dfchk = df.copy()
    assert df.shape == (30, 4+1+2)
    assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
    assert check_df_colmismatch_combine(df)
    # common-columns-only combine drops 'profit2'
    fnameout = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select_common=True).to_csv_combine(filename=fname)
    df = pd.read_csv(fname)
    assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'filepath', 'filename']
    assert check_df_colmismatch_combine(df,is_common=True)
    def helper(fdir):
        # align writes one file per input, all with the same (aligned) columns
        fnamesout = CombinerCSV(fname_list=create_files_csv_colmismatch).to_csv_align(output_dir=fdir)
        for fname in fnamesout:
            df = pd.read_csv(fname)
            assert df.shape == (10, 4+1+2)
            assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
    # output_dir accepted with and without a trailing slash
    helper('test-data/output')
    helper('test-data/output/')
    # aligned files are dask-readable and equal the combined frame
    df = dd.read_csv('test-data/output/d6tstack-test-data-input-csv-colmismatch-*.csv')
    df = df.compute()
    assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
    assert df.reset_index(drop=True).equals(dfchk)
    assert check_df_colmismatch_combine(df)
    # check creates directory
    try:
        shutil.rmtree('test-data/output-tmp')
    except:
        # best-effort cleanup: directory may not exist yet
        pass
    _ = CombinerCSV(fname_list=create_files_csv_colmismatch).to_csv_align(output_dir='test-data/output-tmp')
    try:
        shutil.rmtree('test-data/output-tmp')
    except:
        # best-effort cleanup
        pass
def test_topq(create_files_csv_colmismatch):
    """to_parquet_combine / to_parquet_align produce engine-agnostic parquet."""
    fname = 'test-data/output/combined.pq'
    fnameout = CombinerCSV(fname_list=create_files_csv_colmismatch).to_parquet_combine(filename=fname)
    assert fname == fnameout
    df = pd.read_parquet(fname, engine='fastparquet')
    assert df.shape == (30, 4+1+2)
    assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
    # both parquet engines must read back identical data
    df2 = pd.read_parquet(fname, engine='pyarrow')
    assert df2.equals(df)
    assert check_df_colmismatch_combine(df)
    # dask read agrees with both pandas engines
    df = dd.read_parquet(fname)
    df = df.compute()
    assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
    df2 = pd.read_parquet(fname, engine='fastparquet')
    assert df2.equals(df)
    df3 = pd.read_parquet(fname, engine='pyarrow')
    assert df3.equals(df)
    assert check_df_colmismatch_combine(df)
    def helper(fdir):
        # align writes one parquet file per input with aligned columns
        fnamesout = CombinerCSV(fname_list=create_files_csv_colmismatch).to_parquet_align(output_dir=fdir)
        for fname in fnamesout:
            df = pd.read_parquet(fname, engine='fastparquet')
            assert df.shape == (10, 4+1+2)
            assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
    helper('test-data/output')
    df = dd.read_parquet('test-data/output/d6tstack-test-data-input-csv-colmismatch-*.pq')
    df = df.compute()
    assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
    assert check_df_colmismatch_combine(df)
# TODO: refactor these tests to compare against a single concatenated reference DataFrame instead of repeating the same shape/column assertions in every test
def test_tosql(create_files_csv_colmismatch):
tblname = 'testd6tstack'
def apply(dfg):
dfg['date'] = pd.to_datetime(dfg['date'], format='%Y-%m-%d')
return dfg
def helper(uri):
sql_engine = sqlalchemy.create_engine(uri)
CombinerCSV(fname_list=create_files_csv_colmismatch).to_sql_combine(uri, tblname, 'replace')
df = pd.read_sql_table(tblname, sql_engine)
assert check_df_colmismatch_combine(df)
# with date convert
CombinerCSV(fname_list=create_files_csv_colmismatch, apply_after_read=apply).to_sql_combine(uri, tblname, 'replace')
df = pd.read_sql_table(tblname, sql_engine)
assert check_df_colmismatch_combine(df, convert_date=False)
uri = 'postgresql+psycopg2://psqlusr:psqlpwdpsqlpwd@localhost/psqltest'
helper(uri)
uri = 'mysql+pymysql://testusr:testpwd@localhost/testdb'
helper(uri)
uri = 'postgresql+psycopg2://psqlusr:psqlpwdpsqlpwd@localhost/psqltest'
sql_engine = sqlalchemy.create_engine(uri)
CombinerCSV(fname_list=create_files_csv_colmismatch).to_psql_combine(uri, tblname, if_exists='replace')
df = pd.read_sql_table(tblname, sql_engine)
assert df.shape == (30, 4+1+2)
assert check_df_colmismatch_combine(df)
CombinerCSV(fname_list=create_files_csv_colmismatch, apply_after_read=apply).to_psql_combine(uri, tblname, if_exists='replace')
df = pd.read_sql_table(tblname, sql_engine)
assert check_df_colmismatch_combine(df, convert_date=False)
uri = 'mysql+mysqlconnector://testusr:testpwd@localhost/testdb'
| |
self.B2.dag()
return self.Hqrot + self.Hint + Scl * Hdrive
def calcStaticZZ(self, H):
self.dressedEnergyLevels(H=H)
self.staticZZ = self.eigenlevels[0][self.keys['11']] - self.eigenlevels[0][self.keys['10']] - self.eigenlevels[0][self.keys['01']]
return self.staticZZ
class QQQ():
    """Duffing-oscillator model of two qubits and a tunable coupler (Q1, Q2, Qc).

    Used for tunable-coupling simulations.  gc1/gc2 are the qubit-coupler
    exchange couplings and g12 the direct qubit-qubit coupling.
    All frequencies, anharmonicities and couplings are in GHz.
    """

    def __init__(self, Q1, Q2, Qc, gc1, gc2, g12):
        # duffing oscillator model
        # Unit in [GHz]
        self.gc1 = gc1
        self.gc2 = gc2
        self.g12 = g12
        self.Q1 = Q1
        self.Q2 = Q2
        self.Qc = Qc
        self.Nq1, self.Nq2, self.Nqc = Q1.Nq, Q2.Nq, Qc.Nq
        # single-mode identities and annihilation operators
        iq1, iq2, iqc = qt.qeye(self.Nq1), qt.qeye(self.Nq2), qt.qeye(self.Nqc)
        b1, b2, bc = qt.destroy(self.Nq1), qt.destroy(self.Nq2), qt.destroy(self.Nqc)
        # operators embedded in the full Q1 (x) Q2 (x) Qc space
        self.B1 = qt.tensor(b1, iq2, iqc)
        self.B2 = qt.tensor(iq1, b2, iqc)
        self.Bc = qt.tensor(iq1, iq2, bc)
        # NOTE(review): Iq1/Iq2/Iqc are all the same full-space identity operator
        self.Iq1 = qt.tensor(iq1, iq2, iqc)
        self.Iq2 = qt.tensor(iq1, iq2, iqc)
        self.Iqc = qt.tensor(iq1, iq2, iqc)
        # number operators
        self.Nb1 = self.B1.dag()*self.B1
        self.Nb2 = self.B2.dag()*self.B2
        self.Nbc = self.Bc.dag()*self.Bc
        # quartic forms; kept for reference only (duff_part* use N(N-1) instead)
        bbbb1 = self.B1.dag()*self.B1.dag()*self.B1*self.B1
        bbbb2 = self.B2.dag()*self.B2.dag()*self.B2*self.B2
        bbbbc = self.Bc.dag()*self.Bc.dag()*self.Bc*self.Bc
        self.duff_part1 = 0.5 * self.Q1.anh * self.Nb1 * (self.Nb1 - self.Iq1) # 0.5 * Q1.anh * bbbb1
        self.duff_part2 = 0.5 * self.Q2.anh * self.Nb2 * (self.Nb2 - self.Iq2) # 0.5 * Q2.anh * bbbb2
        self.duff_partc = 0.5 * self.Qc.anh * self.Nbc * (self.Nbc - self.Iqc) # 0.5 * Qc.anh * bbbbc
        # bare duffing Hamiltonians of each mode
        self.Hq1 = Q1.f01 * self.Nb1 + self.duff_part1 # - self.Iq1*0
        self.Hq2 = Q2.f01 * self.Nb2 + self.duff_part2 # - self.Iq2*0
        self.Hqc = Qc.f01 * self.Nbc + self.duff_partc # - self.Iqc*0
        # exchange-type interaction terms (full-transverse forms kept below)
        self._intc1 = self.B1*self.Bc.dag() + self.B1.dag()*self.Bc
        self._intc2 = self.B2*self.Bc.dag() + self.B2.dag()*self.Bc
        self._int12 = self.B1*self.B2.dag() + self.B1.dag()*self.B2
        # self._intc1 = (self.B1 + self.B1.dag())*(self.Bc + self.Bc.dag())
        # self._intc2 = (self.B2 + self.B2.dag())*(self.Bc + self.Bc.dag())
        # self._int12 = (self.B1 + self.B1.dag())*(self.B2 + self.B2.dag())
        self.Hintc1 = gc1*self._intc1
        self.Hintc2 = gc2*self._intc2
        self.Hint12 = g12*self._int12
        self.Hint = self.Hintc1 + self.Hintc2 + self.Hint12
        self.Hlab = self.Hq1 + self.Hq2 + self.Hqc + self.Hint
        self.eigenlevels = self.Hlab.eigenstates()
        self.dressedEnergyLevels()
        # dressed 0->1 transition frequencies of Q1 and Q2
        self.fd1 = self.eigenlevels[0][self.keys['100']] - self.eigenlevels[0][self.keys['000']]
        self.fd2 = self.eigenlevels[0][self.keys['010']] - self.eigenlevels[0][self.keys['000']]
        # ref : https://doi.org/10.1103/PhysRevApplied.12.054023
        self.staticZZ = self.eigenlevels[0][self.keys['110']] - self.eigenlevels[0][self.keys['100']] - self.eigenlevels[0][self.keys['010']]
        # perturbative estimate of the effective Q1-Q2 coupling
        self.effectiveCoupling = gc1*gc2*(1/(Q1.f01-Qc.f01)+1/(Q2.f01-Qc.f01))*0.5 + g12

    def dressedEnergyLevels(self):
        """Map bare labels '<i><j><k>' to eigenstate indices by maximum overlap."""
        if self.Nq1 == self.Nq2:
            Nq = self.Nq2
        else:
            print('Should be Nq1 = Nq2')
        eigenlevels = self.eigenlevels
        keys = {}
        for i in range(Nq):
            for j in range(Nq):
                for k in range(Nq):
                    bra = ket_3Qsys(i, j, k, Nq, Nq, Nq).dag()
                    # overlap of bare ket |ijk> with every eigenvector
                    # (the comprehension's ``i`` has its own scope in py3)
                    e = np.abs([(bra * eigenlevels[1])[i].tr() for i in range(Nq**3)])
                    index = np.argmax(e)
                    keys['{}{}{}'.format(i, j, k)] = index
        self.keys = keys

    def plotDressedEnergyLevels(self, coupler_exitation_stop=0):
        """Plot dressed levels |Q1,Q2,Qc> up to the given coupler excitation."""
        # coupler_exitation_stop : coupler exitation number to be plotted.
        ces = coupler_exitation_stop
        if self.Nq1 == self.Nq2:
            Nq = self.Nq2
        else:
            print('Should be Nq1 = Nq2')
        d = self.keys
        enes = self.eigenlevels
        plt.figure(1, dpi=150)
        cmap = plt.get_cmap("tab10")
        plt.title(r'$|Q1, Q2, Qc\rangle$')
        for i in range(Nq):
            for j in range(Nq):
                for k in range(Nq):
                    key = '{}{}{}'.format(i, j, k)
                    # NOTE(review): ``break`` only exits the inner k-loop
                    if key == '220' or k > ces:
                        break
                    index = d[key]
                    ene = enes[0][index]
                    # horizontal placement encodes i-j so levels don't overlap
                    if i < j:#p
                        s = abs(i-j)
                        t = s+1
                    elif i > j:#m
                        t = -abs(i-j)+1
                        s = t-1
                    elif i == j:
                        s = 0
                        t = 1
                    plt.hlines(ene, s, t, color=cmap(k))
                    plt.text(s, ene+0.4, '|'+key+r'$\rangle$'+':{:.3f}GHz'.format(ene))
        # ``ene`` still holds the last plotted level here
        plt.ylim([-1.0, ene+3])
        plt.ylabel('Eigen energy [GHz]')
        plt.xticks(color='None')
        plt.tick_params(length=0)
        plt.grid()
class RQRQR():
    """Two dressed qubit-resonator systems (QR1, QR2) coupled via a bus at frb.

    Computes dispersive shifts, the bus-mediated exchange coupling J and the
    coupled (level-repelled) qubit frequencies.  Units in GHz.
    """

    def __init__(self, QR1, QR2, frb, g1, g2):
        # Unit in [GHz]
        self.frb = frb
        self.g1 = g1
        self.g2 = g2
        self.QR1 = QR1
        self.QR2 = QR2
        self.detuning1 = QR1.f01_dressed - frb
        # NOTE(review): check qt.utilities.n_thermal(w, w_th) argument order here
        self.thermal_photon1 = qt.utilities.n_thermal(frb, QR1.f01_dressed)
        # bus-induced shift of the dressed qubit frequency
        self.f01_dressed1 = QR1.f01_dressed + ( 2 * (g1**2) / self.detuning1 ) * ( self.thermal_photon1 + 1/2 )
        self.X1 = ((g1**2)/self.detuning1)*(QR1.Q.anh/(QR1.f01_dressed + QR1.Q.anh - frb))
        self.detuning2 = QR2.f01_dressed - frb
        self.thermal_photon2 = qt.utilities.n_thermal(frb, QR2.f01_dressed)
        self.f01_dressed2 = QR2.f01_dressed + ( 2 * (g2**2) / self.detuning2 ) * ( self.thermal_photon2 + 1/2 )
        self.X2 = ((g2**2)/self.detuning2)*(QR2.Q.anh/(QR2.f01_dressed + QR2.Q.anh - frb))
        # qubit-qubit detuning and bus-mediated exchange coupling
        self.D12 = self.f01_dressed1 - self.f01_dressed2
        self.J = g1*g2*( self.detuning1 + self.detuning2 ) / ( 2 * self.detuning1 * self.detuning2 )
        # level repulsion between the two dressed qubits
        self.f01_coupled1 = self.f01_dressed1 + (self.J**2)/self.D12
        self.f01_coupled2 = self.f01_dressed2 - (self.J**2)/self.D12
class labFrame2Qhamiltonian_DuffingOscillator():
    """Lab-frame two-qubit duffing-oscillator Hamiltonian.

    Built from an RQRQR (two dressed qubit-resonator systems coupled by a
    bus); Nq1/Nq2 set the truncation of each qubit.  Energies in GHz.
    """

    def __init__(self, RQRQR, Nq1, Nq2):
        self.Nq1, self.Nq2 = Nq1, Nq2
        Iq1, Iq2 = qt.qeye(Nq1), qt.qeye(Nq2)
        b1, b2 = qt.destroy(Nq1), qt.destroy(Nq2)
        Nb1, Nb2 = b1.dag()*b1, b2.dag()*b2
        # generalized Pauli operators embedded in the two-qubit space
        self.X1 = qt.tensor(pX_N(Nq1), Iq2)
        self.Y1 = qt.tensor(pY_N(Nq1), Iq2)
        self.Z1 = qt.tensor(pZ_N(Nq1), Iq2)
        self.X2 = qt.tensor(Iq1, pX_N(Nq2))
        self.Y2 = qt.tensor(Iq1, pY_N(Nq2))
        self.Z2 = qt.tensor(Iq1, pZ_N(Nq2))
        self.Iq1, self.Iq2 = Iq1, Iq2
        self.Nb1, self.Nb2 = Nb1, Nb2
        self.QR1 = RQRQR.QR1
        self.QR2 = RQRQR.QR2
        J = RQRQR.J
        self.B1 = qt.tensor(b1, Iq2)
        self.B2 = qt.tensor(Iq1, b2)
        # quartic forms; not used below (duffing terms use N(N-1) instead)
        bbbb1 = b1.dag()*b1.dag()*b1*b1
        bbbb2 = b2.dag()*b2.dag()*b2*b2
        # Drive term @rotating frame (real/imag quadratures per qubit)
        self.Hd1_real = self.B1 + self.B1.dag()
        self.Hd1_imag = (- self.B1 + self.B1.dag())*1j
        self.Hd2_real = (self.B2 + self.B2.dag())
        self.Hd2_imag = (- self.B2 + self.B2.dag())*1j
        # single-qubit duffing Hamiltonians: f01*N + (anh/2)*N*(N-1)
        q1_lab = self.QR1.f01_dressed * Nb1 + 0.5 * self.QR1.Q.anh * Nb1 * (Nb1 - Iq1)
        q2_lab = self.QR2.f01_dressed * Nb2 + 0.5 * self.QR2.Q.anh * Nb2 * (Nb2 - Iq2)
        self.Hqlab = qt.tensor(q1_lab, Iq2) + qt.tensor(Iq1, q2_lab)
        # exchange interaction with the bus-mediated coupling J
        self.Hint = J * ( qt.tensor(b1, b2.dag()) + qt.tensor(b1.dag(), b2) )
        self.Hlab = self.Hqlab + self.Hint
        self.dressedEnergyLevels()
        # dressed 0->1 transition frequencies (drive frequencies)
        self.fd1 = self.eigenlevels[0][self.keys['10']] - self.eigenlevels[0][self.keys['00']]
        self.fd2 = self.eigenlevels[0][self.keys['01']] - self.eigenlevels[0][self.keys['00']]

    def dressedEnergyLevels(self):
        """Map bare labels '<i><j>' to eigenstate indices by maximum overlap."""
        if self.Nq1 == self.Nq2:
            Nq = self.Nq2
        else:
            print('Should be Nq1 = Nq2')
        eigenlevels = self.Hlab.eigenstates()
        keys = {}
        for i in range(Nq):
            for j in range(Nq):
                k = ket_2Qsys(i, j, Nq, Nq)
                # overlap of bare ket |ij> with every eigenvector
                # (the comprehension's ``i`` has its own scope in py3)
                e = np.abs([(k.dag() * eigenlevels[1])[i].tr() for i in range(Nq**2)])
                index = np.argmax(e)
                keys['{}{}'.format(i, j)] = index
        self.keys = keys
        self.eigenlevels = eigenlevels

    def plotDressedEnergyLevels(self):
        """Plot the dressed two-qubit energy levels labelled |ij>."""
        if self.Nq1 == self.Nq2:
            Nq = self.Nq2
        else:
            print('Should be Nq1 = Nq2')
        d = self.keys
        enes = self.eigenlevels
        plt.figure(1)
        for i in range(Nq):
            for j in range(Nq):
                key = '{}{}'.format(i,j)
                # NOTE(review): ``break`` only exits the inner j-loop
                if key == '22':
                    break
                index = d[key]
                ene = enes[0][index]
                # horizontal placement encodes i-j so levels don't overlap
                if i < j:#p
                    s = abs(i-j)
                    t = s+1
                elif i > j:#m
                    t = -abs(i-j)+1
                    s = t-1
                elif i == j:
                    s = 0
                    t = 1
                plt.hlines(ene, s, t)
                plt.text(s, ene+0.4, '|'+key+'>'+':{:.3f}GHz'.format(ene))
        # ``ene`` still holds the last plotted level here
        plt.ylim([-1.0, ene+3])
        plt.ylabel('Eigen energy [GHz]')
        plt.xticks(color='None')
        plt.tick_params(length=0)
        plt.grid()

    def toRotFrameHamiltonian(self, fd:float):
        """Hamiltonian in a frame rotating at a single drive frequency fd."""
        Nb1, Nb2 = self.Nb1, self.Nb2
        Iq1, Iq2 = self.Iq1, self.Iq2
        q1_rot = (self.QR1.f01_dressed-fd) * Nb1 + 0.5 * self.QR1.Q.anh * Nb1 * (Nb1 - Iq1)
        q2_rot = (self.QR2.f01_dressed-fd) * Nb2 + 0.5 * self.QR2.Q.anh * Nb2 * (Nb2 - Iq2)
        self.Hqrot = qt.tensor(q1_rot, self.Iq2) + qt.tensor(self.Iq1, q2_rot)
        return self.Hqrot + self.Hint

    def toDoublyRotFrameHamiltonian(self, fd1:float, fd2:float):
        """Hamiltonian in a doubly rotating frame (fd1 for Q1, fd2 for Q2)."""
        Nb1, Nb2 = self.Nb1, self.Nb2
        Iq1, Iq2 = self.Iq1, self.Iq2
        q1_rot = (self.QR1.f01_dressed-fd1) * Nb1 + 0.5 * self.QR1.Q.anh * Nb1 * (Nb1 - Iq1)
        q2_rot = (self.QR2.f01_dressed-fd2) * Nb2 + 0.5 * self.QR2.Q.anh * Nb2 * (Nb2 - Iq2)
        self.Hqrot = qt.tensor(q1_rot, self.Iq2) + qt.tensor(self.Iq1, q2_rot)
        return self.Hqrot + self.Hint
class labFrame1Qhamiltonian_DuffingOscillator():
    """Lab-frame single-qubit duffing-oscillator Hamiltonian.

    Built from a dressed qubit-resonator system ``QR`` truncated to ``Nq``
    levels; exposes drive quadrature operators and rotating-frame unitaries.
    Energies in GHz.
    """

    def __init__(self, QR, Nq):
        self.Nq = Nq
        Iq = qt.qeye(Nq)
        b = qt.destroy(Nq)
        Nb = b.dag()*b
        # generalized Pauli operators on the Nq-level space
        self.X = pX_N(Nq)
        self.Y = pY_N(Nq)
        self.Z = pZ_N(Nq)
        self.Iq = Iq
        self.Nb = Nb
        self.QR = QR
        self.B = b
        # Drive term @rotating frame (real/imag quadratures)
        self.f01_dressed = QR.f01_dressed
        self.Hd1_real = self.B + self.B.dag()
        self.Hd1_imag = (- self.B + self.B.dag())*1j
        # duffing Hamiltonian: f01*N + (anh/2)*N*(N-1)
        q1_lab = self.QR.f01_dressed * Nb + 0.5 * self.QR.Q.anh * Nb * (Nb - Iq)
        self.Hqlab = q1_lab
        self.Hlab = self.Hqlab

    def calcUrot(self, t_list):
        """Return rotating-frame unitaries exp(+1j*f01_dressed*t*Nb) for each t.

        Args:
            t_list: iterable of times.

        Returns:
            list of unitaries, one per time in ``t_list``.
        """
        # comprehension instead of the manual append loop (same result, idiomatic)
        return [(1j*self.f01_dressed*t*self.Nb).expm() for t in t_list]
class labFrame1Q_1R_hamiltonian():
def __init__(self, Q, R, g):
"""
params
---
Q : class instance
transmon()
R : class instance
resonator()
g : float in [GHz]
coupling constant
"""
self.Nq = Q.Nq
self.Nf = R.Nf
self.Ir = Ir = R.Ir
self.Iq = Iq = Q.Iq
self.II = qt.tensor(Iq, Ir)
self.f01 = Q.f01
self.anh = Q.anh
self.fr = R.fr
self.g = g
self.Q = Q
self.R = R
self.detuning = Q.f01 - R.fr
# self.thermal_photon = qt.utilities.n_thermal(self.fr, Q.f01)
# self.f01_dressed = Q.f01 + ( 2 * (g**2) / self.detuning ) * ( self.thermal_photon + 1/2 )
self.X = qt.tensor(Q.X, Ir)
self.Y = qt.tensor(Q.Y, Ir)
self.Z = qt.tensor(Q.Z, Ir)
self.P0 = qt.tensor(Q.P0, Ir)
self.P1 = qt.tensor(Q.P1, Ir)
self.Na = qt.tensor(Iq, R.na)
self.Nb = qt.tensor(Q.nb, Ir)
self.A = A = qt.tensor(Iq, R.a)
self.B = B = qt.tensor(Q.b, Ir)
self.HQ1 = qt.tensor(Q.Hqlab, Ir)
self.HR1 = qt.tensor(Iq, R.Hr)
self.Hint = g * ( B*A.dag() + B.dag()*A | |
import pytest
from unittest.mock import Mock, patch
import os
import numpy.testing as npt
from paths_cli.compiling.core import *
from paths_cli.compiling import compiler_for
from paths_cli.tests.compiling.utils import mock_compiler
class MockNamedObject:
    """Minimal named-object stand-in for CategoryCompiler._compile_dict and
    CategoryCompiler.register_object tests: stores ``data`` and supports the
    fluent ``named`` API."""

    def __init__(self, data):
        self.name = None
        self.data = data

    def named(self, name):
        # fluent setter: record the name and hand back the same instance
        self.name = name
        return self
def mock_named_object_factory(dct):
    # Factory used as a CategoryCompiler type-dispatch entry in the tests below.
    return MockNamedObject(**dct)
class TestParameter:
    """Tests for Parameter: loader-metadata fallback, required detection,
    call delegation, and JSON schema output."""

    def setup_method(self):
        # pytest-native setup hook; the nose-style ``setup`` alias is
        # deprecated since pytest 7.2 and removed in pytest 8.
        self.loader = Mock(
            return_value='foo',
            json_type='string',
            description="string 'foo'",
        )

    def test_parameter_info_in_loader(self):
        # if parameter doesn't give json_type/description, but the
        # loader does, then we return what the loader says
        parameter = Parameter(name='foo_param',
                              loader=self.loader)
        assert parameter.name == 'foo_param'
        assert parameter.loader() == "foo"
        assert parameter.json_type == "string"
        assert parameter.description == "string 'foo'"

    def test_parameter_info_local(self):
        # if parameter and loader both give json_type/description, then the
        # value given by the parameter takes precedence
        parameter = Parameter(name='foo_param',
                              loader=self.loader,
                              json_type='int',  # it's a lie!
                              description='override')
        assert parameter.name == 'foo_param'
        assert parameter.loader() == "foo"
        assert parameter.json_type == "int"
        assert parameter.description == "override"

    def test_parameter_info_none(self):
        # if neither parameter nor loader define json_type/description, then
        # we should return None for those
        parameter = Parameter(name="foo_param",
                              loader=lambda: "foo")
        assert parameter.name == 'foo_param'
        assert parameter.loader() == "foo"
        assert parameter.json_type is None
        assert parameter.description is None

    @pytest.mark.parametrize('is_required', [True, False])
    def test_required(self, is_required):
        # a parameter without a default is required; one with a default isn't
        if is_required:
            parameter = Parameter(name="foo_param", loader=self.loader)
        else:
            parameter = Parameter(name="foo_param", loader=self.loader,
                                  default="bar")
        assert parameter.required == is_required

    def test_call(self):
        # calling the parameter object should call its loader
        # TODO: maybe check that we pass arguments along correctly?
        parameter = Parameter(name="foo_param", loader=self.loader)
        assert parameter() == "foo"

    def test_to_json_schema(self):
        # to_json_schema should return the expected values
        parameter = Parameter(name="foo_param",
                              loader=self.loader)
        expected = {'type': 'string',
                    'description': "string 'foo'"}
        assert parameter.to_json_schema() == ("foo_param", expected)
class TestBuilder:
    """Tests for Builder: callable resolution plus remapper/after_build hooks."""

    @staticmethod
    def _callable(string):
        # reference builder: reverse the input string
        return string[::-1]

    def test_callable_builder(self):
        # a callable passed directly is used as the builder's call method
        builder = Builder(self._callable)
        assert builder(string="foo") == "oof"

    def test_imported_builder(self):
        # a dotted-path string is imported and used as the builder callable
        expected = os.getcwd()
        builder = Builder('os.getcwd')
        assert builder() == expected

    def test_with_remapper(self):
        # the remapper transforms the input dict before the builder runs
        def drop_last_char(dct):
            dct['string'] = dct['string'][:-1]
            return dct
        builder = Builder(self._callable, remapper=drop_last_char)
        assert builder(string="foo") == "of"

    def test_with_after_build(self):
        # after_build post-processes the built object together with the input
        def append_input(obj, dct):
            return obj[:-1] + dct['string']
        builder = Builder(self._callable, after_build=append_input)
        assert builder(string="foo") == "oofoo"
class TestInstanceCompilerPlugin:
    """Tests for InstanceCompilerPlugin: schema naming, JSON schema output,
    attribute compilation, and object construction."""

    @staticmethod
    def _builder(req_param, opt_default=10, opt_override=100):
        return f"{req_param}, {opt_default}, {opt_override}"

    def setup_method(self):
        # pytest-native setup hook; the nose-style ``setup`` alias is
        # deprecated since pytest 7.2 and removed in pytest 8.
        identity = lambda x: x
        self.parameters = [
            Parameter('req_param', identity, json_type="string"),
            Parameter('opt_default', identity, json_type="int", default=10),
            Parameter('opt_override', identity, json_type='int',
                      default=100)
        ]
        self.instance_builder = InstanceCompilerPlugin(
            self._builder,
            self.parameters,
            name='demo',
            aliases=['foo', 'bar'],
        )
        self.instance_builder.category = 'demo'
        self.input_dict = {'req_param': "qux", 'opt_override': 25}

    def test_schema_name(self):
        # schema_name combines category and name unless they match
        assert self.instance_builder.schema_name == 'demo'
        self.instance_builder.category = 'foo'
        assert self.instance_builder.schema_name == 'demo-foo'
        self.instance_builder.name = 'demo-foo'
        assert self.instance_builder.schema_name == 'demo-foo'

    def test_to_json_schema(self):
        # to_json_schema should create a valid JSON schema entry for this
        # instance builder
        # TODO: this may change as I better understand JSON schema. Details
        # of the JSON schema API aren't locked until I can build docs from
        # our schema.
        expected_schema = {
            'properties': {
                'name': {'type': 'string'},
                'type': {'type': 'string',
                         'enum': ['demo']},
                'req_param': {'type': 'string', 'description': None},
                'opt_default': {'type': 'int', 'description': None},
                'opt_override': {'type': 'int', 'description': None},
            },
            'required': ['req_param'],
        }
        name, schema = self.instance_builder.to_json_schema()
        assert name == 'demo'
        assert expected_schema['required'] == schema['required']
        assert expected_schema['properties'] == schema['properties']

    def test_compile_attrs(self):
        # compile_attrs should create a dictionary with correct objects in
        # the attributes from the input dictionary
        expected = {'req_param': "qux", 'opt_override': 25}
        # note that the parameter where we use the default value isn't
        # listed: the default value should match the default used in the
        # code, though!
        compile_attrs = self.instance_builder.compile_attrs
        assert compile_attrs(self.input_dict) == expected

    def test_compile_attrs_compiler_integration(self):
        # compile_attrs gives the existing named object (is-identity) if one
        # of the parameters uses that compiler to load a named object
        user_input = {'foo': 'named_foo'}
        # full_input = {'foo': {'name': 'named_foo',
        #                       'type': 'baz',
        #                       'bar': 'should see this'}}
        bar_plugin = InstanceCompilerPlugin(
            builder=lambda foo: 'in bar: should not see this',
            parameters=[Parameter('foo', compiler_for('foo'))],
        )
        foo_plugin = InstanceCompilerPlugin(
            builder=lambda: 'in foo: should not see this',
            parameters=[],
        )
        named_objs = {'named_foo': 'should see this'}
        type_dispatch = {'baz': foo_plugin}
        PATCH_LOC = 'paths_cli.compiling.root_compiler._COMPILERS'
        compiler = mock_compiler('foo', type_dispatch=type_dispatch,
                                 named_objs=named_objs)
        with patch.dict(PATCH_LOC, {'foo': compiler}):
            compiled = bar_plugin.compile_attrs(user_input)
        # maps attr name 'foo' to the previous existing object
        assert compiled == {'foo': 'should see this'}

    def test_compile_attrs_missing_required(self):
        # an InputError should be raised if a required parameter is missing
        input_dict = {'opt_override': 25}
        with pytest.raises(InputError, match="missing required"):
            self.instance_builder.compile_attrs(input_dict)

    def test_call(self):
        # calling the instance builder should create the object
        expected = "qux, 10, 25"
        assert self.instance_builder(self.input_dict) == expected
class TestCategoryCompiler:
    """Tests for CategoryCompiler: string/dict compilation and registration."""

    def setup_method(self):
        # pytest-native setup hook; the nose-style ``setup`` alias is
        # deprecated since pytest 7.2 and removed in pytest 8.
        self.compiler = CategoryCompiler(
            {'foo': mock_named_object_factory},
            'foo_compiler'
        )
def _mock_register_obj(self):
obj = "bar"
self.compiler.all_objs.append(obj)
self.compiler.named_objs['foo'] = obj
def test_compile_str(self):
# compile_str should load a known object with the input name
self._mock_register_obj()
assert self.compiler._compile_str('foo') == "bar"
def test_compile_str_error(self):
# if compile_str is given a name that is not known, an InputError
# should be raised
self._mock_register_obj()
with pytest.raises(InputError, match="Unable to find"):
self.compiler._compile_str('baz')
@pytest.mark.parametrize('named', [True, False])
def test_compile_dict(self, named):
# compile_dct should create the object from the input dict
input_dict = {'type': 'foo', 'data': "qux"}
if named:
input_dict['name'] = 'bar'
obj = self.compiler._compile_dict(input_dict)
assert obj.data == "qux"
name = {True: 'bar', False: None}[named]
assert obj.name == name
    def test_register_object_named(self):
        # when registered, a named object should register with the all_objs
        # list and with the named_objs dict
        obj = MockNamedObject('foo')
        assert obj.name is None
        assert self.compiler.all_objs == []
        assert self.compiler.named_objs == {}
        obj = self.compiler.register_object(obj, 'bar')
        assert obj.name == 'bar'
        assert self.compiler.all_objs == [obj]
        assert self.compiler.named_objs == {'bar': obj}
    def test_register_object_unnamed(self):
        # when registered, an unnamed object should register with the
        # all_objs list and leave the named_objs dict unchanged
        obj = MockNamedObject('foo')
        assert obj.name is None
        assert self.compiler.all_objs == []
        assert self.compiler.named_objs == {}
        obj = self.compiler.register_object(obj, None)
        assert obj.name is None
        assert self.compiler.all_objs == [obj]
        assert self.compiler.named_objs == {}
    def test_register_object_duplicate(self):
        # if an attempt is made to register an object with a name that is
        # already in use, an InputError should be raised, and the object
        # should not register with either all_objs or named_objs
        obj = MockNamedObject('foo').named('bar')
        self.compiler.named_objs['bar'] = obj
        self.compiler.all_objs.append(obj)
        obj2 = MockNamedObject('baz')
        with pytest.raises(InputError, match="already exists"):
            self.compiler.register_object(obj2, 'bar')
        # registration failed: compiler state and obj2 are untouched
        assert self.compiler.named_objs == {'bar': obj}
        assert self.compiler.all_objs == [obj]
        assert obj2.name is None
    def test_register_builder(self):
        # a new builder can be registered and used, if it has a new name
        assert len(self.compiler.type_dispatch) == 1
        assert 'bar' not in self.compiler.type_dispatch
        self.compiler.register_builder(lambda dct: 10, 'bar')
        assert len(self.compiler.type_dispatch) == 2
        assert 'bar' in self.compiler.type_dispatch
        # the newly registered builder is dispatched on its 'type' key
        input_dict = {'type': 'bar'}
        assert self.compiler(input_dict) == 10
    def test_register_builder_duplicate(self):
        # if an attempt is made to register a builder with a name that is
        # already in use, a RuntimeError is raised
        orig = self.compiler.type_dispatch['foo']
        with pytest.raises(RuntimeError, match="already registered"):
            self.compiler.register_builder(lambda dct: 10, 'foo')
        # the original builder is still in place
        assert self.compiler.type_dispatch['foo'] is orig
    def test_register_builder_identical(self):
        # if an attempt is made to register a builder that has already been
        # registered, nothing happens (since it is already registered)
        orig = self.compiler.type_dispatch['foo']
        self.compiler.register_builder(orig, 'foo')
@staticmethod
def _validate_obj(obj, input_type):
if input_type == 'str':
assert obj == 'bar'
elif input_type == 'dict':
assert obj.data == 'qux'
else: # -no-cov-
raise RuntimeError("Error in test setup")
    @pytest.mark.parametrize('input_type', ['str', 'dict'])
    def test_compile(self, input_type):
        # the compile method should work whether the input is a dict
        # representing an object to be compiled or string name for an
        # already-compiled object
        self._mock_register_obj()
        input_data = {
            'str': 'foo',
            'dict': {'type': 'foo', 'data': 'qux'}
        }[input_type]
        obj = self.compiler.compile(input_data)
        self._validate_obj(obj, input_type)
@pytest.mark.parametrize('input_type', ['str', 'dict'])
@pytest.mark.parametrize('as_list', [True, False])
def test_call(self, input_type, as_list):
# the call method should work whether the input is a single object
# or a list of objects (as well as whether string or dict)
| |
not be allowed to exceed `source_length`. To maintain backwards compatibility,
this is not enforced if a `key_provider` is provided.
:param dict encryption_context: Dictionary defining encryption context
:param algorithm: Algorithm to use for encryption (optional)
:type algorithm: aws_encryption_sdk.identifiers.Algorithm
:param int frame_length: Frame length in bytes (optional)
"""
encryption_context = attr.ib(
hash=False, # dictionaries are not hashable
default=attr.Factory(dict),
validator=attr.validators.instance_of(dict),
)
algorithm = attr.ib(
hash=True, default=None, validator=attr.validators.optional(attr.validators.instance_of(Algorithm))
)
frame_length = attr.ib(hash=True, default=FRAME_LENGTH, validator=attr.validators.instance_of(six.integer_types))
class StreamEncryptor(_EncryptionStream):  # pylint: disable=too-many-instance-attributes
    """Provides a streaming encryptor for encrypting a stream source.

    Behaves as a standard file-like object.

    .. note::
        Take care when encrypting framed messages with large frame length and large non-framed
        messages. See :class:`aws_encryption_sdk.stream` for more details.

    .. note::
        If config is provided, all other parameters are ignored.

    :param config: Client configuration object (config or individual parameters required)
    :type config: aws_encryption_sdk.streaming_client.EncryptorConfig
    :param source: Source data to encrypt or decrypt
    :type source: str, bytes, io.IOBase, or file
    :param materials_manager: `CryptoMaterialsManager` from which to obtain cryptographic materials
        (either `materials_manager` or `key_provider` required)
    :type materials_manager: aws_encryption_sdk.materials_manager.base.CryptoMaterialsManager
    :param key_provider: `MasterKeyProvider` from which to obtain data keys for encryption
        (either `materials_manager` or `key_provider` required)
    :type key_provider: aws_encryption_sdk.key_providers.base.MasterKeyProvider
    :param int source_length: Length of source data (optional)

    .. note::
        If source_length is not provided and unframed message is being written or read() is called,
        will attempt to seek() to the end of the stream and tell() to find the length of source data.

    .. note::
        .. versionadded:: 1.3.0

        If `source_length` and `materials_manager` are both provided, the total plaintext bytes
        encrypted will not be allowed to exceed `source_length`. To maintain backwards compatibility,
        this is not enforced if a `key_provider` is provided.

    :param dict encryption_context: Dictionary defining encryption context (optional)
    :param algorithm: Algorithm to use for encryption (optional)
    :type algorithm: aws_encryption_sdk.identifiers.Algorithm
    :param int frame_length: Frame length in bytes (optional)
    """

    # Configuration type expected for this stream.
    _config_class = EncryptorConfig

    def __init__(self, **kwargs):  # pylint: disable=unused-argument,super-init-not-called
        """Prepares necessary initial values."""
        self.sequence_number = 1  # frame sequence numbering starts at 1

        # Frame length selects framed vs non-framed content type.
        self.content_type = aws_encryption_sdk.internal.utils.content_type(self.config.frame_length)

        # Running total of plaintext bytes consumed; checked against
        # config.source_length in _read_bytes.
        self._bytes_encrypted = 0

        # Fail fast: a non-framed message (frame_length == 0) may not exceed
        # MAX_NON_FRAMED_SIZE.
        if self.config.frame_length == 0 and (
            self.config.source_length is not None and self.config.source_length > MAX_NON_FRAMED_SIZE
        ):
            raise SerializationError("Source too large for non-framed message")

        self.__unframed_plaintext_cache = io.BytesIO()
        self.__message_complete = False

    def ciphertext_length(self):
        """Returns the length of the resulting ciphertext message in bytes.

        :rtype: int
        """
        return aws_encryption_sdk.internal.formatting.ciphertext_length(
            header=self.header, plaintext_length=self.stream_length
        )

    def _prep_message(self):
        """Performs initial message setup.

        Obtains encryption materials, validates them against the requested
        configuration, derives the data encryption key, and writes the
        serialized message header (plus non-framed opening data if needed).

        :raises MasterKeyProviderError: if primary master key is not a member of supplied MasterKeyProvider
        :raises MasterKeyProviderError: if no Master Keys are returned from key_provider
        """
        validate_commitment_policy_on_encrypt(self.config.commitment_policy, self.config.algorithm)

        # Stream length may be unknowable (non-seekable source); that is
        # acceptable here and reported to the CMM as None.
        try:
            plaintext_length = self.stream_length
        except NotSupportedError:
            plaintext_length = None

        encryption_materials_request = EncryptionMaterialsRequest(
            algorithm=self.config.algorithm,
            # Copy so the materials manager cannot mutate the caller's context.
            encryption_context=self.config.encryption_context.copy(),
            frame_length=self.config.frame_length,
            plaintext_rostream=aws_encryption_sdk.internal.utils.streams.ROStream(self.source_stream),
            plaintext_length=plaintext_length,
            commitment_policy=self.config.commitment_policy,
        )
        self._encryption_materials = self.config.materials_manager.get_encryption_materials(
            request=encryption_materials_request
        )

        # If the caller pinned an algorithm suite, the CMM must honor it exactly.
        if self.config.algorithm is not None and self._encryption_materials.algorithm != self.config.algorithm:
            raise ActionNotAllowedError(
                (
                    "Cryptographic materials manager provided algorithm suite"
                    " differs from algorithm suite in request.\n"
                    "Required: {requested}\n"
                    "Provided: {provided}"
                ).format(requested=self.config.algorithm, provided=self._encryption_materials.algorithm)
            )

        # Enforce the configured cap on encrypted data keys, if one is set.
        num_keys = len(self._encryption_materials.encrypted_data_keys)
        if self.config.max_encrypted_data_keys and num_keys > self.config.max_encrypted_data_keys:
            raise MaxEncryptedDataKeysExceeded(num_keys, self.config.max_encrypted_data_keys)

        # A signing key is only present for signed algorithm suites.
        if self._encryption_materials.signing_key is None:
            self.signer = None
        else:
            self.signer = Signer.from_key_bytes(
                algorithm=self._encryption_materials.algorithm, key_bytes=self._encryption_materials.signing_key
            )
        aws_encryption_sdk.internal.utils.validate_frame_length(
            frame_length=self.config.frame_length, algorithm=self._encryption_materials.algorithm
        )

        message_id = aws_encryption_sdk.internal.utils.message_id(
            self._encryption_materials.algorithm.message_id_length()
        )

        self._derived_data_key = derive_data_encryption_key(
            source_key=self._encryption_materials.data_encryption_key.data_key,
            algorithm=self._encryption_materials.algorithm,
            message_id=message_id,
        )

        self._header = self.generate_header(message_id)
        self._write_header()
        if self.content_type == ContentType.NO_FRAMING:
            self._prep_non_framed()
        self._message_prepped = True

    def generate_header(self, message_id):
        """Generates the header object.

        :param message_id: The randomly generated id for the message
        :type message_id: bytes
        :rtype: MessageHeader
        """
        version = VERSION
        # Algorithm suites with message format version 2 use the V2
        # serialization format.
        if self._encryption_materials.algorithm.message_format_version == 0x02:
            version = SerializationVersion.V2

        kwargs = dict(
            version=version,
            algorithm=self._encryption_materials.algorithm,
            message_id=message_id,
            encryption_context=self._encryption_materials.encryption_context,
            encrypted_data_keys=self._encryption_materials.encrypted_data_keys,
            content_type=self.content_type,
            frame_length=self.config.frame_length,
        )

        # Committing suites carry a commitment key derived from the data key
        # and message id.
        if self._encryption_materials.algorithm.is_committing():
            commitment_key = calculate_commitment_key(
                source_key=self._encryption_materials.data_encryption_key.data_key,
                algorithm=self._encryption_materials.algorithm,
                message_id=message_id,
            )
            kwargs["commitment_key"] = commitment_key

        # Fields that exist only in the V1 header format.
        if version == SerializationVersion.V1:
            kwargs["type"] = TYPE
            kwargs["content_aad_length"] = 0
            kwargs["header_iv_length"] = self._encryption_materials.algorithm.iv_len

        return MessageHeader(**kwargs)

    def _write_header(self):
        """Builds the message header and writes it to the output stream."""
        self.output_buffer += serialize_header(header=self._header, signer=self.signer)
        # Header auth is computed over the serialized header bytes just
        # written into the buffer.
        self.output_buffer += serialize_header_auth(
            version=self._header.version,
            algorithm=self._encryption_materials.algorithm,
            header=self.output_buffer,
            data_encryption_key=self._derived_data_key,
            signer=self.signer,
        )

    def _prep_non_framed(self):
        """Prepare the opening data for a non-framed message."""
        try:
            # If the source length is known we can stream the source directly.
            plaintext_length = self.stream_length
            self.__unframed_plaintext_cache = self.source_stream
        except NotSupportedError:
            # We need to know the plaintext length before we can start processing the data.
            # If we cannot seek on the source then we need to read the entire source into memory.
            self.__unframed_plaintext_cache = io.BytesIO()
            self.__unframed_plaintext_cache.write(self.source_stream.read())
            plaintext_length = self.__unframed_plaintext_cache.tell()
            self.__unframed_plaintext_cache.seek(0)

        aad_content_string = aws_encryption_sdk.internal.utils.get_aad_content_string(
            content_type=self.content_type, is_final_frame=True
        )
        # A non-framed body is authenticated as a single final "frame" with
        # sequence number 1.
        associated_data = assemble_content_aad(
            message_id=self._header.message_id,
            aad_content_string=aad_content_string,
            seq_num=1,
            length=plaintext_length,
        )
        self.encryptor = Encryptor(
            algorithm=self._encryption_materials.algorithm,
            key=self._derived_data_key,
            associated_data=associated_data,
            iv=non_framed_body_iv(self._encryption_materials.algorithm),
        )
        self.output_buffer += serialize_non_framed_open(
            algorithm=self._encryption_materials.algorithm,
            iv=self.encryptor.iv,
            plaintext_length=plaintext_length,
            signer=self.signer,
        )

    def _read_bytes_to_non_framed_body(self, b):
        """Reads the requested number of bytes from source to a streaming non-framed message body.

        :param int b: Number of bytes to read
        :returns: Encrypted bytes from source stream
        :rtype: bytes
        """
        _LOGGER.debug("Reading %d bytes", b)
        plaintext = self.__unframed_plaintext_cache.read(b)
        plaintext_length = len(plaintext)
        # Enforce the non-framed message size limit against the total bytes
        # processed so far.
        if self.tell() + len(plaintext) > MAX_NON_FRAMED_SIZE:
            raise SerializationError("Source too large for non-framed message")

        ciphertext = self.encryptor.update(plaintext)
        self._bytes_encrypted += plaintext_length
        if self.signer is not None:
            self.signer.update(ciphertext)

        # A short read means the source is exhausted: finalize the body,
        # append the close (auth tag) and, for signed suites, the footer.
        if len(plaintext) < b:
            _LOGGER.debug("Closing encryptor after receiving only %d bytes of %d bytes requested", plaintext_length, b)
            closing = self.encryptor.finalize()

            if self.signer is not None:
                self.signer.update(closing)
            closing += serialize_non_framed_close(tag=self.encryptor.tag, signer=self.signer)

            if self.signer is not None:
                closing += serialize_footer(self.signer)
            self.__message_complete = True
            return ciphertext + closing

        return ciphertext

    def _read_bytes_to_framed_body(self, b):
        """Reads the requested number of bytes from source to a streaming framed message body.

        :param int b: Number of bytes to read
        :returns: Bytes read from source stream, encrypted, and serialized
        :rtype: bytes
        """
        _LOGGER.debug("collecting %d bytes", b)
        _b = b

        # Round the request up to a whole number of frames (a negative b
        # means "read everything").
        if b > 0:
            _frames_to_read = math.ceil(b / float(self.config.frame_length))
            b = int(_frames_to_read * self.config.frame_length)
        _LOGGER.debug("%d bytes requested; reading %d bytes after normalizing to frame length", _b, b)

        plaintext = self.source_stream.read(b)
        plaintext_length = len(plaintext)
        _LOGGER.debug("%d bytes read from source", plaintext_length)

        # A short (or read-all) read means the source is exhausted, so the
        # final frame and footer must be written on this pass.
        finalize = False
        if b < 0 or plaintext_length < b:
            _LOGGER.debug("Final plaintext read from source")
            finalize = True

        output = b""
        final_frame_written = False

        while (
            # If not finalizing on this pass, exit when plaintext is exhausted
            (not finalize and plaintext)
            # If finalizing on this pass, wait until final frame is written
            or (finalize and not final_frame_written)
        ):
            current_plaintext_length = len(plaintext)
            is_final_frame = finalize and current_plaintext_length < self.config.frame_length

            bytes_in_frame = min(current_plaintext_length, self.config.frame_length)
            _LOGGER.debug(
                "Writing %d bytes into%s frame %d",
                bytes_in_frame,
                " final" if is_final_frame else "",
                self.sequence_number,
            )
            self._bytes_encrypted += bytes_in_frame

            # serialize_frame consumes one frame's worth of plaintext and
            # returns the unconsumed remainder for the next iteration.
            ciphertext, plaintext = serialize_frame(
                algorithm=self._encryption_materials.algorithm,
                plaintext=plaintext,
                message_id=self._header.message_id,
                data_encryption_key=self._derived_data_key,
                frame_length=self.config.frame_length,
                sequence_number=self.sequence_number,
                is_final_frame=is_final_frame,
                signer=self.signer,
            )
            final_frame_written = is_final_frame

            output += ciphertext
            self.sequence_number += 1

        if finalize:
            _LOGGER.debug("Writing footer")
            if self.signer is not None:
                output += serialize_footer(self.signer)
            self.__message_complete = True
        return output

    def _read_bytes(self, b):
        """Reads the requested number of bytes from a streaming message body.

        :param int b: Number of bytes to read
        :raises NotSupportedError: if content type is not supported
        """
        _LOGGER.debug("%d bytes requested from stream with content type: %s", b, self.content_type)
        # Nothing to do if the buffer already satisfies the request or the
        # message has been fully written.
        if 0 <= b <= len(self.output_buffer) or self.__message_complete:
            _LOGGER.debug("No need to read from source stream or source stream closed")
            return

        if self.content_type == ContentType.FRAMED_DATA:
            _LOGGER.debug("Reading to framed body")
            self.output_buffer += self._read_bytes_to_framed_body(b)
        elif self.content_type == ContentType.NO_FRAMING:
            _LOGGER.debug("Reading to non-framed body")
            self.output_buffer += self._read_bytes_to_non_framed_body(b)
        else:
            raise NotSupportedError("Unsupported content type")

        # To maintain backwards compatibility, only enforce this if a CMM is provided by the caller.
        if self.config.key_provider is None and self.config.source_length is not None:
            # Enforce that if the caller provided a source length value, the total bytes encrypted
            # must not exceed that value.
            if self._bytes_encrypted > self.config.source_length:
                raise CustomMaximumValueExceeded(
                    "Bytes encrypted has exceeded stated source length estimate:\n{actual:d} > {estimated:d}".format(
                        actual=self._bytes_encrypted, estimated=self.config.source_length
                    )
                )

    def close(self):
        """Closes out the stream."""
        _LOGGER.debug("Closing stream")
        super(StreamEncryptor, self).close()
@attr.s(hash=True)
class DecryptorConfig(_ClientConfig):
"""Configuration object for StreamDecryptor class.
:param source: Source data to encrypt or decrypt
:type source: str, bytes, io.IOBase, or file
:param materials_manager: `CryptoMaterialsManager` from which to obtain cryptographic materials
(either `materials_manager` or `key_provider` required)
:type materials_manager: aws_encryption_sdk.materials_managers.base.CryptoMaterialsManager
:param key_provider: `MasterKeyProvider` from which to obtain data keys for decryption
(either `materials_manager` or `key_provider` required)
:type key_provider: aws_encryption_sdk.key_providers.base.MasterKeyProvider
:param int source_length: Length of source data (optional)
.. note::
If source_length is not provided and read() is called, will attempt to seek()
to the end of the stream and tell() to find the length of source data.
:param | |
= Button(description='initial number of unbound ACE2 receptors on surface', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float58, units_btn, description_btn]
box66 = Box(children=row, layout=box_layout)
name_btn = Button(description='bound_external_ACE2', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float59 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='receptors', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='initial number of bound ACE2 receptors on surface', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float59, units_btn, description_btn]
box67 = Box(children=row, layout=box_layout)
name_btn = Button(description='unbound_internal_ACE2', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float60 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='receptors', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='initial number of internalized unbound ACE2 receptors', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float60, units_btn, description_btn]
box68 = Box(children=row, layout=box_layout)
name_btn = Button(description='bound_internal_ACE2', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float61 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='receptors', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='initial number of internalized bound ACE2 receptors', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float61, units_btn, description_btn]
box69 = Box(children=row, layout=box_layout)
name_btn = Button(description='ACE2_binding_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float62 = FloatText(value='0.001', step='0.0001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='ACE2 receptor-virus binding rate', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float62, units_btn, description_btn]
box70 = Box(children=row, layout=box_layout)
name_btn = Button(description='ACE2_endocytosis_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float63 = FloatText(value='0.01', step='0.001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='ACE2 receptor-virus endocytosis rate', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float63, units_btn, description_btn]
box71 = Box(children=row, layout=box_layout)
name_btn = Button(description='ACE2_cargo_release_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float64 = FloatText(value='0.001', step='0.0001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='ACE2 receptor-virus cargo release rate', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float64, units_btn, description_btn]
box72 = Box(children=row, layout=box_layout)
name_btn = Button(description='ACE2_recycling_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float65 = FloatText(value='0.01', step='0.001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='ACE2 receptor recycling rate', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float65, units_btn, description_btn]
box73 = Box(children=row, layout=box_layout)
name_btn = Button(description='max_infected_apoptosis_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float66 = FloatText(value='0.001', step='0.0001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='maximum rate of cell apoptosis due to viral infection', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float66, units_btn, description_btn]
box74 = Box(children=row, layout=box_layout)
name_btn = Button(description='max_apoptosis_half_max', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float67 = FloatText(value='250', step='10', style=style, layout=widget_layout)
units_btn = Button(description='virion', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='viral load at which cells reach half max apoptosis rate', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float67, units_btn, description_btn]
box75 = Box(children=row, layout=box_layout)
name_btn = Button(description='apoptosis_hill_power', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float68 = FloatText(value='1', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='none', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='Hill power for viral load apoptosis response', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float68, units_btn, description_btn]
box76 = Box(children=row, layout=box_layout)
name_btn = Button(description='virus_fraction_released_at_death', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float69 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='none', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='fraction of internal virus released at cell death', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float69, units_btn, description_btn]
box77 = Box(children=row, layout=box_layout)
name_btn = Button(description='infected_cell_chemokine_secretion_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float70 = FloatText(value='1', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='max rate that infected cells secrete chemokine', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float70, units_btn, description_btn]
box78 = Box(children=row, layout=box_layout)
name_btn = Button(description='debris_secretion_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float71 = FloatText(value='1', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='rate that dead cells release debris', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float71, units_btn, description_btn]
box79 = Box(children=row, layout=box_layout)
name_btn = Button(description='infected_cell_chemokine_secretion_activated', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float72 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='none', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='used internally to track activation of chemokine secretion', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float72, units_btn, description_btn]
box80 = Box(children=row, layout=box_layout)
name_btn = Button(description='nuclear_NFkB', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float73 = FloatText(value='0.25', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='initial nuclear NFkB concentration', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float73, units_btn, description_btn]
box81 = Box(children=row, layout=box_layout)
name_btn = Button(description='inactive_NLRP3', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float74 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='initial concentration of inactive NLRP3', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float74, units_btn, description_btn]
box82 = Box(children=row, layout=box_layout)
name_btn = Button(description='active_NLRP3', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float75 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='initial concentration of active NLRP3', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float75, units_btn, description_btn]
box83 = Box(children=row, layout=box_layout)
name_btn = Button(description='bound_NLRP3', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float76 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='initial concentration of inflammasone bound', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float76, units_btn, description_btn]
box84 = Box(children=row, layout=box_layout)
name_btn = Button(description='bound_ASC', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float77 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='initial concentration of bound ASC', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float77, units_btn, description_btn]
box85 = Box(children=row, layout=box_layout)
name_btn = Button(description='bound_caspase1', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float78 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='initial concentration of bound caspase1', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float78, units_btn, description_btn]
box86 = Box(children=row, layout=box_layout)
name_btn = Button(description='cleaved_gasderminD', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float79 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='initial concentration cleaved gasderminD', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float79, units_btn, description_btn]
box87 = Box(children=row, layout=box_layout)
name_btn = Button(description='pro_IL_1b', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float80 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='initial concentration pro-IL-1b', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float80, units_btn, description_btn]
box88 = Box(children=row, layout=box_layout)
name_btn = Button(description='cytoplasmic_IL_1b', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float81 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='initial concentration cytoplasmic IL-1b', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float81, units_btn, description_btn]
box89 = Box(children=row, layout=box_layout)
name_btn = Button(description='external_IL_1b', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float82 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='initial concentration external IL-1b', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float82, units_btn, description_btn]
box90 = Box(children=row, layout=box_layout)
name_btn = Button(description='cytoplasmic_IL_18', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float83 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='initial concentration cytoplasmic IL-18', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float83, units_btn, description_btn]
box91 = Box(children=row, layout=box_layout)
name_btn = Button(description='external_IL_18', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float84 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='initial concentration external IL-18', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float84, units_btn, description_btn]
box92 = Box(children=row, layout=box_layout)
name_btn = Button(description='cytoplasmic_volume', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float85 = FloatText(value='2494', step='100', style=style, layout=widget_layout)
units_btn = Button(description='a.u. of volume', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='cytoplasmic cell volume', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float85, units_btn, description_btn]
box93 = Box(children=row, layout=box_layout)
| |
= Constraint(expr= - 6*m.b103 + m.x1384 <= 0)
# Linking constraints pairing each continuous variable x13xx/x14xx with an
# indicator variable b1xx: each enforces x <= M*b with M in {5, 6, 7, 8},
# so x is forced to 0 unless its indicator is selected.
# NOTE(review): assumes the b variables are binary (declared earlier in this
# generated file) — big-M style linking; confirm against the declarations.
m.c4525 = Constraint(expr= - 6*m.b104 + m.x1385 <= 0)
m.c4526 = Constraint(expr= - 6*m.b105 + m.x1386 <= 0)
m.c4527 = Constraint(expr= - 6*m.b106 + m.x1387 <= 0)
m.c4528 = Constraint(expr= - 6*m.b107 + m.x1388 <= 0)
m.c4529 = Constraint(expr= - 6*m.b108 + m.x1389 <= 0)
m.c4530 = Constraint(expr= - 6*m.b109 + m.x1390 <= 0)
m.c4531 = Constraint(expr= - 6*m.b110 + m.x1391 <= 0)
m.c4532 = Constraint(expr= - 6*m.b111 + m.x1392 <= 0)
m.c4533 = Constraint(expr= - 6*m.b112 + m.x1393 <= 0)
m.c4534 = Constraint(expr= - 6*m.b113 + m.x1394 <= 0)
m.c4535 = Constraint(expr= - 6*m.b114 + m.x1395 <= 0)
m.c4536 = Constraint(expr= - 6*m.b115 + m.x1396 <= 0)
m.c4537 = Constraint(expr= - 6*m.b116 + m.x1397 <= 0)
m.c4538 = Constraint(expr= - 6*m.b117 + m.x1398 <= 0)
m.c4539 = Constraint(expr= - 6*m.b118 + m.x1399 <= 0)
m.c4540 = Constraint(expr= - 6*m.b119 + m.x1400 <= 0)
m.c4541 = Constraint(expr= - 6*m.b120 + m.x1401 <= 0)
m.c4542 = Constraint(expr= - 5*m.b121 + m.x1402 <= 0)
m.c4543 = Constraint(expr= - 5*m.b122 + m.x1403 <= 0)
m.c4544 = Constraint(expr= - 5*m.b123 + m.x1404 <= 0)
m.c4545 = Constraint(expr= - 5*m.b124 + m.x1405 <= 0)
m.c4546 = Constraint(expr= - 5*m.b125 + m.x1406 <= 0)
m.c4547 = Constraint(expr= - 5*m.b126 + m.x1407 <= 0)
m.c4548 = Constraint(expr= - 5*m.b127 + m.x1408 <= 0)
m.c4549 = Constraint(expr= - 5*m.b128 + m.x1409 <= 0)
m.c4550 = Constraint(expr= - 5*m.b129 + m.x1410 <= 0)
m.c4551 = Constraint(expr= - 5*m.b130 + m.x1411 <= 0)
m.c4552 = Constraint(expr= - 5*m.b131 + m.x1412 <= 0)
m.c4553 = Constraint(expr= - 5*m.b132 + m.x1413 <= 0)
m.c4554 = Constraint(expr= - 5*m.b133 + m.x1414 <= 0)
m.c4555 = Constraint(expr= - 5*m.b134 + m.x1415 <= 0)
m.c4556 = Constraint(expr= - 5*m.b135 + m.x1416 <= 0)
m.c4557 = Constraint(expr= - 5*m.b136 + m.x1417 <= 0)
m.c4558 = Constraint(expr= - 5*m.b137 + m.x1418 <= 0)
m.c4559 = Constraint(expr= - 5*m.b138 + m.x1419 <= 0)
m.c4560 = Constraint(expr= - 5*m.b139 + m.x1420 <= 0)
m.c4561 = Constraint(expr= - 5*m.b140 + m.x1421 <= 0)
m.c4562 = Constraint(expr= - 8*m.b141 + m.x1422 <= 0)
m.c4563 = Constraint(expr= - 8*m.b142 + m.x1423 <= 0)
m.c4564 = Constraint(expr= - 8*m.b143 + m.x1424 <= 0)
m.c4565 = Constraint(expr= - 8*m.b144 + m.x1425 <= 0)
m.c4566 = Constraint(expr= - 8*m.b145 + m.x1426 <= 0)
m.c4567 = Constraint(expr= - 8*m.b146 + m.x1427 <= 0)
m.c4568 = Constraint(expr= - 8*m.b147 + m.x1428 <= 0)
m.c4569 = Constraint(expr= - 8*m.b148 + m.x1429 <= 0)
m.c4570 = Constraint(expr= - 8*m.b149 + m.x1430 <= 0)
m.c4571 = Constraint(expr= - 8*m.b150 + m.x1431 <= 0)
m.c4572 = Constraint(expr= - 8*m.b151 + m.x1432 <= 0)
m.c4573 = Constraint(expr= - 8*m.b152 + m.x1433 <= 0)
m.c4574 = Constraint(expr= - 8*m.b153 + m.x1434 <= 0)
m.c4575 = Constraint(expr= - 8*m.b154 + m.x1435 <= 0)
m.c4576 = Constraint(expr= - 8*m.b155 + m.x1436 <= 0)
m.c4577 = Constraint(expr= - 8*m.b156 + m.x1437 <= 0)
m.c4578 = Constraint(expr= - 8*m.b157 + m.x1438 <= 0)
m.c4579 = Constraint(expr= - 8*m.b158 + m.x1439 <= 0)
m.c4580 = Constraint(expr= - 8*m.b159 + m.x1440 <= 0)
m.c4581 = Constraint(expr= - 8*m.b160 + m.x1441 <= 0)
m.c4582 = Constraint(expr= - 7*m.b161 + m.x1442 <= 0)
m.c4583 = Constraint(expr= - 7*m.b162 + m.x1443 <= 0)
m.c4584 = Constraint(expr= - 7*m.b163 + m.x1444 <= 0)
m.c4585 = Constraint(expr= - 7*m.b164 + m.x1445 <= 0)
m.c4586 = Constraint(expr= - 7*m.b165 + m.x1446 <= 0)
m.c4587 = Constraint(expr= - 7*m.b166 + m.x1447 <= 0)
m.c4588 = Constraint(expr= - 7*m.b167 + m.x1448 <= 0)
m.c4589 = Constraint(expr= - 7*m.b168 + m.x1449 <= 0)
m.c4590 = Constraint(expr= - 7*m.b169 + m.x1450 <= 0)
m.c4591 = Constraint(expr= - 7*m.b170 + m.x1451 <= 0)
m.c4592 = Constraint(expr= - 7*m.b171 + m.x1452 <= 0)
m.c4593 = Constraint(expr= - 7*m.b172 + m.x1453 <= 0)
m.c4594 = Constraint(expr= - 7*m.b173 + m.x1454 <= 0)
m.c4595 = Constraint(expr= - 7*m.b174 + m.x1455 <= 0)
m.c4596 = Constraint(expr= - 7*m.b175 + m.x1456 <= 0)
m.c4597 = Constraint(expr= - 7*m.b176 + m.x1457 <= 0)
m.c4598 = Constraint(expr= - 7*m.b177 + m.x1458 <= 0)
m.c4599 = Constraint(expr= - 7*m.b178 + m.x1459 <= 0)
m.c4600 = Constraint(expr= - 7*m.b179 + m.x1460 <= 0)
m.c4601 = Constraint(expr= - 7*m.b180 + m.x1461 <= 0)
m.c4602 = Constraint(expr= - 9*m.b181 + m.x1462 <= 0)
m.c4603 = Constraint(expr= - 9*m.b182 + m.x1463 <= 0)
m.c4604 = Constraint(expr= - 9*m.b183 + m.x1464 <= 0)
m.c4605 = Constraint(expr= - 9*m.b184 + m.x1465 <= 0)
m.c4606 = Constraint(expr= - 9*m.b185 + m.x1466 <= 0)
m.c4607 = Constraint(expr= - 9*m.b186 + m.x1467 <= 0)
m.c4608 = Constraint(expr= - 9*m.b187 + m.x1468 <= 0)
m.c4609 = Constraint(expr= - 9*m.b188 + m.x1469 <= 0)
m.c4610 = Constraint(expr= - 9*m.b189 + m.x1470 <= 0)
m.c4611 = Constraint(expr= - 9*m.b190 + m.x1471 <= 0)
m.c4612 = Constraint(expr= - 9*m.b191 + m.x1472 <= 0)
m.c4613 = Constraint(expr= - 9*m.b192 + m.x1473 <= 0)
m.c4614 = Constraint(expr= - 9*m.b193 + m.x1474 <= 0)
m.c4615 = Constraint(expr= - 9*m.b194 + m.x1475 <= 0)
m.c4616 = Constraint(expr= - 9*m.b195 + m.x1476 <= 0)
m.c4617 = Constraint(expr= - 9*m.b196 + m.x1477 <= 0)
m.c4618 = Constraint(expr= - 9*m.b197 + m.x1478 <= 0)
m.c4619 = Constraint(expr= - 9*m.b198 + m.x1479 <= 0)
m.c4620 = Constraint(expr= - 9*m.b199 + m.x1480 <= 0)
m.c4621 = Constraint(expr= - 9*m.b200 + m.x1481 <= 0)
m.c4622 = Constraint(expr= - 6*m.b201 + m.x1482 <= 0)
m.c4623 = Constraint(expr= - 6*m.b202 + m.x1483 <= 0)
m.c4624 = Constraint(expr= - 6*m.b203 + m.x1484 <= 0)
m.c4625 = Constraint(expr= - 6*m.b204 + m.x1485 <= 0)
m.c4626 = Constraint(expr= - 6*m.b205 + m.x1486 <= 0)
m.c4627 = Constraint(expr= - 6*m.b206 + m.x1487 <= 0)
m.c4628 = Constraint(expr= - 6*m.b207 + m.x1488 <= 0)
m.c4629 = Constraint(expr= - 6*m.b208 + m.x1489 <= 0)
m.c4630 = Constraint(expr= - 6*m.b209 + m.x1490 <= 0)
m.c4631 = Constraint(expr= - 6*m.b210 + m.x1491 <= 0)
m.c4632 = Constraint(expr= - 6*m.b211 + m.x1492 <= 0)
m.c4633 = Constraint(expr= - 6*m.b212 + m.x1493 <= 0)
m.c4634 = Constraint(expr= - 6*m.b213 + m.x1494 <= 0)
m.c4635 = Constraint(expr= - 6*m.b214 + m.x1495 <= 0)
m.c4636 = Constraint(expr= - 6*m.b215 + m.x1496 <= 0)
m.c4637 = Constraint(expr= - 6*m.b216 + m.x1497 <= 0)
m.c4638 = Constraint(expr= - 6*m.b217 + m.x1498 <= 0)
m.c4639 = Constraint(expr= - 6*m.b218 + m.x1499 <= 0)
m.c4640 = Constraint(expr= - 6*m.b219 + m.x1500 <= 0)
m.c4641 = Constraint(expr= - 6*m.b220 + m.x1501 <= 0)
m.c4642 = Constraint(expr= - 8*m.b221 + m.x1502 <= 0)
m.c4643 = Constraint(expr= - 8*m.b222 + m.x1503 <= 0)
m.c4644 = Constraint(expr= - 8*m.b223 + m.x1504 <= 0)
m.c4645 = Constraint(expr= - 8*m.b224 + m.x1505 <= 0)
m.c4646 = Constraint(expr= - 8*m.b225 + m.x1506 <= 0)
m.c4647 = Constraint(expr= - 8*m.b226 + m.x1507 <= 0)
m.c4648 = Constraint(expr= - 8*m.b227 + m.x1508 <= 0)
m.c4649 = Constraint(expr= - 8*m.b228 + m.x1509 <= 0)
m.c4650 = Constraint(expr= - 8*m.b229 + m.x1510 <= 0)
m.c4651 = Constraint(expr= - 8*m.b230 + m.x1511 <= 0)
m.c4652 = Constraint(expr= - 8*m.b231 + m.x1512 <= 0)
m.c4653 = Constraint(expr= - 8*m.b232 + m.x1513 <= 0)
m.c4654 = Constraint(expr= - 8*m.b233 + m.x1514 <= 0)
m.c4655 = Constraint(expr= - 8*m.b234 + m.x1515 <= 0)
m.c4656 = Constraint(expr= - 8*m.b235 + m.x1516 <= 0)
m.c4657 = Constraint(expr= - 8*m.b236 + m.x1517 <= 0)
m.c4658 = Constraint(expr= - 8*m.b237 + m.x1518 <= 0)
m.c4659 = Constraint(expr= - 8*m.b238 + m.x1519 <= 0)
m.c4660 = Constraint(expr= - 8*m.b239 + m.x1520 <= 0)
m.c4661 = Constraint(expr= - 8*m.b240 + m.x1521 <= 0)
m.c4662 = Constraint(expr= - 9*m.b241 + m.x1522 <= 0)
m.c4663 = Constraint(expr= - 9*m.b242 + m.x1523 <= 0)
m.c4664 = Constraint(expr= - 9*m.b243 + m.x1524 <= 0)
m.c4665 = Constraint(expr= - 9*m.b244 + m.x1525 <= 0)
m.c4666 = Constraint(expr= - 9*m.b245 + m.x1526 <= 0)
m.c4667 = Constraint(expr= - 9*m.b246 + m.x1527 <= 0)
m.c4668 = Constraint(expr= - 9*m.b247 + m.x1528 <= 0)
m.c4669 = Constraint(expr= - 9*m.b248 + m.x1529 <= 0)
m.c4670 = Constraint(expr= - 9*m.b249 + m.x1530 <= 0)
m.c4671 = Constraint(expr= - 9*m.b250 + m.x1531 <= 0)
m.c4672 = Constraint(expr= - 9*m.b251 + m.x1532 <= 0)
m.c4673 = Constraint(expr= - 9*m.b252 + m.x1533 <= 0)
m.c4674 = Constraint(expr= - 9*m.b253 + m.x1534 <= 0)
m.c4675 = Constraint(expr= - 9*m.b254 + m.x1535 <= 0)
m.c4676 = Constraint(expr= - 9*m.b255 + m.x1536 <= 0)
m.c4677 = Constraint(expr= - 9*m.b256 + m.x1537 <= 0)
m.c4678 = Constraint(expr= - 9*m.b257 + m.x1538 <= 0)
m.c4679 = Constraint(expr= - 9*m.b258 + m.x1539 <= 0)
m.c4680 = Constraint(expr= - 9*m.b259 + m.x1540 <= 0)
m.c4681 = Constraint(expr= - 9*m.b260 + m.x1541 <= 0)
m.c4682 = Constraint(expr= - 8*m.b261 + m.x1542 <= 0)
m.c4683 = Constraint(expr= - 8*m.b262 + m.x1543 <= 0)
m.c4684 | |
import os
import shutil
import importlib
import inspect
from glob import glob
import re
import json
from scrapy.http import Request
from scrapy.utils.python import to_unicode
from scrapy.utils.reqser import request_from_dict, _get_method
from scrapy.exceptions import _InvalidOutput, UsageError
def get_cb_settings(test_dir):
    """Load a test directory's ``config.py`` as a module.

    Returns the loaded module object, or None when no config file exists.
    """
    # importlib.util is a submodule: a plain ``import importlib`` only exposes
    # it if something else happened to import it first, so import it
    # explicitly here to make this function self-contained.
    import importlib.util
    config_path = os.path.join(test_dir, 'config.py')
    if not os.path.exists(config_path):
        return None
    spec = importlib.util.spec_from_file_location("config", config_path)
    config = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(config)
    return config
def get_test_paths(spider_test_dir, spider_path, extra_path, fixture=False):
    """Collect per-callback test directories (or fixture files) for a spider.

    When no callback directory sits directly under ``spider_test_dir``, the
    callbacks are assumed to live under ``extra_path``. With ``fixture=True``
    the returned paths are '*.bin' fixture globs instead of directories.

    Refactor: the two original branches were byte-for-byte duplicates except
    for the base directory, so the common loop is factored out.
    """
    poss_cb_names = get_callbacks(spider_path)
    base_dir = spider_test_dir
    dir_list = os.listdir(base_dir)
    if len(set(dir_list).intersection(set(poss_cb_names))) == 0:
        # No callback dir here; fall through to the extra path if given,
        # otherwise there is nothing to collect (original returned [] too).
        if not extra_path:
            return []
        base_dir = os.path.join(spider_test_dir, extra_path)
        dir_list = os.listdir(base_dir)
    paths = []
    # Directory entries without a '.' are taken to be callback directories.
    for cb in filter(lambda d: '.' not in d, dir_list):
        target = os.path.join(base_dir, cb)
        if fixture:
            target = os.path.join(target, '*.bin')
        paths += glob(target)
    return paths
def update_max_fixtures(cb_settings, global_max_fixtures):
    """Return the per-callback MAX_FIXTURES override, else the global cap."""
    # getattr with a default mirrors the original try/except AttributeError,
    # including the cb_settings=None case.
    return getattr(cb_settings, 'MAX_FIXTURES', global_max_fixtures)
def get_num_fixtures(test_dir):
    """Count '.bin' fixture files in *test_dir*; 0 if the directory is absent."""
    if not os.path.exists(test_dir):
        return 0
    try:
        entries = os.listdir(test_dir)
    except IndexError:
        return 0
    return sum(1 for name in entries if '.bin' in name)
def get_fixture_counts(spider_dir, spider, extra_path):
    """Map each callback of *spider* to its current number of fixtures."""
    counts = {}
    # Candidate callbacks: public callables on the spider, minus start_requests.
    poss_cb_names = [name for name in dir(spider)
                     if not name.startswith('__')
                     and name != "start_requests"
                     and callable(getattr(spider, name))]
    dir_list = os.listdir(spider_dir)
    if set(dir_list).intersection(set(poss_cb_names)):
        # Callback directories sit directly under spider_dir.
        for cb in (d for d in dir_list if '.' not in d):
            counts[cb] = get_num_fixtures(os.path.join(spider_dir, cb))
    elif extra_path:
        # Callback directories live one level deeper, under extra_path.
        new_path = os.path.join(spider_dir, extra_path)
        for cb in (d for d in os.listdir(new_path) if '.' not in d):
            counts[cb] = get_num_fixtures(os.path.join(new_path, cb))
    else:
        # Nothing on disk yet: every known callback starts at zero.
        counts = {cb: 0 for cb in poss_cb_names}
    return counts
def basic_items_check(items, obligate_fields, primary_fields, request_url):
    """Validate items: all obligate fields present, all primary fields non-empty.

    Raises _InvalidOutput on the first violation.
    """
    for item in items:
        present = set(item.keys())
        if present.intersection(obligate_fields) != obligate_fields:
            missing_fields = obligate_fields.difference(present)
            raise _InvalidOutput("Obligate fields check failed. Request url: %s. "
                "Missing fields: %s" % (request_url, missing_fields))
        for field in primary_fields:
            if not item.get(field, ""):
                raise _InvalidOutput("Primary fields check failed. Request url: %s. "
                    "Empty field: %s" % (request_url, field))
def check_options(spider_settings, config, items, request_url):
    """Run obligate/primary item-field checks, preferring per-callback config.

    Local (config.py) field sets override the global spider settings when
    they are non-empty.
    """
    obligate_global = set(spider_settings.getlist('TESTMASTER_OBLIGATE_ITEM_FIELDS', []))
    primary_global = set(spider_settings.getlist('TESTMASTER_PRIMARY_ITEM_FIELDS', []))
    obligate_local, primary_local = set(), set()
    if config is not None:
        # getattr defaults replace the original try/except AttributeError pairs.
        obligate_local = set(getattr(config, 'OBLIGATE_ITEM_FIELDS', ()))
        primary_local = set(getattr(config, 'PRIMARY_ITEM_FIELDS', ()))
    obligate_fields = obligate_local or obligate_global
    primary_fields = primary_local or primary_global
    basic_items_check(items, obligate_fields, primary_fields, request_url)
def check_global_rules(spider_settings, items, requests, request_url):
    """Apply project-wide item/request rules from TESTMASTER_PATH_TO_RULES_FILE.

    Best-effort: if the rules module cannot be imported, a message is printed
    and the checks are skipped.
    """
    path_to_rules = spider_settings.get('TESTMASTER_PATH_TO_RULES_FILE', None)
    if not path_to_rules:
        return
    try:
        module = importlib.import_module(path_to_rules.replace('/', '.'))
    except Exception as e:
        print(e)
        print("Rules file specified in project/spider "
              "settings does not exist.")
        # Bug fix: previously execution fell through here and crashed with
        # UnboundLocalError on ``module`` below; keep the best-effort contract.
        return
    if hasattr(module, "ItemRules"):
        itemclass = module.ItemRules()
        check_item_rules(itemclass, items, request_url)
    if hasattr(module, "RequestRules"):
        reqclass = module.RequestRules()
        check_req_rules(reqclass, requests, request_url)
def check_local_rules(config, items, requests, request_url):
    """Apply per-callback ItemRules/RequestRules from the local config, if any.

    Bug fix: the original wrapped both the attribute lookup AND the rule
    execution in ``except AttributeError``, silently swallowing genuine
    AttributeErrors raised inside the rule checks themselves. The getattr
    probes below only skip when the rules class is genuinely absent
    (including config=None).
    """
    item_rules_cls = getattr(config, 'ItemRules', None)
    if item_rules_cls is not None:
        check_item_rules(item_rules_cls(), items, request_url)
    req_rules_cls = getattr(config, 'RequestRules', None)
    if req_rules_cls is not None:
        check_req_rules(req_rules_cls(), requests, request_url)
def validate_results(test_dir, spider_settings, items, requests, request_url):
    """Run all option and rule validations for one callback's output."""
    # get_cb_settings already returns None when config.py is absent, so the
    # previous os.path.exists pre-check duplicated its logic.
    config = get_cb_settings(test_dir)
    check_options(spider_settings, config, items, request_url)
    check_local_rules(config, items, requests, request_url)
    check_global_rules(spider_settings, items, requests, request_url)
def check_item_rules(itemclass, items, request_url):
    """Run every public callable rule on *itemclass* against each item."""
    rules = []
    for attr_name in dir(itemclass):
        if attr_name.startswith('__'):
            continue
        member = getattr(itemclass, attr_name)
        if callable(member):
            rules.append((attr_name, member))
    for item in items:
        for rule_name, rule in rules:
            try:
                rule(item)
            except AssertionError:
                raise _InvalidOutput("An item produced by the request with url %s has "
                    "failed the rule %s" % (request_url, rule_name))
def check_req_rules(reqclass, requests, request_url):
    """Run every public callable rule on *reqclass* against each request."""
    rules = []
    for attr_name in dir(reqclass):
        if attr_name.startswith('__'):
            continue
        member = getattr(reqclass, attr_name)
        if callable(member):
            rules.append((attr_name, member))
    for req in requests:
        for rule_name, rule in rules:
            try:
                rule(req)
            except AssertionError:
                raise _InvalidOutput("A request produced by the request with url %s has "
                    "failed the rule %s" % (request_url, rule_name))
def _get_num_objects(result, _type):
return len(list(filter(lambda entry: entry['type'] == _type, result)))
def write_json(test_dir, request, result, fixture_num):
    """Record one fixture's request plus its item/request counts in view.json."""
    fixture = {
        "request": request,
        "num_items": _get_num_objects(result, "item"),
        "num_requests": _get_num_objects(result, "request"),
    }
    json_path = os.path.join(test_dir, 'view.json')
    if os.path.exists(json_path):
        with open(json_path, 'r') as f:
            fixtures = json.load(f)
        fixtures[str(fixture_num)] = fixture
    else:
        # First fixture for this callback: start a fresh mapping keyed "1".
        fixtures = {"1": fixture}
    with open(json_path, 'w') as f:
        json.dump(fixtures, f)
# The requests involved in the current fixtures will be written here, in JSON format
CURRENT_TESTS = [
''' {
"url": "https://examplewebsite011235811.com",
"headers": {"referer":"...", "content_type": "..."},
"cookies": {},
"method": "POST",
"data": {"x": "y"},
"type": "form",
"meta": {"x": "y"},
"fixture_num": 1
},
{
...
}'''
]
# Callback discovery is done with a lightweight regex over the spider source
# rather than importing the spider module.
def get_callbacks(spider_path):
    """Return callback names found in the spider file at *spider_path*.

    A callback is any ``def`` whose signature mentions ``response`` on the
    same line, excluding dunder methods and ``start_requests``.
    """
    with open(spider_path, 'r') as src:
        text = src.read()
    matches = re.findall(r"def\s+(\w+)\([^\n]+response", text)
    return [m for m in matches
            if not m.startswith('__') and m != 'start_requests']
def write_config(path):
    """Copy the bundled config template into *path* as config.py."""
    destination = os.path.join(path, 'config.py')
    # Template lives next to this module; keep the original string concat so
    # behavior is identical even when __file__ has no directory component.
    template = os.path.dirname(__file__) + '/config_doc.py'
    shutil.copyfile(template, destination)
def get_homepage_cookies(spider, mode=""):
    """Fetch session cookies from the spider's homepage.

    The homepage can only be inferred when ``start_urls`` has exactly one
    entry; otherwise {} is returned (or UsageError raised in "parse" mode).
    """
    # Local import keeps the blocking `requests` dependency out of module load.
    import requests
    user_agent = spider.settings.get('USER_AGENT')
    if len(spider.start_urls) == 1:
        inferred_homepage = spider.start_urls[0]
        r = requests.get(inferred_homepage, headers={"User-Agent": user_agent})
        print("HOMEPAGE STATUS CODE: %s" % str(r.status_code))
        return r.cookies.get_dict()
    else:
        if mode == "parse":
            raise UsageError("Homepage option selected but can't determine "
                "homepage from start_urls %s" % spider.name,
                print_help=False)
        print("Couldn't determine homepage to collect cookies from")
        return {}
def get_config_requests(test_dir, spider, max_fixtures):
    """Build Request objects from REQUESTS_TO_ADD in the callback's config.py.

    Missing request-dict keys are filled with Request defaults, each request
    is tagged with update/fixture bookkeeping meta, and the per-callback
    fixture cap is respected. Returns [] when no config requests exist.
    """
    curr_fixture_count = get_num_fixtures(test_dir)
    config = get_cb_settings(test_dir)
    try:
        requests_to_add = config.REQUESTS_TO_ADD
    except AttributeError:
        return []
    defaults = {
        'method': 'GET', 'headers': None, 'body': None, 'cookies': None,
        'meta': None, '_encoding': 'utf-8', 'priority': 0, 'dont_filter': False,
        'errback': None, 'flags': None, 'cb_kwargs': None
    }
    complete_requests = []
    for req in requests_to_add:
        if curr_fixture_count >= max_fixtures:
            break
        for key, val in defaults.items():
            req[key] = req.get(key, val)
        # Bug fix: a request dict without 'meta' received the default None
        # above, and the subscript assignments below then raised TypeError.
        if req['meta'] is None:
            req['meta'] = {}
        # The callback is the method named after the test directory.
        req['callback'] = _get_method(spider, test_dir.split('/')[-1])
        req['meta']['_update'] = 1
        req['meta']['_fixture'] = curr_fixture_count + 1
        complete_requests.append(req)
        curr_fixture_count += 1
    return [request_from_dict(req) for req in complete_requests]
def get_reqs_multiple(test_paths, spider):
    """Gather config-defined requests across several test directories."""
    return [req for path in test_paths
            for req in get_reqs_to_add(path, spider)]
def get_reqs_to_add(test_dir, spider):
    """Resolve the fixture cap for *test_dir*, then build its config requests."""
    default_cap = spider.settings.getint(
        'TESTMASTER_MAX_FIXTURES_PER_CALLBACK',
        default=10
    )
    cb_settings = get_cb_settings(test_dir)
    cap = update_max_fixtures(cb_settings, default_cap)
    return get_config_requests(test_dir, spider, cap)
def trigger_requests(crawler_process, spider, requests):
    """Re-crawl the spider's class with a fixed list of start requests."""
    spidercls = crawler_process.spider_loader.load(spider.name)
    # Override start_requests on the class so the crawl replays exactly
    # these requests and nothing else.
    spidercls.start_requests = lambda s: requests
    crawler_process.crawl(spidercls)
    crawler_process.start()
def cascade_fixtures(test_dir, min_fixture_cleared):
    """Renumber fixtures after deletions so numbering stays contiguous.

    Every fixture numbered above *min_fixture_cleared* is renamed to the next
    free slot starting at *min_fixture_cleared* (ascending order, so a rename
    never clobbers a file that still has to move), and the matching entries
    in view.json are re-keyed to the new numbers.
    """
    fixtures = list(filter(lambda d: '.bin' in d, os.listdir(test_dir)))
    # Pair each filename with its numeric suffix parsed from '<name>N.bin'.
    fixtures_store = [(f, int(re.search(r'(\d+)\.bin', f).group(1))) for f in
        fixtures]
    fixtures_to_move = list(filter(lambda f: f[1] > min_fixture_cleared,
        fixtures_store))
    # Ascending numeric order guarantees the target slot is already vacant.
    fixtures_to_move.sort(key=lambda f: f[1])
    json_path = os.path.join(test_dir, 'view.json')
    with open(json_path, 'r') as f:
        curr_json = json.load(f)
    new_num = min_fixture_cleared
    for name, num in fixtures_to_move:
        os.rename(os.path.join(test_dir, name),
            os.path.join(test_dir, 'fixture%d.bin' % new_num))
        curr_json[str(new_num)] = curr_json[str(num)]
        del curr_json[str(num)]
        new_num += 1
    with open(json_path, 'w') as f:
        json.dump(curr_json, f)
# This and the next function are extremely similar to functions found in
# scrapy/utils/reqser.py. In fact, this 'request_to_dict' function is unchanged.
# What I changed was the helper function '_find_method', replacing "if func_self
# is obj:" with "if type(func_self) is type(obj):". This is because I found that
# for the update command, the callback that became attached to the request via
# the call of request_from_dict in cli.py was bound to a spider instance
# different from the spider instance that I started running via the above
# function, and so request_to_dict in process_spider_input was failing
# (specifically, func_self (spider instance 1) was registering as different from
# obj (spider instance 2)). Checking more simply that both are instances of the
# same spider class seems to solve the problem without breaking anything!
def request_to_dict(request, spider=None):
    """Convert Request object to a dict.

    If a spider is given, it will try to find out the name of the spider
    method used in the callback and store that as the callback. Kept as a
    near-verbatim copy of scrapy's serializer (see the comment above); only
    the _find_method helper differs.
    """
    cb = request.callback
    if callable(cb):
        cb = _find_method(spider, cb)
    eb = request.errback
    if callable(eb):
        eb = _find_method(spider, eb)
    d = {
        'url': to_unicode(request.url), # urls should be safe (safe_string_url)
        'callback': cb,
        'errback': eb,
        'method': request.method,
        'headers': dict(request.headers),
        'body': request.body,
        'cookies': request.cookies,
        'meta': request.meta,
        '_encoding': request._encoding,
        'priority': request.priority,
        'dont_filter': request.dont_filter,
        'flags': request.flags,
        'cb_kwargs': request.cb_kwargs,
    }
    # Subclasses record their import path so request_from_dict can rebuild
    # the exact Request subclass later.
    if type(request) is not Request:
        d['_class'] = request.__module__ + '.' + request.__class__.__name__
    return d
def _find_method(obj, func):
if obj:
try:
func_self = func.__self__
except AttributeError: # func has no __self__
pass
else:
if type(func_self) is type(obj):
members = inspect.getmembers(obj, predicate=inspect.ismethod)
for name, obj_func in members:
# We need to use __func__ to access the original
# function object because instance method objects
# are generated each time attribute | |
<filename>test/replaybuffer.py
# Copyright (c) 2021: <NAME> (<EMAIL>).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import numpy as np
import random
from collections import deque
import torch as T
from test.segment_tree import MinSegmentTree, SumSegmentTree
class ReplayBuffer:
    """Fixed-size FIFO replay buffer for off-policy RL (flat-vector states)."""

    def __init__(self, n_states, n_actions, args, buffer_size=None):
        # Fall back to the global buffer size from args when none is given.
        if buffer_size is None:  # bug fix: identity test instead of == None
            buffer_size = args.buffer_size
        self.device = args.device
        self.states = np.empty([buffer_size, n_states], dtype=np.float32)
        self.next_states = np.empty([buffer_size, n_states], dtype=np.float32)
        # If the dimension of the action exceeds 1 dimension, then self.actions = np.empty([buffer_size, action_dim], dtype=np.float32)
        self.actions = np.empty([buffer_size], dtype=np.float32)
        self.rewards = np.empty([buffer_size], dtype=np.float32)
        self.masks = np.empty([buffer_size], dtype=np.float32)
        self.max_size = buffer_size
        self.ptr, self.cur_len = 0, 0
        self.n_states = n_states
        self.n_actions = n_actions
        self.transitions = []

    def store(self, state, action, reward, next_state, mask):
        """Insert one transition, overwriting the oldest slot when full."""
        self.states[self.ptr] = state
        self.actions[self.ptr] = action
        self.rewards[self.ptr] = reward
        self.next_states[self.ptr] = next_state
        self.masks[self.ptr] = mask
        self.ptr = (self.ptr + 1) % self.max_size
        self.cur_len = min(self.cur_len + 1, self.max_size)

    def sample_batch(self, batch_size):
        """Sample a batch uniformly without replacement as a dict of arrays."""
        index = np.random.choice(self.cur_len, batch_size, replace=False)
        return dict(state=self.states[index],
                    action=self.actions[index],
                    reward=self.rewards[index],
                    next_state=self.next_states[index],
                    mask=self.masks[index],
                    )

    def clear(self):
        """Reset the buffer to empty (storage arrays are reallocated)."""
        self.states = np.empty([self.max_size, self.n_states], dtype=np.float32)
        self.next_states = np.empty([self.max_size, self.n_states], dtype=np.float32)
        self.actions = np.empty([self.max_size], dtype=np.float32)
        self.rewards = np.empty([self.max_size], dtype=np.float32)
        self.masks = np.empty([self.max_size], dtype=np.float32)
        self.ptr, self.cur_len = 0, 0

    def store_transition(self, transition):
        """Append a raw transition and persist them all to bc_memo.npy."""
        self.transitions.append(transition)
        np.save('bc_memo.npy', self.transitions)

    def store_for_BC_data(self, transitions):
        """Bulk-load behavior-cloning transitions into the ring buffer."""
        for t in transitions:
            self.store(*t)

    def __len__(self):
        return self.cur_len

    def ready(self, batch_size):
        """True when at least *batch_size* transitions are stored.

        Bug fix: always returns a bool (previously returned None when not
        ready, which is falsy but surprising).
        """
        return self.cur_len >= batch_size
class ReplayBufferPPO:
    """On-policy rollout storage for PPO: plain lists, emptied each update."""

    def __init__(self):
        self.states = []
        self.actions = []
        self.rewards = []
        self.values = []
        self.masks = []
        self.log_probs = []

    def RB_clear(self):
        """Empty every rollout list in place (list identities are preserved)."""
        for buf in (self.states, self.actions, self.rewards,
                    self.values, self.masks, self.log_probs):
            del buf[:]
class PrioritizedReplayBuffer:
    """Proportional prioritized experience replay (PER).

    Priorities live in segment trees so stratified sampling and min-priority
    queries are O(log n). New transitions receive the running max priority so
    each is sampled at least once before being down-weighted.
    """
    def __init__(self, n_states, args, buffer_size, alpha=0.6):
        # alpha controls how strongly priorities skew sampling (0 = uniform).
        assert alpha >= 0
        self.states = np.empty([buffer_size, n_states], dtype=np.float32)
        self.next_states = np.empty([buffer_size, n_states], dtype=np.float32)
        # If the dimension of the action exceeds 1 dimension, then self.actions = np.empty([buffer_size, action_dim], dtype=np.float32)
        self.actions = np.empty([buffer_size], dtype=np.float32)
        self.rewards = np.empty([buffer_size], dtype=np.float32)
        self.masks = np.empty([buffer_size], dtype=np.float32)
        self.max_size = buffer_size
        self.ptr, self.cur_len, = 0, 0
        self.max_priority, self.tree_ptr = 1.0, 0
        self.alpha = alpha
        # capacity must be positive and a power of 2.
        tree_capacity = 1
        while tree_capacity < self.max_size:
            tree_capacity *= 2
        self.sum_tree = SumSegmentTree(tree_capacity)
        self.min_tree = MinSegmentTree(tree_capacity)
    def store(self, state , action, reward, next_state, mask):
        """Insert one transition at max priority (ring-buffer overwrite)."""
        self.states[self.ptr] = state
        self.actions[self.ptr] = action
        self.rewards[self.ptr] = reward
        self.next_states[self.ptr] = next_state
        self.masks[self.ptr] = mask
        self.ptr = (self.ptr + 1) % self.max_size
        self.cur_len = min(self.cur_len + 1, self.max_size)
        # Mirror the write into both trees; tree_ptr tracks ptr.
        self.sum_tree[self.tree_ptr] = self.max_priority ** self.alpha
        self.min_tree[self.tree_ptr] = self.max_priority ** self.alpha
        self.tree_ptr = (self.tree_ptr + 1) % self.max_size
    def sample_batch(self, batch_size, beta=0.4):
        """Sample by priority; beta anneals the importance-sampling weights."""
        assert len(self) >= batch_size
        assert beta > 0
        indices = self._sample_proportional(batch_size)
        weights = np.array([self._calculate_weight(i, beta) for i in indices])
        return dict(
            state=self.states[indices],
            next_state=self.next_states[indices],
            action=self.actions[indices],
            reward=self.rewards[indices],
            mask=self.masks[indices],
            weights=weights,
            indices=indices,
        )
    def update_priorities(self, indices, priorities):
        """Update priorities of sampled transitions."""
        assert len(indices) == len(priorities)
        for idx, priority in zip(indices, priorities):
            assert priority > 0
            assert 0 <= idx < len(self)
            self.sum_tree[idx] = priority ** self.alpha
            self.min_tree[idx] = priority ** self.alpha
            self.max_priority = max(self.max_priority, priority)
    def _sample_proportional(self, batch_size):
        """Sample indices based on proportions."""
        indices = []
        p_total = self.sum_tree.sum(0, len(self) - 1)
        segment = p_total / batch_size
        # Stratified sampling: one draw per equal slice of the priority mass.
        for i in range(batch_size):
            a = segment * i
            b = segment * (i + 1)
            upperbound = random.uniform(a, b)
            idx = self.sum_tree.retrieve(upperbound)
            indices.append(idx)
        return indices
    def _calculate_weight(self, idx, beta):
        """Calculate the weight of the experience at idx."""
        # get max weight
        p_min = self.min_tree.min() / self.sum_tree.sum()
        max_weight = (p_min * len(self)) ** (-beta)
        # calculate weights
        p_sample = self.sum_tree[idx] / self.sum_tree.sum()
        weight = (p_sample * len(self)) ** (-beta)
        weight = weight / max_weight
        return weight
    def __len__(self):
        return self.cur_len
    def ready(self, batch_size):
        # Returns True when enough transitions are stored, else (implicitly) None.
        if self.cur_len >= batch_size:
            return True
class NStepReplayBuffer:
    """Uniform replay buffer that aggregates n-step returns before storage."""

    def __init__(self, obs_dim, size, batch_size= 32, n_step= 3, gamma= 0.99):
        self.obs_buf = np.zeros([size, obs_dim], dtype=np.float32)
        self.next_obs_buf = np.zeros([size, obs_dim], dtype=np.float32)
        self.acts_buf = np.zeros([size], dtype=np.float32)
        self.rews_buf = np.zeros([size], dtype=np.float32)
        self.done_buf = np.zeros(size, dtype=np.float32)
        self.max_size, self.batch_size = size, batch_size
        self.ptr, self.size = 0, 0
        # Pending transitions awaiting n-step aggregation.
        self.n_step_buffer = deque(maxlen=n_step)
        self.n_step = n_step
        self.gamma = gamma

    def store(self, obs, act, rew, next_obs, done):
        """Queue a transition; write its n-step aggregate once enough pend.

        Returns the oldest pending transition when one was written to the
        buffer, otherwise an empty tuple.
        """
        self.n_step_buffer.append((obs, act, rew, next_obs, done))
        if len(self.n_step_buffer) < self.n_step:
            return ()
        agg_rew, agg_next_obs, agg_done = self._get_n_step_info(
            self.n_step_buffer, self.gamma
        )
        first_obs, first_act = self.n_step_buffer[0][:2]
        slot = self.ptr
        self.obs_buf[slot] = first_obs
        self.next_obs_buf[slot] = agg_next_obs
        self.acts_buf[slot] = first_act
        self.rews_buf[slot] = agg_rew
        self.done_buf[slot] = agg_done
        self.ptr = (slot + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)
        return self.n_step_buffer[0]

    def sample_batch(self):
        """Sample batch_size transitions uniformly without replacement."""
        picks = np.random.choice(
            self.size, size=self.batch_size, replace=False
        )
        batch = self.sample_batch_from_idxs(picks)
        # for N-step Learning: expose the indices for paired 1-step sampling.
        batch['indices'] = picks
        return batch

    def sample_batch_from_idxs(self, indices):
        """Gather a batch at the given indices (used to pair n-step batches)."""
        return dict(
            obs=self.obs_buf[indices],
            next_obs=self.next_obs_buf[indices],
            acts=self.acts_buf[indices],
            rews=self.rews_buf[indices],
            done=self.done_buf[indices],
        )

    def _get_n_step_info(self, n_step_buffer, gamma):
        """Collapse the pending queue into (n-step reward, final next_obs, done)."""
        rew, next_obs, done = n_step_buffer[-1][-3:]
        # Walk backwards through the window, discounting; truncate at the
        # first terminal encountered.
        for _, _, r, n_o, d in reversed(list(n_step_buffer)[:-1]):
            rew = r + gamma * rew * (1 - d)
            if d:
                next_obs, done = n_o, d
        return rew, next_obs, done

    def __len__(self) -> int:
        return self.size
class NStepBuffer:
    """Small FIFO that turns per-step rewards into discounted n-step returns."""

    def __init__(self, gamma=0.99, nstep=3):
        # discounts[i] multiplies the reward i steps ahead.
        self.discounts = [gamma ** i for i in range(nstep)]
        self.nstep = nstep
        self.states = deque(maxlen=self.nstep)
        self.actions = deque(maxlen=self.nstep)
        self.rewards = deque(maxlen=self.nstep)

    def append(self, state, action, reward):
        """Push one environment step onto the queues."""
        self.states.append(state)
        self.actions.append(action)
        self.rewards.append(reward)

    def get(self):
        """Pop the oldest (state, action) with its discounted n-step reward."""
        assert len(self.rewards) > 0
        return self.states.popleft(), self.actions.popleft(), self.nstep_reward()

    def nstep_reward(self):
        """Discounted sum of queued rewards; consumes the oldest reward."""
        total = np.sum([r * d for r, d in zip(self.rewards, self.discounts)])
        self.rewards.popleft()
        return total

    def is_empty(self):
        return len(self.rewards) == 0

    def is_full(self):
        return len(self.rewards) == self.nstep

    def __len__(self):
        return len(self.rewards)
class _ReplayBuffer:
    """Torch-tensor replay buffer with optional n-step reward aggregation.

    NOTE(review): __init__ allocates only action/reward/done tensors, and
    _append stores only those three fields -- states/next_states are never
    kept here despite being accepted. Presumably a subclass adds state
    storage; confirm before standalone use.
    """
    def __init__(self, buffer_size, state_shape, action_shape, device,
            gamma, nstep):
        # _p: next write index (ring buffer); _n: number of filled slots.
        self._p = 0
        self._n = 0
        self.buffer_size = buffer_size
        self.state_shape = state_shape
        self.action_shape = action_shape
        self.device = device
        self.gamma = gamma
        self.nstep = nstep
        self.actions = T.empty(
            (buffer_size, *action_shape), dtype=T.float, device=device)
        self.rewards = T.empty(
            (buffer_size, 1), dtype=T.float, device=device)
        self.dones = T.empty(
            (buffer_size, 1), dtype=T.float, device=device)
        if nstep != 1:
            # Rewards are aggregated over `nstep` steps before being written.
            self.nstep_buffer = NStepBuffer(gamma, nstep)
    def append(self, state, action, reward, done, next_state,
            episode_done=None):
        """Queue one transition; flushes pending n-step entries on episode end."""
        if self.nstep != 1:
            self.nstep_buffer.append(state, action, reward)
            if self.nstep_buffer.is_full():
                state, action, reward = self.nstep_buffer.get()
                self._append(state, action, reward, done, next_state)
            if done or episode_done:
                # Drain the partial window so no transition is lost.
                while not self.nstep_buffer.is_empty():
                    state, action, reward = self.nstep_buffer.get()
                    self._append(state, action, reward, done, next_state)
        else:
            self._append(state, action, reward, done, next_state)
    def _append(self, state, action, reward, done, next_state):
        # Only action/reward/done are persisted here (see class NOTE above).
        self.actions[self._p].copy_(T.from_numpy(action))
        self.rewards[self._p] = float(reward)
        self.dones[self._p] = float(done)
        self._p = (self._p + 1) % self.buffer_size
        self._n = min(self._n + 1, self.buffer_size)
class NStepPrioritizedReplayBuffer(NStepReplayBuffer):
def __init__(self, obs_dim, size, batch_size = 32, alpha = 0.6, n_step = 1, gamma = 0.99,):
"""Initialization."""
assert alpha >= 0
super(NStepPrioritizedReplayBuffer, self).__init__(
obs_dim, size, batch_size, n_step, gamma
)
self.max_priority, self.tree_ptr = 1.0, 0
self.alpha = alpha
# capacity must be positive and a power of 2.
tree_capacity = 1
while tree_capacity < self.max_size:
tree_capacity *= 2
self.sum_tree = SumSegmentTree(tree_capacity)
self.min_tree = MinSegmentTree(tree_capacity)
def store(self, obs, act, rew, next_obs, done,):
"""Store experience and priority."""
transition = super().store(obs, act, rew, next_obs, done)
if transition:
self.sum_tree[self.tree_ptr] = self.max_priority ** self.alpha
self.min_tree[self.tree_ptr] = self.max_priority ** self.alpha
self.tree_ptr = (self.tree_ptr + 1) % self.max_size
return transition
def sample_batch(self, beta = 0.4):
"""Sample a batch of experiences."""
assert len(self) >= self.batch_size
assert beta > 0
indices = self._sample_proportional()
obs = self.obs_buf[indices]
next_obs = self.next_obs_buf[indices]
acts = self.acts_buf[indices]
rews = self.rews_buf[indices]
done = self.done_buf[indices]
weights = np.array([self._calculate_weight(i, beta) for i in indices])
return dict(
obs=obs,
next_obs=next_obs,
acts=acts,
rews=rews,
done=done,
weights=weights,
indices=indices,
)
def update_priorities(self, indices, priorities):
"""Update priorities of sampled transitions."""
assert len(indices) == len(priorities)
for idx, priority in zip(indices, priorities):
assert priority > 0
assert 0 <= idx < len(self)
self.sum_tree[idx] = priority ** self.alpha
self.min_tree[idx] = priority ** self.alpha
self.max_priority = max(self.max_priority, priority)
def _sample_proportional(self):
"""Sample indices based on proportions."""
indices = []
p_total = self.sum_tree.sum(0, len(self) - 1)
segment = p_total / self.batch_size
for i in range(self.batch_size):
a = segment * i
b = segment * (i + | |
<reponame>jkabalar/3DSSG
# Allow running this file directly from its own directory by making the
# parent package importable.
if __name__ == '__main__' and __package__ is None:
    from os import sys
    sys.path.append('../')
import argparse, trimesh
import open3d as o3d
import numpy as np
from tqdm import tqdm
from pathlib import Path
from utils import util, util_label, define
from utils.util_search import SAMPLE_METHODS,find_neighbors
from utils import dataLoaderScanNet
debug = True
# NOTE(review): the assignment below immediately disables the toggle above;
# comment it out to enable debug mode.
debug = False
name_same_segment = define.NAME_SAME_PART
def Parser(add_help=True):
    """Build the CLI parser for ScanNet ground-truth relationship generation.

    Args:
        add_help: forwarded to argparse; set False when composing parsers.

    Returns:
        A configured argparse.ArgumentParser.
    """
    def _str2bool(value):
        # Bug fix: `type=bool` treats ANY non-empty string (including
        # "False") as True. Parse common boolean spellings explicitly.
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ('yes', 'true', 't', 'y', '1'):
            return True
        if lowered in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('boolean value expected, got %r' % value)
    parser = argparse.ArgumentParser(description='Process some integers.', formatter_class = argparse.ArgumentDefaultsHelpFormatter,
                                     add_help=add_help,conflict_handler='resolve')
    # Fixed help text: the original string was copy-pasted from an unrelated option.
    parser.add_argument('--type', type=str, default='train', choices=['train', 'test', 'validation'], help="which data split to process",required=False)
    parser.add_argument('--label_type', type=str,default='ScanNet20',
                        choices=['3RScan', '3RScan160', 'NYU40', 'Eigen13', 'RIO27', 'RIO7','ScanNet20'], help='label',required=False)
    parser.add_argument('--pth_out', type=str,default='../data/tmp', help='pth to output directory',required=False)
    parser.add_argument('--target_scan', type=str, default='', help='')
    parser.add_argument('--scan_name', type=str, default='inseg.ply', help='what is the name of the output filename of the ply generated by your segmentation method.')
    ## options
    parser.add_argument('--verbose', type=_str2bool, default=False, help='verbal',required=False)
    parser.add_argument('--debug', type=int, default=0, help='debug',required=False)
    ## neighbor search parameters
    parser.add_argument('--search_method', type=str, choices=['BBOX','KNN'],default='BBOX',help='How to split the scene.')
    parser.add_argument('--radius_receptive', type=float,default=0.5,help='The receptive field of each seed.')
    # # Correspondence Parameters
    parser.add_argument('--max_dist', type=float,default=0.1,help='maximum distance to find corresopndence.')
    parser.add_argument('--min_seg_size', type=int,default=512,help='Minimum number of points of a segment.')
    parser.add_argument('--corr_thres', type=float,default=0.5,help='How the percentage of the points to the same target segment must exceeds this value.')
    parser.add_argument('--occ_thres', type=float,default=0.75,help='2nd/1st must smaller than this.')
    return parser
def load_inseg(pth_ply):
    """Load a predicted-segmentation ply.

    Returns:
        (mesh, vertices, labels): the trimesh object, its vertex positions,
        and the flattened per-vertex segment label array from the raw ply data.
    """
    mesh = trimesh.load(pth_ply, process=False)
    vertices = mesh.vertices
    labels = mesh.metadata['ply_raw']['vertex']['data']['label'].flatten()
    return mesh, vertices, labels
def process(pth_scan, scan_id, label_type, verbose=False) -> list:
    """Match predicted segments of one scan against GT ScanNet segments.

    Loads the predicted segmentation ply and the GT mesh/labels, filters tiny
    predicted segments, votes per-point for the nearest GT segment, keeps the
    best unambiguous correspondence per predicted segment, and builds
    "same part" relationships between predicted segments sharing a GT segment.

    Returns:
        (list_relationships, segs_neighbors): zero-or-one relationship dicts
        and the neighbor map from find_neighbors.
    """
    # some params
    max_distance = args.max_dist
    filter_segment_size = args.min_seg_size # if the num of points within a segment below this threshold, discard this
    filter_corr_thres = args.corr_thres # if percentage of the corresponding label must exceed this value to accept the correspondence
    filter_occ_ratio = args.occ_thres
    # Input paths: predicted cloud plus GT ply / aggregation / segmentation files.
    pth_pd = os.path.join(define.SCANNET_DATA_PATH,scan_id,args.scan_name)
    pth_ply = os.path.join(define.SCANNET_DATA_PATH,scan_id,scan_id+define.SCANNET_PLY_SUBFIX)
    pth_agg = os.path.join(define.SCANNET_DATA_PATH,scan_id,scan_id+define.SCANNET_AGGRE_SUBFIX)
    pth_seg = os.path.join(define.SCANNET_DATA_PATH,scan_id,scan_id+define.SCANNET_SEG_SUBFIX)
    cloud_gt, points_gt, labels_gt, segments_gt = dataLoaderScanNet.load_scannet(pth_ply, pth_agg, pth_seg)
    cloud_pd, points_pd, segments_pd = load_inseg(pth_pd)
    # get num of segments
    segment_ids = np.unique(segments_pd)
    segment_ids = segment_ids[segment_ids!=0]  # segment id 0 is excluded
    if args.verbose: print('filtering input segments.. (ori num of segments:',len(segment_ids),')')
    # Drop predicted segments with too few points.
    segments_pd_filtered=list()
    for seg_id in segment_ids:
        pts = points_pd[np.where(segments_pd==seg_id)]
        if len(pts) > filter_segment_size:
            segments_pd_filtered.append(seg_id)
    segment_ids = segments_pd_filtered
    if args.verbose: print('there are',len(segment_ids), 'segemnts after filtering:\n', segment_ids)
    segs_neighbors = find_neighbors(points_pd, segments_pd, search_method,receptive_field=args.radius_receptive,selected_keys=segment_ids)
    ''' Check GT segments and labels '''
    _, label_names, _ = util_label.getLabelMapping(args.label_type)
    # print('label_names:',label_names)
    instance2labelName = dict()
    size_segments_gt = dict()
    uni_seg_gt_ids = np.unique(segments_gt).tolist()
    # Resolve GT segments carrying multiple labels: keep the majority label;
    # minority points are split into a new segment or zeroed out.
    for seg_id in uni_seg_gt_ids:
        indices = np.where(segments_gt == seg_id)
        seg = segments_gt[indices]
        labels = labels_gt[indices]
        uq_label = np.unique(labels).tolist()
        if len(uq_label) > 1:
            if verbose or debug:
                print('segment',seg_id,'has multiple labels (',uq_label,') in GT. Try to remove other labels.')
            max_id=0
            max_value=0
            # Find the majority label within this GT segment.
            for id in uq_label:
                if verbose or debug:
                    print(id, len(labels[labels==id]), '{:1.3f}'.format(len(labels[labels==id])/len(labels)))
                if len(labels[labels==id])>max_value:
                    max_value = len(labels[labels==id])
                    max_id = id
            for label in uq_label:
                if label == max_id: continue
                # NOTE(review): this condition reuses `id` left over from the
                # loop above; it likely should test `label` — confirm before changing.
                if len(labels[labels==id]) > filter_segment_size: # try to generate new segment
                    new_seg_idx = max(uni_seg_gt_ids)+1
                    uni_seg_gt_ids.append(new_seg_idx)
                    for idx in indices[0]:
                        if labels_gt[idx] == label:
                            segments_gt[idx] = new_seg_idx
                else:
                    for idx in indices[0]:
                        if labels_gt[idx] == label:
                            segments_gt[idx] = 0
                            labels_gt[idx] = 0 # set other label to 0
            seg = segments_gt[indices]
            labels = labels_gt[indices]
            uq_label = [max_id]
        # Map the (now unique) NYU40 label id to a name; out-of-range -> 'none'.
        if uq_label[0] == 0 or uq_label[0] > 40:
            name = 'none'
        else:
            name = util_label.NYU40_Label_Names[uq_label[0]-1]
        # print(name)
        if name not in label_names.values():
            name = 'none'
        # if label_type == 'ScanNet20':
        #     if name not in util_label.SCANNET20_Label_Names:
        #         name = 'none'
        size_segments_gt[seg_id] = len(seg)
        instance2labelName[seg_id] = name
    if verbose:
        print('instance2labelNames:')
        print(instance2labelName)
    ''' Save as ply '''
    if debug:
        # Debug dump: GT cloud colored by NYU40 label palette.
        colors = util_label.get_NYU40_color_palette()
        cloud_gt.visual.vertex_colors = [0,0,0,255]
        for seg, label_name in instance2labelName.items():
            segment_indices = np.where(segments_gt == seg)[0]
            if label_name == 'none':continue
            label = util_label.NYU40_Label_Names.index(label_name)+1
            for index in segment_indices:
                cloud_gt.visual.vertex_colors[index][:3] = colors[label]
        cloud_gt.export('tmp_gtcloud.ply')
    size_segments_pd = dict()
    ''' Find and count all corresponding segments'''
    # KD-tree over GT points; each predicted point within max_distance of its
    # nearest GT point votes for that GT point's segment.
    tree = o3d.geometry.KDTreeFlann(points_gt.transpose())
    count_seg_pd_2_corresponding_seg_gts = dict() # counts each segment_pd to its corresonding segment_gt
    for segment_id in segment_ids:
        segment_indices = np.where(segments_pd == segment_id)[0]
        segment_points = points_pd[segment_indices]
        size_segments_pd[segment_id] = len(segment_points)
        if filter_segment_size > 0:
            if size_segments_pd[segment_id] < filter_segment_size:
                continue
        for i in range(len(segment_points)):
            point = segment_points[i]
            k, idx, distance = tree.search_knn_vector_3d(point,1)
            if distance[0] > max_distance: continue
            segment_gt = segments_gt[idx][0]
            if segment_gt not in instance2labelName: continue
            if instance2labelName[segment_gt] == 'none': continue
            if segment_id not in count_seg_pd_2_corresponding_seg_gts:
                count_seg_pd_2_corresponding_seg_gts[segment_id] = dict()
            if segment_gt not in count_seg_pd_2_corresponding_seg_gts[segment_id]:
                count_seg_pd_2_corresponding_seg_gts[segment_id][segment_gt] = 0
            count_seg_pd_2_corresponding_seg_gts[segment_id][segment_gt] += 1
    if verbose or debug:
        print('There are {} out of {} segments have found their correponding GT segments.'.\
              format(len(count_seg_pd_2_corresponding_seg_gts),len(segment_ids)))
        for k,i in count_seg_pd_2_corresponding_seg_gts.items():
            print('\t{}: {}'.format(k,len(i)))
    ''' Find best corresponding segment '''
    map_segment_pd_2_gt = dict() # map segment_pd to segment_gt
    gt_segments_2_pd_segments = dict() # how many segment_pd corresponding to this segment_gt
    for segment_id, cor_counter in count_seg_pd_2_corresponding_seg_gts.items():
        size_pd = size_segments_pd[segment_id]
        if verbose or debug: print('segment_id', segment_id, size_pd)
        max_corr_ratio = -1
        max_corr_seg = -1
        list_corr_ratio = list()
        for segment_gt, count in cor_counter.items():
            size_gt = size_segments_gt[segment_gt]
            corr_ratio = count/size_pd
            list_corr_ratio.append(corr_ratio)
            if corr_ratio > max_corr_ratio:
                max_corr_ratio = corr_ratio
                max_corr_seg = segment_gt
            if verbose or debug: print('\t{0:s} {1:3d} {2:8d} {3:2.3f} {4:2.3f}'.\
                  format(instance2labelName[segment_gt],segment_gt,count, count/size_gt, corr_ratio))
        # occ_ratio = 2nd-best / best vote share; high means ambiguous mapping.
        # NOTE(review): `> 2` leaves occ_ratio at 0 when there are exactly two
        # candidates — possibly intended to be `>= 2`; confirm.
        if len(list_corr_ratio ) > 2:
            list_corr_ratio = sorted(list_corr_ratio,reverse=True)
            occ_ratio = list_corr_ratio[1]/list_corr_ratio[0]
        else:
            occ_ratio = 0
        if max_corr_ratio > filter_corr_thres and occ_ratio < filter_occ_ratio:
            '''
            This is to prevent a segment is almost equally occupied two or more gt segments.
            '''
            # NOTE(review): instance2labelName[segment_gt] uses the LAST loop
            # value, not max_corr_seg — the printed label may be wrong; confirm.
            if verbose or debug: print('add correspondence of segment {:s} {:4d} to label {:4d} with the ratio {:2.3f} {:1.3f}'.\
                format(instance2labelName[segment_gt],segment_id,max_corr_seg,max_corr_ratio,occ_ratio))
            map_segment_pd_2_gt[segment_id] = max_corr_seg
            if max_corr_seg not in gt_segments_2_pd_segments:
                gt_segments_2_pd_segments[max_corr_seg] = list()
            gt_segments_2_pd_segments[max_corr_seg].append(segment_id)
        else:
            if verbose or debug: print('filter correspondence segment {:s} {:4d} to label {:4d} with the ratio {:2.3f} {:1.3f}'.\
                format(instance2labelName[segment_gt],segment_id,max_corr_seg,max_corr_ratio,occ_ratio))
    if verbose:
        print('final correspondence:')
        print('  pd  gt')
        for segment, label in sorted(map_segment_pd_2_gt.items()):
            print("{:4d} {:4d}".format(segment,label))
        print('final pd segments within the same gt segment')
        for gt_segment, pd_segments in sorted(gt_segments_2_pd_segments.items()):
            print('{} :'.format(gt_segment),end='')
            for pd_segment in pd_segments:
                print('{} '.format(pd_segment),end='')
            print('')
    ''' Save as ply '''
    if debug:
        # Debug dump: predicted cloud colored by its mapped GT label.
        colors = util_label.get_NYU40_color_palette()
        cloud_pd.visual.vertex_colors = [0,0,0,255]
        for segment_pd, segment_gt in map_segment_pd_2_gt.items():
            segment_indices = np.where(segments_pd == segment_pd)[0]
            label = util_label.NYU40_Label_Names.index(instance2labelName[segment_gt])+1
            color = colors[label]
            for index in segment_indices:
                cloud_pd.visual.vertex_colors[index][:3] = color
        cloud_pd.export('tmp_corrcloud.ply')
    '''' Save as relationship_*.json '''
    list_relationships = list()
    relationships = gen_relationship(0, map_segment_pd_2_gt, instance2labelName,
                                     gt_segments_2_pd_segments)
    if len(relationships["objects"]) != 0 and len(relationships['relationships']) != 0:
        list_relationships.append(relationships)
    return list_relationships, segs_neighbors
def gen_relationship(split:int, map_segment_pd_2_gt:dict,instance2labelName:dict,gt_segments_2_pd_segments:dict,
                 target_segments:list=None) -> dict:
    """Assemble one relationships_*.json entry for the current scan.

    Records each predicted segment's GT label name, then emits a symmetric
    "same part" relation for every pair of predicted segments that map to
    the same GT segment. Reads module globals `scan_id` and
    `name_same_segment`.
    """
    result = dict()
    result["scan"] = scan_id
    result["split"] = split
    objects = dict()
    for seg, segment_gt in map_segment_pd_2_gt.items():
        if target_segments is not None and seg not in target_segments:
            continue
        label = instance2labelName[segment_gt]
        assert label != '-' and label != 'none'
        objects[int(seg)] = label
    result["objects"] = objects
    pair_relations = list()
    ''' Build "same part" relationship '''
    idx_in_txt_new = 0  # single relation id: every emitted pair is "same part"
    for groups in gt_segments_2_pd_segments.values():
        if target_segments is not None:
            groups = [g for g in groups if g in target_segments]
        if len(groups) <= 1:
            continue
        # Emit both directions for every unordered pair within the group.
        for i in range(len(groups)):
            for j in range(i + 1, len(groups)):
                a, b = int(groups[i]), int(groups[j])
                pair_relations.append([a, b, idx_in_txt_new, name_same_segment])
                pair_relations.append([b, a, idx_in_txt_new, name_same_segment])
    result["relationships"] = pair_relations
    return result
if __name__ == '__main__':
args = Parser().parse_args()
debug |= args.debug
args.verbose |= args.debug
if debug:
args.verbose=True
if args.search_method == 'BBOX':
search_method = SAMPLE_METHODS.BBOX
elif args.search_method == 'KNN':
search_method = SAMPLE_METHODS.RADIUS
util.set_random_seed(2020)
import os,json
label_names, _, _ = util_label.getLabelMapping(args.label_type)
classes_json = list()
for key,value in label_names.items():
if value == '-':continue
classes_json.append(value)
target_scan=[]
if args.target_scan != '':
target_scan = util.read_txt_to_list(args.target_scan)
scan_ids = target_scan
else:
if args.type == 'train':
scan_ids = util.read_txt_to_list(define.SCANNET_SPLIT_TRAIN)
elif args.type == 'validation':
scan_ids = util.read_txt_to_list(define.SCANNET_SPLIT_VAL)
valid_scans=list()
relationships_new = dict()
relationships_new["scans"] = list()
relationships_new['neighbors'] = dict()
counter= 0
for scan_id in tqdm(sorted(scan_ids)):
# if len(target_scan) != 0: if scan_id not in target_scan: continue
if debug or args.verbose: print(scan_id)
relationships, segs_neighbors = process(define.SCANNET_DATA_PATH,scan_id,label_type = args.label_type, verbose = args.verbose)
valid_scans.append(scan_id)
relationships_new["scans"] += relationships
relationships_new['neighbors'][scan_id] = segs_neighbors
if debug: break
Path(args.pth_out).mkdir(parents=True, exist_ok=True)
pth_args = os.path.join(args.pth_out,'args.json')
with open(pth_args, 'w') as f:
tmp = vars(args)
json.dump(tmp, f, indent=2)
pth_relationships_json = os.path.join(args.pth_out, "relationships_" + args.type + ".json")
with open(pth_relationships_json, 'w') as f:
json.dump(relationships_new, f)
pth_classes = os.path.join(args.pth_out, | |
None, 4, None, None, "UserInformationTransportSettingFlags"),
0x31e9: (0x101f, None, None, 4, None, None, "UserInformationUMAddresses"),
0x31ea: (0x101f, None, None, 4, None, None, "UserInformationUMCallingLineIds"),
0x31eb: (0x101f, None, None, 4, None, None, "UserInformationUMDtmfMap"),
0x31ec: (0x0003, None, None, 4, None, None, "UserInformationUMEnabledFlags"),
0x31ed: (0x0003, None, None, 4, None, None, "UserInformationUMEnabledFlags2"),
0x31ee: (0x001f, None, None, 4, None, None, "UserInformationUMMailboxPolicy"),
0x31ef: (0x0102, None, None, 4, None, None, "UserInformationUMPinChecksum"),
0x31f0: (0x001f, None, None, 4, None, None, "UserInformationUMRecipientDialPlanId"),
0x31f1: (0x0003, None, None, 4, None, None, "UserInformationUMServerWritableFlags"),
0x31f2: (0x0102, None, None, 4, None, None, "UserInformationUMSpokenName"),
0x31f3: (0x101f, None, None, 4, None, None, "UserInformationUnifiedGroupEventSubscriptionBL"),
0x31f4: (0x101f, None, None, 4, None, None, "UserInformationUnifiedGroupEventSubscriptionLink"),
0x31f5: (0x001f, None, None, 4, None, None, "UserInformationUnifiedGroupFileNotificationsSettings"),
0x31f6: (0x101f, None, None, 4, None, None, "UserInformationUnifiedGroupMembersBL"),
0x31f7: (0x101f, None, None, 4, None, None, "UserInformationUnifiedGroupMembersLink"),
0x31f8: (0x001f, None, None, 4, None, None, "UserInformationUnifiedGroupProvisioningOption"),
0x31f9: (0x0003, None, None, 4, None, None, "UserInformationUnifiedGroupSecurityFlags"),
0x31fa: (0x0003, None, None, 4, None, None, "UserInformationUnifiedGroupSKU"),
0x31fb: (0x0102, None, None, 4, None, None, "UserInformationUnifiedMailboxAccount"),
0x31fc: (0x0003, None, None, 4, None, None, "UserInformationUserAccountControl"),
0x31fd: (0x001f, None, None, 4, None, None, "UserInformationUserPrincipalNameRaw"),
0x31fe: (0x000b, None, None, 4, None, None, "UserInformationUseDatabaseQuotaDefaults"),
0x31ff: (0x0014, None, None, 4, None, None, "UserInformationUsnChanged"),
0x3200: (0x0014, None, None, 4, None, None, "UserInformationUsnCreated"),
0x3201: (0x0002, None, None, 4, None, None, "UserInformationUserState"),
0x3202: (0x101f, None, None, 4, None, None, "UserInformationVoiceMailSettings"),
0x3203: (0x001f, None, None, 4, None, None, "UserInformationWhenChangedRaw"),
0x3204: (0x001f, None, None, 4, None, None, "UserInformationWhenCreatedRaw"),
0x3205: (0x001f, None, None, 4, None, None, "UserInformationWindowsEmailAddress"),
0x3206: (0x001f, None, None, 4, None, None, "UserInformationWindowsLiveID"),
0x3207: (0x001f, None, None, 4, None, None, "UserInformationYammerGroupAddress"),
0x3208: (0x001f, None, None, 4, None, None, "UserInformationOperatorNumber"),
0x3209: (0x0040, None, None, 4, None, None, "UserInformationWhenReadUTC"),
0x320a: (0x0003, None, None, 4, None, None, "UserInformationPreviousRecipientTypeDetailsHigh"),
0x320b: (0x0003, None, None, 4, None, None, "UserInformationRemoteRecipientTypeHigh"),
0x320c: (0x0003, None, None, 4, None, None, "UserInformationRecipientTypeDetailsValueHigh"),
0x320d: (0x0040, None, None, 4, None, None, "UserInformationFamilyMembersUpdateInProgressStartTime"),
0x320e: (0x000b, None, None, 4, None, None, "UserInformationIsFamilyMailbox"),
0x320f: (0x0040, None, None, 4, None, None, "UserInformationMailboxRegionLastUpdateTime"),
0x3210: (0x0003, None, None, 4, None, None, "UserInformationSubscribeExistingGroupMembersStatus"),
0x3211: (0x101f, None, None, 4, None, None, "UserInformationGroupMembers"),
0x3212: (0x0003, None, None, 4, None, None, "UserInformationRecipientDisplayTypeRaw"),
0x3213: (0x001f, None, None, 4, None, None, "UserInformationUITEntryVersion"),
0x3214: (0x001f, None, None, 4, None, None, "UserInformationLastRefreshedFrom"),
0x3215: (0x000b, None, None, 4, None, None, "UserInformationIsGroupMailBox"),
0x3216: (0x0003, None, None, 4, None, None, "UserInformationMailboxFolderSet"),
0x3217: (0x000b, None, None, 4, None, None, "UserInformationWasInactiveMailbox"),
0x3218: (0x0040, None, None, 4, None, None, "UserInformationInactiveMailboxRetireTime"),
0x3219: (0x0040, None, None, 4, None, None, "UserInformationOrphanSoftDeleteTrackingTime"),
0x321a: (0x0014, None, None, 4, None, None, "UserInformationSubscriptions"),
0x321b: (0x001f, None, None, 4, None, None, "UserInformationOtherMail"),
0x321c: (0x000b, None, None, 4, None, None, "UserInformationIsCIDAddedToMserv"),
0x321d: (0x0003, None, None, 4, None, None, "UserInformationMailboxWorkloads"),
0x321e: (0x0040, None, None, 4, None, None, "UserInformationCacheLastAccessTime"),
0x3233: (0x000b, None, None, 4, None, None, "UserInformationPublicFolderClientAccess"),
0x330b: (0x0003, None, None, 4, None, None, "BigFunnelLargePOITableTotalPages"),
0x330c: (0x0003, None, None, 4, None, None, "BigFunnelLargePOITableAvailablePages"),
0x330d: (0x0014, None, None, 4, None, None, "BigFunnelPOISize"),
0x330e: (0x0003, None, None, 4, None, None, "BigFunnelMessageCount"),
0x330f: (0x000b, None, None, 4, None, None, "FastIsEnabled"),
0x3310: (0x0003, None, None, 4, None, None, "NeedsToMove"),
0x3311: (0x0003, None, None, 4, None, None, "MCDBMessageTableTotalPages"),
0x3312: (0x0003, None, None, 4, None, None, "MCDBMessageTableAvailablePages"),
0x3313: (0x0003, None, None, 4, None, None, "MCDBOtherTablesTotalPages"),
0x3314: (0x0003, None, None, 4, None, None, "MCDBOtherTablesAvailablePages"),
0x3315: (0x0003, None, None, 4, None, None, "MCDBBigFunnelFilterTableTotalPages"),
0x3316: (0x0003, None, None, 4, None, None, "MCDBBigFunnelFilterTableAvailablePages"),
0x3317: (0x0003, None, None, 4, None, None, "MCDBBigFunnelLargePOITableTotalPages"),
0x3318: (0x0003, None, None, 4, None, None, "MCDBBigFunnelLargePOITableAvailablePages"),
0x3319: (0x0014, None, None, 4, None, None, "MCDBSize"),
0x3320: (0x0014, None, None, 4, None, None, "MCDBAvailableSpace"),
0x3321: (0x0003, None, None, 4, None, None, "MCDBBigFunnelPostingListTableTotalPages"),
0x3322: (0x0003, None, None, 4, None, None, "MCDBBigFunnelPostingListTableAvailablePages"),
0x3323: (0x0005, None, None, 4, None, None, "MCDBMessageTablePercentReplicated"),
0x3324: (0x0005, None, None, 4, None, None, "MCDBBigFunnelFilterTablePercentReplicated"),
0x3325: (0x0005, None, None, 4, None, None, "MCDBBigFunnelLargePOITablePercentReplicated"),
0x3326: (0x0005, None, None, 4, None, None, "MCDBBigFunnelPostingListTablePercentReplicated"),
0x3327: (0x0003, None, None, 4, None, None, "BigFunnelMailboxCreationVersion"),
0x3328: (0x0003, None, None, 4, None, None, "BigFunnelAttributeVectorCommonVersion"),
0x3329: (0x0003, None, None, 4, None, None, "BigFunnelAttributeVectorSharePointVersion"),
0x3330: (0x0014, None, None, 4, None, None, "BigFunnelIndexedSize"),
0x3331: (0x0014, None, None, 4, None, None, "BigFunnelPartiallyIndexedSize"),
0x3332: (0x0014, None, None, 4, None, None, "BigFunnelNotIndexedSize"),
0x3333: (0x0014, None, None, 4, None, None, "BigFunnelCorruptedSize"),
0x3334: (0x0014, None, None, 4, None, None, "BigFunnelStaleSize"),
0x3335: (0x0014, None, None, 4, None, None, "BigFunnelShouldNotBeIndexedSize"),
0x3336: (0x0003, None, None, 4, None, None, "BigFunnelIndexedCount"),
0x3337: (0x0003, None, None, 4, None, None, "BigFunnelPartiallyIndexedCount"),
0x3338: (0x0003, None, None, 4, None, None, "BigFunnelNotIndexedCount"),
0x3339: (0x0003, None, None, 4, None, None, "BigFunnelCorruptedCount"),
0x333a: (0x0003, None, None, 4, None, None, "BigFunnelStaleCount"),
0x333b: (0x0003, None, None, 4, None, None, "BigFunnelShouldNotBeIndexedCount"),
0x333c: (0x0005, None, None, 4, None, None, "BigFunnelL1Rank"),
0x333d: (0x0002, None, None, 4, None, None, "BigFunnelResultSets"),
0x333e: (0x000b, None, None, 4, None, None, "BigFunnelMaintainRefiners"),
0x333f: (0x0003, None, None, 4, None, None, "BigFunnelPostingListTableBuckets"),
0x3340: (0x0003, None, None, 4, None, None, "BigFunnelPostingListTargetTableBuckets"),
0x3341: (0x101f, None, None, 4, None, None, "BigFunnelL1FeatureNames"),
0x3342: (0x1003, None, None, 4, None, None, "BigFunnelL1FeatureValues"),
0x3343: (0x0003, None, None, 4, None, None, "MCDBLogonScenarioTotalPages"),
0x3344: (0x0003, None, None, 4, None, None, "MCDBLogonScenarioAvailablePages"),
0x3345: (0x0003, None, None, 4, None, None, "BigFunnelMasterIndexVersion"),
0x33f0: (0x0102, None, None, 4, None, None, "ControlDataForRecordReviewNotificationTBA"),
0x33fe: (0x0102, None, None, 4, None, None, "ControlDataForBigFunnelQueryParityAssistant"),
0x33ff: (0x0003, None, None, 4, None, None, "BigFunnelQueryParityAssistantVersion"),
0x3401: (0x0003, None, None, 4, None, None, "MessageTableTotalPages"),
0x3402: (0x0003, None, None, 4, None, None, "MessageTableAvailablePages"),
0x3403: (0x0003, None, None, 4, None, None, "OtherTablesTotalPages"),
0x3404: (0x0003, None, None, 4, None, None, "OtherTablesAvailablePages"),
0x3405: (0x0003, None, None, 4, None, None, "AttachmentTableTotalPages"),
0x3406: (0x0003, None, None, 4, None, None, "AttachmentTableAvailablePages"),
0x3407: (0x0003, None, None, 4, None, None, "MailboxTypeVersion"),
0x3408: (0x1048, None, None, 4, None, None, "MailboxPartitionMailboxGuids"),
0x3409: (0x0003, None, None, 4, None, None, "BigFunnelFilterTableTotalPages"),
0x340a: (0x0003, None, None, 4, None, None, "BigFunnelFilterTableAvailablePages"),
0x340b: (0x0003, None, None, 4, None, None, "BigFunnelPostingListTableTotalPages"),
0x340c: (0x0003, None, None, 4, None, None, "BigFunnelPostingListTableAvailablePages"),
0x3417: (0x001f, None, None, 4, None, None, "ProviderDisplayIcon"),
0x3418: (0x001f, None, None, 4, None, None, "ProviderDisplayName"),
0x3432: (0x0102, None, None, 4, None, None, "ControlDataForDirectoryProcessorAssistant"),
0x3433: (0x000b, None, None, 4, None, None, "NeedsDirectoryProcessor"),
0x3434: (0x101f, None, None, 4, None, None, "RetentionQueryIds"),
0x3435: (0x0014, None, None, 4, None, None, "RetentionQueryInfo"),
0x3436: (0x0040, None, None, 4, None, None, "MailboxLastProcessedTimestamp"),
0x3437: (0x0102, None, None, 4, None, None, "ControlDataForPublicFolderAssistant"),
0x3438: (0x0102, None, None, 4, None, None, "ControlDataForInferenceTrainingAssistant"),
0x3439: (0x000b, None, None, 4, None, None, "InferenceEnabled"),
0x343b: (0x0003, None, None, 4, None, None, "ContactLinking"),
0x343c: (0x0102, None, None, 4, None, None, "ControlDataForOABGeneratorAssistant"),
0x343d: (0x0003, None, None, 4, None, None, "ContactSaveVersion"),
0x3440: (0x0102, None, None, 4, None, None, "PushNotificationSubscriptionType"),
0x3442: (0x0102, None, None, 4, None, None, "ControlDataForInferenceDataCollectionAssistant"),
0x3443: (0x0102, None, None, 4, None, None, "InferenceDataCollectionProcessingState"),
0x3444: (0x0102, None, None, 4, None, None, "ControlDataForPeopleRelevanceAssistant"),
0x3445: (0x0003, None, None, 4, None, None, "SiteMailboxInternalState"),
0x3446: (0x0102, None, None, 4, None, None, "ControlDataForSiteMailboxAssistant"),
0x3447: (0x0003, None, None, 4, None, None, "InferenceTrainingLastContentCount"),
0x3448: (0x0040, None, None, 4, None, None, "InferenceTrainingLastAttemptTimestamp"),
0x3449: (0x0040, None, None, 4, None, None, "InferenceTrainingLastSuccessTimestamp"),
0x344a: (0x0003, None, None, 4, None, None, "InferenceUserCapabilityFlags"),
0x344b: (0x0102, None, None, 4, None, None, "ControlDataForMailboxAssociationReplicationAssistant"),
0x344c: (0x0040, None, None, 4, None, None, "MailboxAssociationNextReplicationTime"),
0x344d: (0x0003, None, None, 4, None, None, "MailboxAssociationProcessingFlags"),
0x344e: (0x0102, None, None, 4, None, None, "ControlDataForSharePointSignalStoreAssistant"),
0x344f: (0x0102, None, None, 4, None, None, "ControlDataForPeopleCentricTriageAssistant"),
0x3450: (0x0003, None, None, 4, None, None, "NotificationBrokerSubscriptions"),
0x3452: (0x0014, None, None, 4, None, None, "ElcLastRunTotalProcessingTime"),
0x3453: (0x0014, None, None, 4, None, None, "ElcLastRunSubAssistantProcessingTime"),
0x3454: (0x0014, None, None, 4, None, None, "ElcLastRunUpdatedFolderCount"),
0x3455: (0x0014, None, None, 4, None, None, "ElcLastRunTaggedFolderCount"),
0x3456: (0x0014, None, None, 4, None, None, "ElcLastRunUpdatedItemCount"),
0x3457: (0x0014, None, None, 4, None, None, "ElcLastRunTaggedWithArchiveItemCount"),
0x3458: (0x0014, None, None, 4, None, None, "ElcLastRunTaggedWithExpiryItemCount"),
0x3459: (0x0014, None, None, 4, None, None, "ElcLastRunDeletedFromRootItemCount"),
0x345a: (0x0014, None, None, 4, None, None, "ElcLastRunDeletedFromDumpsterItemCount"),
0x345b: (0x0014, None, None, 4, None, None, "ElcLastRunArchivedFromRootItemCount"),
0x345c: (0x0014, None, None, 4, None, None, "ElcLastRunArchivedFromDumpsterItemCount"),
0x345d: (0x0040, None, None, 4, None, None, "ScheduledISIntegLastFinished"),
0x345f: (0x0040, None, None, 4, None, None, "ELCLastSuccessTimestamp"),
0x3460: (0x0040, None, | |
<gh_stars>0
"""
Sources: <NAME>, https://docs.python.org/2/tutorial/datastructures.html
"""
from ggame import App, Color, LineStyle, Sprite, RectangleAsset, CircleAsset, EllipseAsset, PolygonAsset
import random
# Color and line-style palette for the board and marks.
black = Color(0x000000, 1.0)
white = Color(0xffffff, 1.0)
clear = Color(0xffffff, 0.0)
red = Color(0xff0000, 1.0)
blue = Color(0x0000ff, 1.0)
whiteline = LineStyle(1,white)
thinline = LineStyle(1, black)
thickline = LineStyle(5, black)
medline = LineStyle(4, black)
oline = LineStyle(8, blue)
xline = LineStyle(8, red)
# Board geometry: two vertical and two horizontal bars draw the 3x3 grid.
vline = RectangleAsset(10, 310, thinline, black)
hline = RectangleAsset(310, 10, thinline, black)
oshape = CircleAsset(30, oline, clear)
xshape = PolygonAsset([(5,5),(35,35),(65,5),(5,65),(35,35),(65,65)], xline, red)
Sprite(vline, (140, 40))
Sprite(vline, (240, 40))
Sprite(hline, (40, 140))
Sprite(hline, (40, 240))
# One invisible O and one invisible X sprite is pre-placed on every cell;
# moves are made later by toggling visibility and moving sprites out of
# these lists into playeralive/compalive.
osprites = [ ]
xsprites = [ ]
playeralive = [ ]
compalive = [ ]
# O and X use different coordinate grids (95/195/295 vs 60/160/260) —
# presumably because the circle and polygon assets anchor differently;
# TODO confirm against ggame asset origin semantics.
for x in [95, 195, 295]:
    for y in [95, 195, 295]:
        so = Sprite(oshape, (x,y))
        so.visible = False
        osprites.append(so)
for x in [60, 160, 260]:
    for y in [60, 160, 260]:
        sx = Sprite(xshape, (x,y))
        sx.visible = False
        xsprites.append(sx)
# Ask the user which side to play; re-prompt until valid.
# NOTE(review): only lowercase "x"/"o" is accepted; "X"/"O" re-prompts.
choice = input("Would you like to be X's or O's? ")
player = 0
while player==0:
    if choice=="x":
        player = xsprites
        playershape = xshape
        comp = osprites
        compshape = oshape
    elif choice=="o":
        player = osprites
        playershape = oshape
        comp = xsprites
        compshape = xshape
    else:
        choice = input("Invalid input. Try again. Would you like to be X's or O's? ")
class Ttt(App):
    def __init__(self):
        """Start the app and route mouse clicks to the game's click handler."""
        super().__init__()
        self.listenMouseEvent( 'click', self.click)
    # Class-level flag; not referenced in the code visible here — TODO confirm use.
    running = False
def click(self, event):
x = event.x
y = event.y
vstop = False
hstop = False
diagonal = False
hwin = False
vwin = False
dwin = False
dwin2 = False
for s in player:
if abs(x - s.x) <= 50 and abs(y - s.y) <= 50:
s.visible = True
player.remove(s)
for sprite in comp:
if abs(sprite.x - s.x) <= 40 and abs(sprite.y - s.y) <= 40:
comp.remove(sprite)
print(comp)
if len(playeralive)==0:
if (s.x==160 and s.y==160) or (s.x==195 and s.y==195):
for c in comp:
if c.x==c.y or abs(c.x-c.y)==200:
print("corner")
c.visible = True
comp.remove(c)
compalive.append(c)
for spr in player:
if abs(spr.x - c.x) <= 40 and abs(spr.y - c.y) <= 40:
player.remove(spr)
break
else:
for c in comp:
if (c.x==160==c.y==160) or (c.x==195==c.y==195):
print("center")
c.visible = True
comp.remove(c)
compalive.append(c)
for spr in player:
if abs(spr.x - c.x) <= 40 and abs(spr.y - c.y) <= 40:
player.remove(spr)
break
#Could make code more efficient by checking to see if there is already a comp shape in that row/column
if len(playeralive) >= 1:
for n in playeralive:
if n.x == s.x:
vstop = True
print("vstop on")
break
for nn in playeralive:
if nn.y == s.y:
hstop = True
print("hstop on")
break
#check the coordinates of different shapes??
for m in playeralive:
if abs(m.x-s.x)==100 and abs(m.y-s.y)==100:
diagonal = True
print("diagonal on")
for n in compalive:
if n.x==n.y and ( n.x == 160 or n.x == 195):
diagonal = False
print("diagonal off")
break
for mn in playeralive:
if mn.x==mn.y and (mn.x == 160 or mn.x == 195):
diagonal = True
break
else:
diagonal = False
print("diagonal off")
break
if len(compalive)>=2:
compx = []
compy = []
for ca in compalive:
compx.append(ca.x)
for cal in compalive:
compy.append(cal.y)
compx.sort()
compy.sort()
for n in range(len(compalive)-1):
if compx[n]==compx[n+1]:
winningx = n
vwin = True
print("vwin on")
for n in playeralive:
if abs(n.x-compx[winningx])==35 or abs(s.x-compx[winningx])==35:
vwin = False
print("vwin off")
break
break
for n in range(len(compalive)-1):
if compy[n]==compy[n+1]:
winningy = n
hwin=True
print("hwin on")
for n in playeralive:
if abs(n.y-compy[winningy])==35 or abs(s.y-compy[winningy])==35:
hwin = False
print("hwin off")
break
break
if compshape == xshape:
for n in compalive:
if n.y==160 and n.x==160:
for com in compalive:
if com.y==com.x and com.x!=160:
dwin = True
print("dwin on")
if com.x == 260:
winningd = 60
else:
winningd = 260
for p in playeralive:
if p.x==p.y and abs(p.x - winningd)==35 or (s.x==s.y and abs(s.x-winningd)==35):
dwin = False
print("dwin off")
break
break
if abs(com.y-com.x) == 200:
dwin2 = True
print("dwin2 on")
if com.x==260:
winningdx = 60
winningdy = 260
else:
winningdx = 260
winningdy = 60
for p in playeralive:
if (abs(p.x- winningdx)==35 and abs(p.y - winningdy)==35) or (abs(s.x- winningdx)==35 and abs(s.y - winningdy)==35):
dwin2 = False
print("dwin2 off")
break
break
if compshape==oshape:
for n in compalive:
if n.y==195 and n.x==195:
for com in compalive:
if com.y==com.x and com.x!=195:
dwin = True
print("dwin on")
if com.x == 295:
winningd = 95
else:
winningd = 295
for p in playeralive:
if p.x==p.y and abs(p.x - winningd)==35 or (s.x==s.y and abs(s.x-winningd)==35):
dwin = False
print("dwin off")
break
break
if abs(com.y-com.x) == 200:
dwin2 = True
print("dwin2 on")
if com.x==295:
winningdx = 95
winningdy = 295
else:
winningdx = 295
winningdy = 95
for p in playeralive:
if (abs(p.x- winningdx)==35 and abs(p.y - winningdy)==35) or (abs(s.x- winningdx)==35 and abs(s.y - winningdy)==35):
dwin2 = False
print("dwin2 off")
break
break
# FINISH THIS FOR DIAGONAL THE OTHER WAY
if vwin == True:
for c in comp:
if c.x == compx[winningx]:
print("vwin")
c.visible = True
comp.remove(c)
compalive.append(c)
for spr in player:
if abs(spr.x - c.x) <= 40 and abs(spr.y - c.y) <= 40:
player.remove(spr)
break
elif hwin == True:
for c in comp:
if c.y == compy[winningy]:
print("hwin")
c.visible = True
comp.remove(c)
compalive.append(c)
for spr in player:
if abs(spr.x - c.x) <= 40 and abs(spr.y - c.y) <= 40:
player.remove(spr)
break
elif dwin == True:
for c in comp:
if c.y==c.x and c.x == winningd:
print("dwin")
c.visible = True
comp.remove(c)
compalive.append(c)
for spr in player:
if abs(spr.x - c.x) <= 40 and abs(spr.y - c.y) <= 40:
player.remove(spr)
break
elif dwin2 == True:
for c in comp:
if c.y==winningdy and c.x == winningdx:
print("dwin2")
c.visible = True
comp.remove(c)
compalive.append(c)
for spr in player:
if abs(spr.x - c.x) <= 40 and abs(spr.y - c.y) <= 40:
player.remove(spr)
break
elif vstop == True:
for c in comp:
if abs(c.x - s.x)==35:
print("vstop")
c.visible = True
comp.remove(c)
compalive.append(c)
for spr in player:
if abs(spr.x - c.x) <= 40 and abs(spr.y - c.y) <= 40:
player.remove(spr)
break
else:
if hstop == True:
for co in comp:
if abs(co.y - s.y)==35:
print("vstop, hstop")
co.visible = True
comp.remove(co)
compalive.append(co)
for spr in player:
if abs(spr.x - co.x) <= 40 and abs(co.y - spr.y) <= 40:
player.remove(spr)
break
else:
if diagonal == True:
for c in comp:
if (abs(s.x-c.x)==abs(s.y-c.y)) or ( abs((abs(s.x-c.x))-(abs(s.y-c.y)))==70 ):
print("vstop, hstop, diag")
c.visible = True
comp.remove(c)
compalive.append(c)
for spr in player:
if abs(spr.x - c.x) <= 40 and abs(c.y - spr.y) <= 40:
player.remove(spr)
break
else:
print("vstop, hstop, random")
crandom = random.choice(comp)
crandom.visible = True
comp.remove(crandom)
compalive.append(crandom)
for spr in player:
if abs(spr.x - crandom.x) <= 40 and abs(crandom.y - spr.y) <= 40:
player.remove(spr)
break
else:
if diagonal == True:
for c in comp:
if (abs(s.x-c.x)==abs(s.y-c.y)) or ( abs((abs(s.x-c.x))-(abs(s.y-c.y)))==70 ):
print("vstop, diag")
c.visible = True
comp.remove(c)
compalive.append(c)
for spr in player:
if abs(spr.x - c.x) <= 40 and abs(c.y - spr.y) <= 40:
player.remove(spr)
break
else:
print("vstop, diag, random")
crandom = random.choice(comp)
crandom.visible = True
comp.remove(crandom)
compalive.append(crandom)
for spr in player:
if abs(spr.x - crandom.x) <= 40 and abs(crandom.y - spr.y) <= 40:
player.remove(spr)
break
elif hstop == True:
for co in comp:
if abs(co.y - s.y)==35:
print("hstop")
co.visible = True
comp.remove(co)
compalive.append(co)
for spr in player:
if abs(spr.x - co.x) <= 40 and abs(co.y - spr.y) <= 40:
player.remove(spr)
break
else:
if diagonal == True:
for c in comp:
if (abs(s.x-c.x)==abs(s.y-c.y)) or ( abs((abs(s.x-c.x))-(abs(s.y-c.y)))==70 ):
print("hstop, diag")
c.visible = True
comp.remove(c)
compalive.append(c)
for spr in player:
if abs(spr.x - c.x) <= 40 and abs(c.y - spr.y) <= 40:
player.remove(spr)
break
else:
print("hstop, diag, random")
crandom = random.choice(comp)
crandom.visible = True
comp.remove(crandom)
compalive.append(crandom)
for spr in player:
if abs(spr.x - crandom.x) <= 40 and abs(crandom.y - spr.y) <= 40:
player.remove(spr)
break
elif diagonal == True:
for c in comp:
if (abs(s.x-c.x)==abs(s.y-c.y)) or ( abs((abs(s.x-c.x))-(abs(s.y-c.y)))==70 ):
print("diag")
c.visible = True
comp.remove(c)
compalive.append(c)
for spr in player:
if | |
self._real_archetype = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('_desynched_atts', node)
if value is not None and '_desynched_atts' not in already_processed:
already_processed.add('_desynched_atts')
self._desynched_atts = value
value = find_attr_value_('ComponentInstanceID', node)
if value is not None and 'ComponentInstanceID' not in already_processed:
already_processed.add('ComponentInstanceID')
self.ComponentInstanceID = value
value = find_attr_value_('Primary', node)
if value is not None and 'Primary' not in already_processed:
already_processed.add('Primary')
self.Primary = value
value = find_attr_value_('_subtype', node)
if value is not None and '_subtype' not in already_processed:
already_processed.add('_subtype')
if value in ('true', '1'):
self._subtype = True
elif value in ('false', '0'):
self._subtype = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('_instances', node)
if value is not None and '_instances' not in already_processed:
already_processed.add('_instances')
self._instances = value
value = find_attr_value_('_archetype', node)
if value is not None and '_archetype' not in already_processed:
already_processed.add('_archetype')
self._archetype = value
value = find_attr_value_('_id', node)
if value is not None and '_id' not in already_processed:
already_processed.add('_id')
self._id = value
value = find_attr_value_('Secondary', node)
if value is not None and 'Secondary' not in already_processed:
already_processed.add('Secondary')
self.Secondary = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class ComponentInstanceDataType
class JointsMetaDataType(GeneratedsSuper):
    """generateDS.py-style binding for the ``JointsMetaData`` XML element.

    Wraps a list of ``ComponentInstanceData`` children together with the
    bookkeeping attributes (``_derived``, ``_real_archetype``, ``_archetype``,
    ``_subtype``, ``_instances``, ``_desynched_atts``, ``_id``) shared by the
    generated binding classes in this file.
    NOTE(review): generated-pattern code -- keep any edits in sync with the
    sibling generated classes / the generator template.
    """
    # Assign a class to `subclass` to have factory() build that type instead.
    subclass = None
    superclass = None
    def __init__(self, _derived=None, _real_archetype=None, _archetype=None, _subtype=None, _instances=None, _desynched_atts=None, _id=None, ComponentInstanceData=None):
        self.original_tagname_ = None
        # _cast coerces the raw attribute values (bool attrs become booleans).
        self._derived = _cast(None, _derived)
        self._real_archetype = _cast(bool, _real_archetype)
        self._archetype = _cast(None, _archetype)
        self._subtype = _cast(bool, _subtype)
        self._instances = _cast(None, _instances)
        self._desynched_atts = _cast(None, _desynched_atts)
        self._id = _cast(None, _id)
        if ComponentInstanceData is None:
            self.ComponentInstanceData = []
        else:
            self.ComponentInstanceData = ComponentInstanceData
    def factory(*args_, **kwargs_):
        # Indirect constructor honoring the `subclass` override hook above.
        if JointsMetaDataType.subclass:
            return JointsMetaDataType.subclass(*args_, **kwargs_)
        else:
            return JointsMetaDataType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated getter/setter pairs for the child list and each attribute.
    def get_ComponentInstanceData(self): return self.ComponentInstanceData
    def set_ComponentInstanceData(self, ComponentInstanceData): self.ComponentInstanceData = ComponentInstanceData
    def add_ComponentInstanceData(self, value): self.ComponentInstanceData.append(value)
    def insert_ComponentInstanceData(self, index, value): self.ComponentInstanceData[index] = value
    def get__derived(self): return self._derived
    def set__derived(self, _derived): self._derived = _derived
    def get__real_archetype(self): return self._real_archetype
    def set__real_archetype(self, _real_archetype): self._real_archetype = _real_archetype
    def get__archetype(self): return self._archetype
    def set__archetype(self, _archetype): self._archetype = _archetype
    def get__subtype(self): return self._subtype
    def set__subtype(self, _subtype): self._subtype = _subtype
    def get__instances(self): return self._instances
    def set__instances(self, _instances): self._instances = _instances
    def get__desynched_atts(self): return self._desynched_atts
    def set__desynched_atts(self, _desynched_atts): self._desynched_atts = _desynched_atts
    def get__id(self): return self._id
    def set__id(self, _id): self._id = _id
    def hasContent_(self):
        # True when there is at least one child element to serialize.
        if (
            self.ComponentInstanceData
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='JointsMetaDataType', namespacedef_='', pretty_print=True):
        # Serialize this element (attributes + children) as XML to `outfile`.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='JointsMetaDataType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='JointsMetaDataType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='JointsMetaDataType'):
        # Write each non-None attribute exactly once (tracked via already_processed).
        if self._derived is not None and '_derived' not in already_processed:
            already_processed.add('_derived')
            outfile.write(' _derived=%s' % (self.gds_format_string(quote_attrib(self._derived).encode(ExternalEncoding), input_name='_derived'), ))
        if self._real_archetype is not None and '_real_archetype' not in already_processed:
            already_processed.add('_real_archetype')
            outfile.write(' _real_archetype="%s"' % self.gds_format_boolean(self._real_archetype, input_name='_real_archetype'))
        if self._archetype is not None and '_archetype' not in already_processed:
            already_processed.add('_archetype')
            outfile.write(' _archetype=%s' % (self.gds_format_string(quote_attrib(self._archetype).encode(ExternalEncoding), input_name='_archetype'), ))
        if self._subtype is not None and '_subtype' not in already_processed:
            already_processed.add('_subtype')
            outfile.write(' _subtype="%s"' % self.gds_format_boolean(self._subtype, input_name='_subtype'))
        if self._instances is not None and '_instances' not in already_processed:
            already_processed.add('_instances')
            outfile.write(' _instances=%s' % (self.gds_format_string(quote_attrib(self._instances).encode(ExternalEncoding), input_name='_instances'), ))
        if self._desynched_atts is not None and '_desynched_atts' not in already_processed:
            already_processed.add('_desynched_atts')
            outfile.write(' _desynched_atts=%s' % (self.gds_format_string(quote_attrib(self._desynched_atts).encode(ExternalEncoding), input_name='_desynched_atts'), ))
        if self._id is not None and '_id' not in already_processed:
            already_processed.add('_id')
            outfile.write(' _id=%s' % (self.gds_format_string(quote_attrib(self._id).encode(ExternalEncoding), input_name='_id'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='JointsMetaDataType', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for ComponentInstanceData_ in self.ComponentInstanceData:
            ComponentInstanceData_.export(outfile, level, namespace_, name_='ComponentInstanceData', pretty_print=pretty_print)
    # exportLiteral* emit a Python-literal (constructor-call style) dump,
    # used for debugging / code generation rather than XML output.
    def exportLiteral(self, outfile, level, name_='JointsMetaDataType'):
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self._derived is not None and '_derived' not in already_processed:
            already_processed.add('_derived')
            showIndent(outfile, level)
            outfile.write('_derived="%s",\n' % (self._derived,))
        if self._real_archetype is not None and '_real_archetype' not in already_processed:
            already_processed.add('_real_archetype')
            showIndent(outfile, level)
            outfile.write('_real_archetype=%s,\n' % (self._real_archetype,))
        if self._archetype is not None and '_archetype' not in already_processed:
            already_processed.add('_archetype')
            showIndent(outfile, level)
            outfile.write('_archetype="%s",\n' % (self._archetype,))
        if self._subtype is not None and '_subtype' not in already_processed:
            already_processed.add('_subtype')
            showIndent(outfile, level)
            outfile.write('_subtype=%s,\n' % (self._subtype,))
        if self._instances is not None and '_instances' not in already_processed:
            already_processed.add('_instances')
            showIndent(outfile, level)
            outfile.write('_instances="%s",\n' % (self._instances,))
        if self._desynched_atts is not None and '_desynched_atts' not in already_processed:
            already_processed.add('_desynched_atts')
            showIndent(outfile, level)
            outfile.write('_desynched_atts="%s",\n' % (self._desynched_atts,))
        if self._id is not None and '_id' not in already_processed:
            already_processed.add('_id')
            showIndent(outfile, level)
            outfile.write('_id="%s",\n' % (self._id,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('ComponentInstanceData=[\n')
        level += 1
        for ComponentInstanceData_ in self.ComponentInstanceData:
            showIndent(outfile, level)
            outfile.write('model_.ComponentInstanceDataType(\n')
            ComponentInstanceData_.exportLiteral(outfile, level, name_='ComponentInstanceDataType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate this object from an ElementTree-style node.
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # Parse each known attribute; boolean attrs accept 'true'/'1'/'false'/'0'.
        value = find_attr_value_('_derived', node)
        if value is not None and '_derived' not in already_processed:
            already_processed.add('_derived')
            self._derived = value
        value = find_attr_value_('_real_archetype', node)
        if value is not None and '_real_archetype' not in already_processed:
            already_processed.add('_real_archetype')
            if value in ('true', '1'):
                self._real_archetype = True
            elif value in ('false', '0'):
                self._real_archetype = False
            else:
                raise_parse_error(node, 'Bad boolean attribute')
        value = find_attr_value_('_archetype', node)
        if value is not None and '_archetype' not in already_processed:
            already_processed.add('_archetype')
            self._archetype = value
        value = find_attr_value_('_subtype', node)
        if value is not None and '_subtype' not in already_processed:
            already_processed.add('_subtype')
            if value in ('true', '1'):
                self._subtype = True
            elif value in ('false', '0'):
                self._subtype = False
            else:
                raise_parse_error(node, 'Bad boolean attribute')
        value = find_attr_value_('_instances', node)
        if value is not None and '_instances' not in already_processed:
            already_processed.add('_instances')
            self._instances = value
        value = find_attr_value_('_desynched_atts', node)
        if value is not None and '_desynched_atts' not in already_processed:
            already_processed.add('_desynched_atts')
            self._desynched_atts = value
        value = find_attr_value_('_id', node)
        if value is not None and '_id' not in already_processed:
            already_processed.add('_id')
            self._id = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'ComponentInstanceData':
            obj_ = ComponentInstanceDataType.factory()
            obj_.build(child_)
            self.ComponentInstanceData.append(obj_)
            obj_.original_tagname_ = 'ComponentInstanceData'
# end class JointsMetaDataType
class ConstrainedToComponentsType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, _derived=None, _real_archetype=None, _archetype=None, _subtype=None, _instances=None, _desynched_atts=None, _id=None, ConstrainedToComponent=None):
self.original_tagname_ = None
self._derived = _cast(None, _derived)
self._real_archetype = _cast(bool, _real_archetype)
self._archetype = _cast(None, _archetype)
self._subtype = _cast(bool, _subtype)
self._instances = _cast(None, _instances)
self._desynched_atts = _cast(None, _desynched_atts)
self._id = _cast(None, _id)
if ConstrainedToComponent is None:
self.ConstrainedToComponent = []
else:
self.ConstrainedToComponent = ConstrainedToComponent
def factory(*args_, **kwargs_):
if ConstrainedToComponentsType.subclass:
return ConstrainedToComponentsType.subclass(*args_, **kwargs_)
else:
return ConstrainedToComponentsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ConstrainedToComponent(self): return self.ConstrainedToComponent
def set_ConstrainedToComponent(self, ConstrainedToComponent): self.ConstrainedToComponent = ConstrainedToComponent
def add_ConstrainedToComponent(self, value): self.ConstrainedToComponent.append(value)
def insert_ConstrainedToComponent(self, index, value): self.ConstrainedToComponent[index] = value
def get__derived(self): return self._derived
def set__derived(self, _derived): self._derived = _derived
def get__real_archetype(self): return self._real_archetype
def set__real_archetype(self, _real_archetype): self._real_archetype = _real_archetype
def get__archetype(self): return self._archetype
def set__archetype(self, _archetype): self._archetype = _archetype
def get__subtype(self): return self._subtype
def set__subtype(self, _subtype): self._subtype = _subtype
def get__instances(self): return self._instances
def set__instances(self, _instances): self._instances = _instances
def get__desynched_atts(self): return self._desynched_atts
def set__desynched_atts(self, _desynched_atts): self._desynched_atts = _desynched_atts
def get__id(self): return self._id
def set__id(self, _id): self._id = _id
def hasContent_(self):
if (
self.ConstrainedToComponent
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ConstrainedToComponentsType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ConstrainedToComponentsType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='ConstrainedToComponentsType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ConstrainedToComponentsType'):
if self._derived is not None and '_derived' not in already_processed:
already_processed.add('_derived')
outfile.write(' _derived=%s' % (self.gds_format_string(quote_attrib(self._derived).encode(ExternalEncoding), input_name='_derived'), ))
if self._real_archetype is not None and '_real_archetype' not in already_processed:
already_processed.add('_real_archetype')
outfile.write(' _real_archetype="%s"' % self.gds_format_boolean(self._real_archetype, input_name='_real_archetype'))
if self._archetype is not None and '_archetype' not in already_processed:
already_processed.add('_archetype')
outfile.write(' _archetype=%s' % (self.gds_format_string(quote_attrib(self._archetype).encode(ExternalEncoding), input_name='_archetype'), ))
if self._subtype is not None and '_subtype' not in already_processed:
already_processed.add('_subtype')
outfile.write(' _subtype="%s"' % | |
#assert(str(v) == s)
s = 'frob 2012.4.1'
#v = Version.parse(s)
#assert(str(v) == s)
s = 'frob 2012.04.01'
#v = Version.parse(s)
#assert(str(v) == s)
s = 'frob 20120401'
#v = Version.parse(s)
#assert(v.release_date == date(2012, 4, 1))
def test_parse_semver():
    """Semver 2.0.0 coverage: plain numeric triples, pre-release suffixes,
    build metadata, and combinations of the two (round-trip via str())."""

    def check_numeric(text, major, minor, patch, typed=False):
        # Plain X.Y.Z: no product name; `typed` additionally pins int typing.
        ver = Version.parse(text)
        assert(ver.name is None)
        assert(ver.name_clean is None)
        if typed:
            assert(type(ver.major) is int)
        assert(ver.major == major)
        if typed:
            assert(type(ver.minor) is int)
        assert(ver.minor == minor)
        if typed:
            assert(type(ver.patch) is int)
        assert(ver.patch == patch)
        if typed:
            assert(type(ver.patch1) is int)
        assert(ver.patch1 == patch)
        assert(str(ver) == text)

    def check_prerelease(text, patch, patch2, patch_str):
        # 1.0.0-<tag>: the patch component becomes a string carrying the tag.
        ver = Version.parse(text)
        assert(ver.major == 1)
        assert(ver.minor == 0)
        assert(type(ver.patch) in STR_TYPES)
        assert(ver.patch == patch)
        assert(type(ver.patch1) is int)
        assert(ver.patch1 == 0)
        if patch2 is None:
            assert(ver.patch2 is None)
        else:
            assert(type(ver.patch2) is int)
            assert(ver.patch2 == patch2)
        assert(ver.patch_str == patch_str)
        assert(str(ver) == text)

    check_numeric('0.0.0', 0, 0, 0, typed=True)  # legal according to semver 2.0.0
    check_numeric('0.0.1', 0, 0, 1)
    check_numeric('0.0.99', 0, 0, 99)
    check_numeric('0.0.99999999', 0, 0, 99999999, typed=True)
    check_numeric('0.999999999.0', 0, 999999999, 0)
    check_numeric('99999999.0.0', 99999999, 0, 0)

    check_prerelease('1.0.0-alpha', '0-alpha', None, '-alpha')
    check_prerelease('1.0.0-alpha1', '0-alpha1', 1, '-alpha')
    check_prerelease('1.0.0-alpha.1', '0-alpha.1', 1, '-alpha.')
    check_prerelease('1.0.0-1', '0-1', 1, '-')
    check_prerelease('1.0.0-0.1', '0-0.1', 1, '-0.')
    check_prerelease('1.0.0-1-2-3-4-5', '0-1-2-3-4-5', 5, '-1-2-3-4-')
    check_prerelease('1.0.0-1-0-1-0-1', '0-1-0-1-0-1', 1, '-1-0-1-0-')
    check_prerelease('1.0.0-1a2b3c', '0-1a2b3c', None, '-1a2b3c')
    check_prerelease('1.0.0--', '0--', None, '--')  # legal ""

    # TODO interface. this is wrong: '1.0.0-a.b' should decompose the dotted
    # identifiers (patch1 == 'a', patch2 == 'b'); for now only check it parses.
    Version.parse('1.0.0-a.b')

    ver = Version.parse('1.0.0-A.B')
    assert(ver.name is None)
    assert(ver.major == 1)
    assert(ver.minor == 0)
    assert(ver.patch == '0-A.B')
    assert(ver.patch1 == 0)
    assert(ver.patch2 is None)
    assert(ver.patch_str == '-A.B')
    assert(str(ver) == '1.0.0-A.B')

    ver = Version.parse('1.0.0-a-.b-.c-.d-')
    assert(ver.name is None)
    assert(ver.major == 1)
    assert(ver.minor == 0)
    assert(ver.patch == '0-a-.b-.c-.d-')
    assert(ver.patch1 == 0)
    assert(ver.patch2 is None)
    assert(str(ver) == '1.0.0-a-.b-.c-.d-')

    # The remaining pre-release / build-metadata shapes only need to survive
    # a parse/str round trip.
    for text in (
            '1.0.0-0.33.44.55',
            '1.0.0-999.fiddlesticks.whoomp-there-it-is.ohyeah',
            '1.0.0-a.2.X.5',
            '1.0.0-a.2.X.5.-',
            '1.0.0-a.2.X.5.z',
            '1.0.0-a.2.X.5.0',
            '1.0.0+20130313144700',
            '1.0.0-beta+exp.sha.5114f85',
            '1.0.0+alpha',
            '1.0.0+alpha1',
            '1.0.0+alpha.1',
            '1.0.0+1',
            '1.0.0+0.1',
            '1.0.0+1-2-3-4-5',
            '1.0.0+1-0-1-0-1',
            '1.0.0+1a2b3c',
            '1.0.0+-',  # legal ""
            '1.0.0+a.b',
            '1.0.0+A.B',
            '1.0.0+a-.b-.c-.d-',
            '1.0.0+0.33.44.55',
            '1.0.0+999.fiddlesticks.whoomp-there-it-is.ohyeah',
            '1.0.0+a.2.X.5',
            '1.0.0+a.2.X.5.-',
            '1.0.0+a.2.X.5.z',
            '1.0.0+a.2.X.5.0',
            '1.0.0-alpha+alpha',
            '1.0.0-alpha1+alpha1',
            '1.0.0-alpha.1+alpha.1',
            '1.0.0-1+1',
            '1.0.0-0.1+0.1',
            '1.0.0-1-2-3-4-5+1-2-3-4-5',
            '1.0.0-1-0-1-0-1+1-0-1-0-1',
            '1.0.0-1a2b3c+1a2b3c',
            '1.0.0--+-',  # legal ""
            '1.0.0-a.b+a.b',
            '1.0.0-A.B+A.B',
            '1.0.0-a-.b-.c-.+a-.b-.c-.d-',
            '1.0.0-0.33.44.55+0.33.44.55',
            '1.0.0-999.fiddlesticks.whoomp-there-it-is.ohyeah+999.fiddlesticks.whoomp-there-it-is.ohyeah',
            '1.0.0-a.2.X.5+a.2.X.5',
            '1.0.0-a.2.X.5.-+a.2.X.5.-',
            '1.0.0-a.2.X.5.z+a.2.X.5.z',
            '1.0.0-a.2.X.5.0+a.2.X.5.0',
    ):
        ver = Version.parse(text)
        assert(str(ver) == text)
def test_parse_wildcards():
s = 'OpenSSH *'
v = Version.parse(s)
assert(v.name_clean == 'openssh')
assert(type(v.major) in STR_TYPES)
assert(v.major == '*')
assert(v.minor is None)
assert(v.patch is None)
assert(v.name_sep == ' ')
assert(str(v) == s)
s = 'OpenSSH x'
v = Version.parse(s)
assert(v.name_clean == 'openssh')
assert(type(v.major) in STR_TYPES)
assert(v.major == 'x')
assert(v.minor is None)
assert(v.patch is None)
assert(v.name_sep == ' ')
assert(str(v) == s)
s = 'OpenSSH X'
v = Version.parse(s)
assert(v.name_clean == 'openssh')
assert(type(v.major) in STR_TYPES)
assert(v.major == 'X')
assert(v.minor is None)
assert(v.patch is None)
assert(v.name_sep == ' ')
assert(str(v) == s)
s = 'OpenSSH 4.x'
v = Version.parse(s)
assert(v.name_clean == 'openssh')
assert(type(v.major) is int)
assert(v.major == 4)
assert(type(v.minor) in STR_TYPES)
assert(v.minor == 'x')
assert(v.patch is None)
assert(v.name_sep == ' ')
assert(v.version_sep == '.')
assert(str(v) == s)
s = 'OpenSSH 4.X'
v = Version.parse(s)
assert(v.name_clean == 'openssh')
assert(type(v.major) is int)
assert(v.major == 4)
assert(type(v.minor) in STR_TYPES)
assert(v.minor == 'X')
assert(v.patch is None)
assert(v.name_sep == ' ')
assert(v.version_sep == '.')
assert(str(v) == s)
s = 'OpenSSH 4.*'
v = | |
# <gh_stars>100-1000  (repo-metadata artifact; commented out so the module parses)
import os
import re
from typing import List, Dict
import codecs
import json
from itertools import chain
import logging
from collections import Counter
import numpy as np
import baseline.data
from baseline.vectorizers import Dict1DVectorizer, Token1DVectorizer, create_vectorizer, HasPredefinedVocab
from baseline.utils import import_user_module, revlut, exporter, optional_params, Offsets, listify, SingleFileDownloader
from eight_mile.progress import create_progress_bar
# Public API of this module; populated incrementally by the @export decorator.
__all__ = []
export = exporter(__all__)
logger = logging.getLogger('baseline')
# Registry of reader classes: task name -> {reader name -> reader class}.
BASELINE_READERS = {}
@export
@optional_params
def register_reader(cls, task, name=None):
    """Register your own `Reader`

    Use this pattern if you want to provide an override to a `Reader` class.
    The class is stored in ``BASELINE_READERS`` under ``name`` (defaulting to
    the class name) for the given ``task``; double registration is an error.
    """
    key = cls.__name__ if name is None else name
    task_registry = BASELINE_READERS.setdefault(task, {})
    if key in task_registry:
        raise Exception('Error: attempt to re-defined previously registered handler {} for task {} in registry'.format(key, task))
    task_registry[key] = cls
    return cls
@export
def create_reader(task, vectorizers, trim, **kwargs):
    """Instantiate the reader registered for ``task``.

    The reader name comes from ``type`` (or the legacy ``reader_type``) in
    ``kwargs``, defaulting to ``'default'``.
    """
    reader_name = kwargs.get('type', kwargs.get('reader_type', 'default'))
    reader_cls = BASELINE_READERS[task][reader_name]
    return reader_cls(vectorizers, trim, **kwargs)
@export
def num_lines(filename):
    """Counts the number of lines in a file.

    :param filename: `str` The name of the file to count the lines of.
    :returns: `int` The number of lines.
    """
    count = 0
    with codecs.open(filename, encoding='utf-8', mode='r') as fh:
        for _ in fh:
            count += 1
    return count
def _all_predefined_vocabs(vectorizers):
    """True when every vectorizer already carries a predefined vocabulary."""
    for vect in vectorizers.values():
        if not isinstance(vect, HasPredefinedVocab):
            return False
    return True
def _filter_vocab(vocab, min_fs):
    """Filter down the vocab based on rules in the vectorizers.

    :param vocab: `dict[Counter]`: A dict of vocabs.
    :param min_fs: `dict[int]`: A dict of frequency cutoffs.

    Note:
        Any key in the min_fs dict should appear in the vocab dict.

    :returns: `dict[dict]`: A dict of new filtered vocabs (mutated in place).
    """
    for key, cutoff in min_fs.items():
        # -1 means "no filtering" for this key; skip the pass entirely.
        if cutoff == -1:
            continue
        vocab[key] = {token: freq for token, freq in vocab[key].items() if freq >= cutoff}
    return vocab
def _read_from_col(col, files, col_splitter=r'\t', word_splitter=r'\s'):
    """Read from a single column of a file.

    :param col: `int`: The column to read from.
    :param files: List[str]: Files to read; `None` entries are skipped.
    :param col_splitter: `str`: The regex that splits a line into columns.
    :param word_splitter: `str`: The regex that splits a column into words.
    :returns: List[str]: The tokenized text from column `col` of each file.
    """
    rows = []
    for file_name in files:
        if file_name is None:
            continue
        with codecs.open(file_name, encoding='utf-8', mode='r') as fh:
            for raw in fh:
                raw = raw.rstrip('\n')
                # Blank lines carry no columns; skip them.
                if not raw:
                    continue
                columns = re.split(col_splitter, raw)
                rows.append(re.split(word_splitter, columns[col]))
    return rows
def _build_vocab_for_col(col, files, vectorizers, text=None, col_splitter=r'\t', word_splitter=r'\s'):
    """Build vocab from a single column in file. (separated by `\t`).

    Used to read a vocab from a single conll column, read a vocab from the
    source or target of a seq2seq file, or reading from a vocab file.

    :param col: `int`: The column to read from.
    :param files: List[str]: A list of files to read from.
    :param vectorizers: dict[str] -> Vectorizer: Used to count the column.
    :param text: List[str]: Pre-tokenized text, or None to read from `files`.
    :param col_splitter: `str`: The regex that splits a line into columns.
    :param word_splitter: `str`: The regex that splits a column into words.
    :returns: dict[str] -> dict[str] -> int: The vocabs.
    """
    if text is None:
        text = _read_from_col(col, files, col_splitter, word_splitter)
    counts = {name: Counter() for name in vectorizers}
    for tokens in text:
        for name, vect in vectorizers.items():
            counts[name].update(vect.count(tokens))
    return counts
def _check_lens(vectorizers):
    """Collect the names of vectorizers whose `mxlen` is unset (`-1`).

    :param vectorizers: `dict[str] -> Vectorizer`: The vectorizers to inspect.
    :returns: `set[str]`: Keys whose vectorizer has `mxlen == -1`.
    """
    failures = set()
    for k, vect in vectorizers.items():
        mxlen = getattr(vect, 'mxlen', None)
        if mxlen == -1:
            failures.add(k)
    return failures

def _vocab_allowed(vectorizers):
    """Raise if any vectorizer cannot be used with a vocab file.

    Reading vocab from a file requires a concrete `mxlen` on every
    vectorizer; `-1` (grow-to-fit) is not allowed.

    :param vectorizers: `dict[str] -> Vectorizer`: The vectorizers to validate.
    :raises RuntimeError: Listing every offending vectorizer name, one per line.
    """
    fails = _check_lens(vectorizers)
    if fails:
        fail_str = "When using a vocab file mxlen for vectorizers must not be `-1`\n"
        # Bug fix: the original formatted the whole set into ONE string and then
        # joined that string's *characters* with newlines; list each failing
        # key on its own line instead.
        vect_str = "\n".join("\t{}".format(fail) for fail in fails)
        raise RuntimeError(fail_str + vect_str)
@export
class ParallelCorpusReader:
    """Base reader for parallel (source/target) corpora.

    Splits the supplied vectorizers into source vectorizers and the single
    target vectorizer keyed ``'tgt'``, ensuring the target sequence is
    bracketed with GO/EOS markers when it does not already emit them.
    """
    def __init__(self, vectorizers, trim=False, truncate=False):
        super().__init__()
        self.src_vectorizers = {}
        self.tgt_vectorizer = None
        for key, vect in vectorizers.items():
            if key != 'tgt':
                self.src_vectorizers[key] = vect
                continue
            self.tgt_vectorizer = vect
            # Guarantee begin/end markers on the target side.
            if not vect.emit_begin_tok:
                vect.emit_begin_tok.append(Offsets.VALUES[Offsets.GO])
            if not vect.emit_end_tok:
                vect.emit_end_tok.append(Offsets.VALUES[Offsets.EOS])
        self.trim = trim
        self.truncate = truncate
    def build_vocabs(self, files, **kwargs):
        # Subclasses produce (src_vocabs, tgt_vocab) from the given files.
        pass
    def load_examples(self, tsfile, vocab1, vocab2, shuffle, sort_key):
        # Subclasses materialize the example dataset here.
        pass
    def load(self, tsfile, vocab1, vocab2, batchsz, shuffle=False, sort_key=None):
        examples = self.load_examples(tsfile, vocab1, vocab2, shuffle, sort_key)
        return baseline.data.ExampleDataFeed(examples, batchsz,
                                             shuffle=shuffle, trim=self.trim, sort_key=sort_key, truncate=self.truncate)
@register_reader(task='seq2seq', name='tsv')
class TSVParallelCorpusReader(ParallelCorpusReader):
    """Reader for parallel corpora stored as one TSV file per split.

    Each line carries the source sentence in column `src_col_num` and the
    target sentence in column `tgt_col_num`, separated by tabs.
    """
    def __init__(self, vectorizers,
                 trim=False, truncate=False, src_col_num=0, tgt_col_num=1, **kwargs):
        super().__init__(vectorizers, trim, truncate)
        self.src_col_num = src_col_num
        self.tgt_col_num = tgt_col_num

    def build_vocabs(self, files, **kwargs):
        """Count source and target vocabularies.

        :param files: List[str]: TSV files to count over.
        :returns: (dict[str] -> vocab, tgt vocab); predefined vocabs are
            returned directly, otherwise counters (optionally min-frequency
            filtered via kwargs['min_f']).
        """
        if _all_predefined_vocabs(self.src_vectorizers) and isinstance(self.tgt_vectorizer, HasPredefinedVocab):
            logger.info("Skipping building vocabulary. All vectorizers have predefined vocabs!")
            return {k: v.vocab for k, v in self.src_vectorizers.items()}, self.tgt_vectorizer.vocab
        vocab_file = kwargs.get('vocab_file')
        if vocab_file is not None:
            all_vects = self.src_vectorizers.copy()
            all_vects['tgt'] = self.tgt_vectorizer
            _vocab_allowed(all_vects)
            # Only read the file once
            text = _read_from_col(0, listify(vocab_file))
            src_vocab = _build_vocab_for_col(None, None, self.src_vectorizers, text=text)
            tgt_vocab = _build_vocab_for_col(None, None, {'tgt': self.tgt_vectorizer}, text=text)
            return src_vocab, tgt_vocab['tgt']
        src_vocab = _build_vocab_for_col(self.src_col_num, files, self.src_vectorizers)
        tgt_vocab = _build_vocab_for_col(self.tgt_col_num, files, {'tgt': self.tgt_vectorizer})
        min_f = kwargs.get('min_f', {})
        # NOTE(review): pop mutates the caller-supplied min_f dict in place;
        # preserved for backward compatibility.
        tgt_min_f = {'tgt': min_f.pop('tgt', -1)}
        src_vocab = _filter_vocab(src_vocab, min_f)
        tgt_vocab = _filter_vocab(tgt_vocab, tgt_min_f)
        return src_vocab, tgt_vocab['tgt']

    def load_examples(self, tsfile, src_vocabs, tgt_vocab, do_shuffle, src_sort_key):
        """Vectorize each TSV line into an example dict.

        :returns: Seq2SeqExamples over dicts holding per-key vectors, their
            lengths, and 'tgt'/'tgt_lengths'.
        """
        ts = []
        with codecs.open(tsfile, encoding='utf-8', mode='r') as f:
            for line in f:
                # Raw strings avoid invalid-escape-sequence warnings and are
                # the idiomatic form for regex patterns.
                splits = re.split(r"\t", line.strip())
                src = list(filter(lambda x: len(x) != 0, re.split(r"\s+", splits[0])))
                example = {}
                for k, vectorizer in self.src_vectorizers.items():
                    example[k], length = vectorizer.run(src, src_vocabs[k])
                    if length is not None:
                        example['{}_lengths'.format(k)] = length
                tgt = list(filter(lambda x: len(x) != 0, re.split(r"\s+", splits[1])))
                example['tgt'], example['tgt_lengths'] = self.tgt_vectorizer.run(tgt, tgt_vocab)
                ts.append(example)
        return baseline.data.Seq2SeqExamples(ts, do_shuffle=do_shuffle, src_sort_key=src_sort_key)
@export
@register_reader(task='seq2seq', name='default')
class MultiFileParallelCorpusReader(ParallelCorpusReader):
    """Reader for parallel corpora stored as two aligned files.

    Source and target sides live in `<base><src_suffix>` and
    `<base><tgt_suffix>` respectively, one example per line.
    """
    def __init__(self, vectorizers, trim=False, truncate=False, **kwargs):
        super().__init__(vectorizers, trim, truncate)
        # 'pair_suffix' is required: (src_suffix, tgt_suffix), given with or
        # without a leading dot.
        pair_suffix = kwargs['pair_suffix']
        self.src_suffix = pair_suffix[0]
        self.tgt_suffix = pair_suffix[1]
        if not self.src_suffix.startswith('.'):
            self.src_suffix = '.' + self.src_suffix
        if not self.tgt_suffix.startswith('.'):
            self.tgt_suffix = '.' + self.tgt_suffix

    def build_vocabs(self, files, **kwargs):
        """Count source and target vocabularies over the suffixed file pairs.

        :param files: List[str]: Base file names (suffixes are appended here).
        :returns: (dict[str] -> vocab, tgt vocab); predefined vocabs are
            returned directly, otherwise counters (optionally min-frequency
            filtered via kwargs['min_f']).
        """
        if _all_predefined_vocabs(self.src_vectorizers) and isinstance(self.tgt_vectorizer, HasPredefinedVocab):
            logger.info("Skipping building vocabulary. All vectorizers have predefined vocabs!")
            return {k: v.vocab for k, v in self.src_vectorizers.items()}, self.tgt_vectorizer.vocab
        vocab_file = kwargs.get('vocab_file')
        if vocab_file is not None:
            all_vects = self.src_vectorizers.copy()
            all_vects['tgt'] = self.tgt_vectorizer
            _vocab_allowed(all_vects)
            # Only read the file once.
            text = _read_from_col(0, listify(vocab_file))
            src_vocab = _build_vocab_for_col(None, None, self.src_vectorizers, text=text)
            tgt_vocab = _build_vocab_for_col(None, None, {'tgt': self.tgt_vectorizer}, text=text)
            return src_vocab, tgt_vocab['tgt']
        src_vocab = _build_vocab_for_col(0, [f + self.src_suffix for f in files], self.src_vectorizers)
        tgt_vocab = _build_vocab_for_col(0, [f + self.tgt_suffix for f in files], {'tgt': self.tgt_vectorizer})
        min_f = kwargs.get('min_f', {})
        # NOTE(review): pop mutates the caller-supplied min_f dict in place;
        # preserved for backward compatibility.
        tgt_min_f = {'tgt': min_f.pop('tgt', -1)}
        src_vocab = _filter_vocab(src_vocab, min_f)
        tgt_vocab = _filter_vocab(tgt_vocab, tgt_min_f)
        return src_vocab, tgt_vocab['tgt']

    def load_examples(self, tsfile, src_vocabs, tgt_vocab, do_shuffle, src_sort_key):
        """Vectorize aligned src/tgt lines into example dicts.

        :returns: Seq2SeqExamples over dicts holding per-key vectors, their
            lengths, and 'tgt'/'tgt_lengths'.
        """
        ts = []
        with codecs.open(tsfile + self.src_suffix, encoding='utf-8', mode='r') as fsrc:
            with codecs.open(tsfile + self.tgt_suffix, encoding='utf-8', mode='r') as ftgt:
                for src, tgt in zip(fsrc, ftgt):
                    example = {}
                    # Raw strings avoid invalid-escape-sequence warnings.
                    src = re.split(r"\s+", src.strip())
                    for k, vectorizer in self.src_vectorizers.items():
                        example[k], length = vectorizer.run(src, src_vocabs[k])
                        if length is not None:
                            example['{}_lengths'.format(k)] = length
                    tgt = re.split(r"\s+", tgt.strip())
                    example['tgt'], example['tgt_lengths'] = self.tgt_vectorizer.run(tgt, tgt_vocab)
                    ts.append(example)
        return baseline.data.Seq2SeqExamples(ts, do_shuffle=do_shuffle, src_sort_key=src_sort_key)
def _try_read_labels(**kwargs):
label_file = kwargs.get('label_file')
label_list = kwargs.get('label_list')
label2index = {}
if label_file:
pre_labels = Counter(chain(*_read_from_col(0, listify(label_file))))
label2index = {l: i for i, l in enumerate(pre_labels)}
elif label_list:
label2index = {l: i for i, l in enumerate(label_list)}
return label2index
@export
class SeqPredictReader:
    def __init__(self, vectorizers, trim=False, truncate=False, mxlen=-1, **kwargs):
        """Set up a sequence-prediction (tagger) reader.

        :param vectorizers: dict[str] -> Vectorizer used on the input columns.
        :param trim: bool, passed through to batching downstream.
        :param truncate: bool, passed through to batching downstream.
        :param mxlen: int, max length for the default label vectorizer.
        :param kwargs: may contain 'label_vectorizer', a spec dict used to
            build a custom vectorizer for the label column; 'model_file' and
            'vocab_file' entries in it are downloaded through the data cache.
        """
        super().__init__()
        self.vectorizers = vectorizers
        self.trim = trim
        self.truncate = truncate
        label_vectorizer_spec = kwargs.get('label_vectorizer', None)
        if label_vectorizer_spec:
            # Resolve remote model/vocab files into local cached paths.
            cache = label_vectorizer_spec.get("data_download_cache", os.path.expanduser("~/.bl-data"))
            if 'model_file' in label_vectorizer_spec:
                label_vectorizer_spec['model_file'] = SingleFileDownloader(label_vectorizer_spec['model_file'], cache).download()
            if 'vocab_file' in label_vectorizer_spec:
                label_vectorizer_spec['vocab_file'] = SingleFileDownloader(label_vectorizer_spec['vocab_file'], cache).download()
            # 'transform' is accepted as an alias for 'transform_fn'.
            if 'transform' in label_vectorizer_spec:
                label_vectorizer_spec['transform_fn'] = label_vectorizer_spec['transform']
            # SECURITY NOTE(review): eval() on a config-supplied string
            # executes arbitrary code; only use trusted configuration.
            if 'transform_fn' in label_vectorizer_spec and isinstance(label_vectorizer_spec['transform_fn'], str):
                label_vectorizer_spec['transform_fn'] = eval(label_vectorizer_spec['transform_fn'])
            self.label_vectorizer = create_vectorizer(**label_vectorizer_spec)
        else:
            self.label_vectorizer = Dict1DVectorizer(fields='y', mxlen=mxlen)
        # Seed the label vocabulary with the special PAD/GO/EOS tokens.
        self.label2index = {
            Offsets.VALUES[Offsets.PAD]: Offsets.PAD,
            Offsets.VALUES[Offsets.GO]: Offsets.GO,
            Offsets.VALUES[Offsets.EOS]: Offsets.EOS
        }
def build_vocab(self, files, **kwargs):
if _all_predefined_vocabs(self.vectorizers):
vocabs = {k: v.vocab for k, v in self.vectorizers.items()}
have_vocabs = True
else:
have_vocabs = False
vocabs = {k: Counter() for k in self.vectorizers.keys()}
# TODO: Im not sure we should even support this option
label2index = _try_read_labels(**kwargs)
if label2index and have_vocabs:
offset = len(label2index)
# If the label list contains all of our special tokens, just reassign to the read in labels
if all(k in label2index for k in self.label2index):
self.label2index = label2index
# If the label list doesnt contain all our special tokens, prepend them
# TODO: This is a bit dangerous, what if some of them are in there?
else:
self.label2index.update({k: v + offset} for k, v in label2index.items())
logger.info("Skipping building vocabulary. All vectorizers have | |
os.path.join(b + '/textures/items', str('chestplateiron.png')))
except:
pass
# Copy item textures from the 1.6+ resource-pack layout under `a` into the
# legacy layout under `b`, renaming each file to its pre-1.6 name.
# The (new_name, old_name) pairs below collapse ~43 identical copy/rename
# stanzas into one data-driven loop. Pairs where both names match still
# need the copy, so they are kept.
_ITEM_RENAMES = [
    ('chicken_cooked.png', 'chickenCooked.png'),
    ('chicken_raw.png', 'chickenraw.png'),
    ('clay_ball.png', 'clay.png'),
    ('clock.png', 'clock.png'),
    ('coal.png', 'coal.png'),
    ('comparator.png', 'comparator.png'),
    ('compass.png', 'compass.png'),
    ('cookie.png', 'cookie.png'),
    ('diamond.png', 'diamond.png'),
    ('repeater.png', 'diode.png'),
    ('door_iron.png', 'doorIron.png'),
    ('door_wood.png', 'doorWood.png'),
    ('dye_powder_black.png', 'dyePowder_black.png'),
    ('dye_powder_blue.png', 'dyePowder_blue.png'),
    ('dye_powder_brown.png', 'dyePowder_brown.png'),
    ('dye_powder_cyan.png', 'dyePowder_cyan.png'),
    ('dye_powder_gray.png', 'dyePowder_gray.png'),
    ('dye_powder_green.png', 'dyePowder_green.png'),
    ('dye_powder_light_blue.png', 'dyePowder_lightBlue.png'),
    ('dye_powder_lime.png', 'dyePowder_lime.png'),
    ('dye_powder_magenta.png', 'dyePowder_magenta.png'),
    ('dye_powder_orange.png', 'dyePowder_orange.png'),
    ('dye_powder_pink.png', 'dyePowder_pink.png'),
    ('dye_powder_purple.png', 'dyePowder_purple.png'),
    ('dye_powder_red.png', 'dyePowder_red.png'),
    ('dye_powder_silver.png', 'dyePowder_silver.png'),
    ('dye_powder_white.png', 'dyePowder_white.png'),
    ('dye_powder_yellow.png', 'dyePowder_yellow.png'),
    ('egg.png', 'egg.png'),
    ('emerald.png', 'emerald.png'),
    ('map_empty.png', 'emptymap.png'),
    ('book_enchanted.png', 'enchantedBook.png'),
    ('ender_pearl.png', 'enderpearl.png'),
    ('experience_bottle.png', 'expbottle.png'),
    ('ender_eye.png', 'eyeofender.png'),
    ('feather.png', 'feather.png'),
    ('spider_eye_fermented.png', 'fermentedSpiderEye.png'),
    ('fireball.png', 'fireball.png'),
    ('fireworks.png', 'fireworks.png'),
    ('fireworks_charge.png', 'fireworksCharge.png'),
    ('fireworks_charge_overlay.png', 'fireworksCharge_overlay.png'),
    ('fish_cod_cooked.png', 'fishCooked.png'),
    ('fish_cod_raw.png', 'fishraw.png'),
]
_src_dir = a + '/assets/minecraft/textures/items'
_dst_dir = b + '/textures/items'
for _new_name, _old_name in _ITEM_RENAMES:
    try:
        # listdir/copy stay inside the try so a missing pack directory or
        # file just skips the texture, as each original stanza did.
        _names = os.listdir(_src_dir)
        shutil.copy(_src_dir + '/' + _new_name, _dst_dir)
        if _new_name in _names:
            os.rename(os.path.join(_dst_dir, _new_name),
                      os.path.join(_dst_dir, _old_name))
    except OSError:
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; filesystem failures are the only
        # expected errors here.
        pass
try:
file_path = a + '/assets/minecraft/textures/items'
file_names = os.listdir(file_path)
shutil.copy(file_path + '/fishing_rod_uncast.png', b + '/textures/items')
if 'fishing_rod_uncast.png' in file_names:
os.rename(os.path.join(b + '/textures/items', 'fishing_rod_uncast.png'),
os.path.join(b + '/textures/items', str('fishingRod.png')))
| |
# Copyright (c) 2016 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_log import log as logging
from aim.api import infra as api_infra
from aim.api import resource as api_res
from aim.api import service_graph as api_service_graph
from aim.api import status as api_status
from aim.api import tree as api_tree
from aim.common import utils
from aim import exceptions as exc
LOG = logging.getLogger(__name__)
class AimManager(object):
"""Main object for performing operations on AIM.
To manipulate AIM database objects, invoke the appropriate
operation specifying an AimContext and the resource to operate on. The
resource should be an object of type that inherits from
aim.api.ResourceBase.
The AimContext must have property 'db_session' set to sqlalchemy
ORM session object; the database operation is performed in the
context of that session.
Example: Create a BridgeDomain object and then retrieve it
db = ...
a_ctx = AimContext(db_session=db)
mgr = AimManager(...)
bd = aim.api.resource.BridgeDomain(...)
mgr.create(a_ctx, bd)
retrieved_bd = mgr.get(a_ctx, bd)
"""
    # TODO(ivar): aim_resources should eventually replace _db_model_map
    # Set of managed AIM resources.
    aim_resources = {api_res.BridgeDomain,
                     api_res.Agent,
                     api_res.Tenant,
                     api_res.Subnet,
                     api_res.VRF,
                     api_res.ApplicationProfile,
                     api_res.EndpointGroup,
                     api_res.Filter,
                     api_res.FilterEntry,
                     api_res.Contract,
                     api_res.ContractSubject,
                     api_res.ContractSubjFilter,
                     api_res.ContractSubjInFilter,
                     api_res.ContractSubjOutFilter,
                     api_res.ContractSubjGraph,
                     api_res.ContractSubjInGraph,
                     api_res.ContractSubjOutGraph,
                     api_status.AciStatus,
                     api_status.AciFault,
                     api_res.Endpoint,
                     api_res.VMMDomain,
                     api_res.PhysicalDomain,
                     api_res.L3Outside,
                     api_res.L3OutNodeProfile,
                     api_res.L3OutNode,
                     api_res.L3OutStaticRoute,
                     api_res.L3OutInterfaceProfile,
                     api_res.L3OutInterface,
                     api_res.ExternalNetwork,
                     api_res.ExternalSubnet,
                     api_res.L3OutInterfaceBgpPeerP,
                     api_infra.HostLink,
                     api_infra.OpflexDevice,
                     api_infra.HostDomainMapping,
                     api_infra.HostDomainMappingV2,
                     api_infra.HostLinkNetworkLabel,
                     api_infra.ApicAssignment,
                     api_res.SecurityGroup,
                     api_res.SecurityGroupSubject,
                     api_res.SecurityGroupRule,
                     api_res.Configuration,
                     api_service_graph.DeviceCluster,
                     api_service_graph.DeviceClusterInterface,
                     api_service_graph.ConcreteDevice,
                     api_service_graph.ConcreteDeviceInterface,
                     api_service_graph.ServiceGraph,
                     api_service_graph.ServiceGraphConnection,
                     api_service_graph.ServiceGraphNode,
                     api_service_graph.ServiceRedirectPolicy,
                     api_service_graph.DeviceClusterContext,
                     api_service_graph.DeviceClusterInterfaceContext,
                     api_service_graph.ServiceRedirectMonitoringPolicy,
                     api_service_graph.ServiceRedirectHealthGroup,
                     api_res.VMMPolicy,
                     api_res.Pod,
                     api_res.Topology,
                     api_res.VMMController,
                     api_res.VmmInjectedNamespace,
                     api_res.VmmInjectedDeployment,
                     api_res.VmmInjectedReplicaSet,
                     api_res.VmmInjectedService,
                     api_res.VmmInjectedHost,
                     api_res.VmmInjectedContGroup,
                     api_res.Infra,
                     api_res.NetflowVMMExporterPol,
                     api_res.QosRequirement,
                     api_res.QosDppPol,
                     api_res.VmmVswitchPolicyGroup,
                     api_res.VmmRelationToExporterPol,
                     api_res.SpanVsourceGroup,
                     api_res.SpanVsource,
                     api_res.SpanVdestGroup,
                     api_res.SpanVdest,
                     api_res.SpanVepgSummary,
                     api_res.InfraAccBundleGroup,
                     api_res.InfraAccPortGroup,
                     api_res.SpanSpanlbl,
                     api_tree.ActionLog}
    # Keep _db_model_map in AIM manager for backward compatibility
    _db_model_map = {k: None for k in aim_resources}
    # Build adjacency graph (Key: <ACI Resource> Value: <Key's children>)
    _model_tree = {}
    # Reverse lookup: ACI managed-object name -> AIM resource class.
    _res_by_aci_type = {}
    for klass in aim_resources:
        try:
            _model_tree.setdefault(klass._tree_parent, []).append(klass)
            _res_by_aci_type[klass._aci_mo_name] = klass
        except AttributeError:
            # Classes without _tree_parent/_aci_mo_name metadata simply do
            # not participate in the tree / ACI-type maps.
            pass
    def __init__(self):
        # The manager is stateless; all persistence state lives in the
        # AimContext/store passed to each operation.
        pass
    @utils.log
    def create(self, context, resource, overwrite=False, fix_ownership=False):
        """Persist AIM resource to the database.
        If 'overwrite' is True, and an object exists in the database
        with the same identity attribute values, then that object will
        be over-written with the specified resource. Otherwise an
        integrity constraint violation is raised.

        :param fix_ownership: when True, refuse to flip the 'monitored'
            flag of an existing object during an overwrite.
        :returns: the re-read resource, or the unchanged existing resource
            when the overwrite would be a no-op.
        """
        self._validate_resource_class(resource)
        with context.store.begin(subtransactions=True):
            old_db_obj = None
            old_monitored = None
            new_monitored = None
            if overwrite:
                old_db_obj = self._query_db_obj(context.store, resource)
                if old_db_obj:
                    old_monitored = getattr(old_db_obj, 'monitored', None)
                    new_monitored = getattr(resource, 'monitored', None)
                    if (fix_ownership and old_monitored is not None and
                            old_monitored != new_monitored):
                        raise exc.InvalidMonitoredStateUpdate(object=resource)
                    attr_val = context.store.extract_attributes(resource,
                                                                "other")
                    old_resource = self._make_resource(context, resource,
                                                       old_db_obj)
                    if old_resource.user_equal(resource):
                        # No need to update. Return old_resource for
                        # updated DB attributes
                        return old_resource
                    # Copy the non-identity attributes onto the existing row.
                    context.store.from_attr(old_db_obj, type(resource),
                                            attr_val)
            db_obj = old_db_obj or context.store.make_db_obj(resource)
            context.store.add(db_obj)
            if self._should_set_pending(old_db_obj, old_monitored,
                                        new_monitored):
                # NOTE(ivar): we shouldn't change status in the AIM manager
                # as this goes against the "AIM as a schema" principles.
                # However, we need to do this at least for cases where
                # we take ownership of the objects, which should be removed
                # soon as it's causing most of our bugs.
                self.set_resource_sync_pending(context, resource)
            return self.get(context, resource)
    @utils.log
    def update(self, context, resource, fix_ownership=False,
               force_update=False, **update_attr_val):
        """Persist updates to AIM resource to the database.
        Values of identity attributes of parameter 'resource' are used
        to determine the database object to update; no other attributes
        from 'resource' are used during the update. Param 'update_attr_val'
        specifies the values of the attributes to update.
        If the object does not exist in the database, no changes are
        made to the database.

        :param fix_ownership: when True, refuse to flip the 'monitored' flag.
        :param force_update: write the row even when the requested values
            already match the stored ones.
        """
        self._validate_resource_class(resource)
        with context.store.begin(subtransactions=True):
            db_obj = self._query_db_obj(context.store, resource)
            if db_obj:
                old_resource = self._make_resource(context, resource, db_obj)
                old_monitored = getattr(db_obj, 'monitored', None)
                new_monitored = update_attr_val.get('monitored')
                if (fix_ownership and old_monitored is not None and
                        old_monitored != new_monitored):
                    raise exc.InvalidMonitoredStateUpdate(object=resource)
                # Keep only the attributes this resource type declares.
                attr_val = {k: v for k, v in update_attr_val.items()
                            if k in resource.other_attributes.keys()}
                if attr_val:
                    old_resource_copy = copy.deepcopy(old_resource)
                    for k, v in attr_val.items():
                        setattr(old_resource, k, v)
                    if old_resource.user_equal(
                            old_resource_copy) and not force_update:
                        # Nothing to do here
                        return old_resource
                elif resource.identity_attributes:
                    # force update
                    id_attr_0 = list(resource.identity_attributes.keys())[0]
                    attr_val = {id_attr_0: getattr(resource, id_attr_0)}
                context.store.from_attr(db_obj, type(resource), attr_val)
                context.store.add(db_obj)
                if self._should_set_pending(db_obj, old_monitored,
                                            new_monitored):
                    # NOTE(ivar): we shouldn't change status in the AIM manager
                    # as this goes against the "AIM as a schema" principles.
                    # However, we need to do this at least for cases where
                    # we take ownership of the objects, which should be removed
                    # soon as it's causing most of our bugs.
                    self.set_resource_sync_pending(context, resource)
                return self.get(context, resource)
def _should_set_pending(self, old_obj, old_monitored, new_monitored):
return old_obj and old_monitored is False and new_monitored is True
    @utils.log
    def delete(self, context, resource, force=False, cascade=False):
        """Delete AIM resource from the database.
        Only values of identity attributes of parameter 'resource' are
        used; other attributes may be left unspecified.
        If the object does not exist in the database, no error is reported.

        :param force: delete even a monitored object in SYNC_PENDING state.
        :param cascade: also delete the object's subtree, even when the
            object itself is absent.
        """
        self._validate_resource_class(resource)
        with context.store.begin(subtransactions=True):
            db_obj = self._query_db_obj(context.store, resource)
            if db_obj:
                if isinstance(resource, api_res.AciResourceBase):
                    status = self.get_status(
                        context, resource, create_if_absent=False)
                    if status and getattr(
                            db_obj, 'monitored', None) and not force:
                        if status.sync_status == status.SYNC_PENDING:
                            # Cannot delete monitored objects if sync status
                            # is pending, or ownership flip might fail
                            raise exc.InvalidMonitoredObjectDelete(
                                object=resource)
                    if status:
                        # Remove the status row together with its resource.
                        self.delete(context, status, force=force)
                context.store.delete(db_obj)
            # When cascade is specified, delete the object's subtree even if
            # the resource itself doesn't exist.
            if cascade:
                for child_res in self.get_subtree(context, resource):
                    # Delete without cascade
                    self.delete(context, child_res, force=force)
@utils.log
def delete_all(self, context, resource_class, for_update=False, **kwargs):
"""Delete many AIM resources from the database that match criteria.
Parameter 'resource_class' indicates the type of resource to
look for. Matching criteria are specified as keyword-arguments.
Only equality matches are supported.
Returns a list of resources that match.
"""
self._validate_resource_class(resource_class)
attr_val = {k: v for k, v in kwargs.items()
if k in resource_class.attributes() +
['in_', 'notin_', 'order_by']}
return self._delete_db(context.store, resource_class, **attr_val)
def get(self, context, resource, for_update=False, include_aim_id=False):
"""Get AIM resource from the database.
Values of identity attributes of parameter 'resource' are used
to determine the database object to fetch; other attributes may
be left unspecified.
Returns a resource with all the attributes populated with contents
of the object in the database if the object is found, None
otherwise.
"""
self._validate_resource_class(resource)
db_obj = self._query_db_obj(context.store, resource,
for_update=for_update)
return self._make_resource(context, resource, db_obj,
include_aim_id=include_aim_id)
def _make_resource(self, context, resource, db_obj, include_aim_id=None):
return context.store.make_resource(
type(resource), db_obj,
include_aim_id=include_aim_id) if db_obj else None
def get_by_id(self, context, resource_class, aim_id, for_update=False,
include_aim_id=False):
self._validate_resource_class(resource_class)
db_obj = self._query_db(context.store, resource_class,
for_update=for_update, aim_id=aim_id)
return context.store.make_resource(
resource_class, db_obj[0],
include_aim_id=include_aim_id) if db_obj else None
def find(self, context, resource_class, for_update=False,
include_aim_id=False, **kwargs):
"""Find AIM resources from the database that match specified criteria.
Parameter 'resource_class' indicates the type of resource to
look for. Matching criteria are specified as keyword-arguments.
Only equality matches are supported.
Returns a list of resources that match.
"""
self._validate_resource_class(resource_class)
attr_val = {k: v for k, v in kwargs.items()
if k in resource_class.attributes() +
['in_', 'notin_', 'order_by']}
result = []
for obj in self._query_db(context.store, resource_class,
for_update=for_update, **attr_val):
result.append(
context.store.make_resource(resource_class, obj,
include_aim_id=include_aim_id))
return result
def count(self, context, resource_class, **kwargs):
self._validate_resource_class(resource_class)
attr_val = {k: v for k, v in kwargs.items()
if k in resource_class.attributes() +
['in_', 'notin_', 'order_by']}
return self._count_db(context.store, resource_class, **attr_val)
def get_status(self, context, resource, for_update=False,
create_if_absent=True):
"""Get status of an AIM resource, if any.
Values of identity attributes of parameter 'resource' are used
to determine the object to get status for; other attributes may
be left unspecified.
"""
with context.store.begin(subtransactions=True):
if isinstance(resource, api_res.AciResourceBase):
res_type, res_id = self._get_status_params(context, resource)
if res_type and res_id is not None:
status = self.get(context, api_status.AciStatus(
resource_type=res_type, resource_id=res_id,
resource_root=resource.root), for_update=for_update)
if not status:
if not create_if_absent:
return
# Create one with default values
# NOTE(ivar): Sometimes we need the status of | |
######################################################################
# mat4 - Matrix class (4x4 matrix)
#
# Copyright (C) 2002, <NAME> (<EMAIL>)
#
# You may distribute under the terms of the BSD license, as
# specified in the file license.txt.
####################################################################
import types, math, copy
from vec3 import vec3 as _vec3
from vec4 import vec4 as _vec4
from mat3 import mat3 as _mat3
# [ 0 1 2 3 ]
# [ 4 5 6 7 ]
# [ 8 9 10 11 ]
# [ 12 13 14 15 ]
# mat4
class mat4:
"""Matrix class (4x4).
This class represents a 4x4 matrix that can be used to store
affine transformations.
"""
    def __init__(self, *args):
        """Constructor.

        Accepts: no args (zero matrix); one scalar (scaled identity);
        one mat4 (copy); one string of 16 whitespace/comma separated
        numbers; one flat sequence of 16 values; four row sequences
        (stored as columns of the arguments, i.e. rows of the matrix);
        or 16 individual values in row-major order.
        """
        # No arguments
        if len(args)==0:
            self.mlist = 16*[0.0]
        # 1 argument (list, scalar or mat4)
        elif len(args)==1:
            T = type(args[0])
            # Scalar s -> s on the diagonal, zero elsewhere.
            if T==types.FloatType or T==types.IntType or T==types.LongType:
                self.mlist = [args[0],0.0,0.0,0.0,
                              0.0,args[0],0.0,0.0,
                              0.0,0.0,args[0],0.0,
                              0.0,0.0,0.0,args[0]]
            # mat4
            elif isinstance(args[0], mat4):
                self.mlist = copy.copy(args[0].mlist)
            # String
            elif T==types.StringType:
                # Normalize commas/double spaces, then parse floats.
                s=args[0].replace(","," ").replace("  "," ").strip().split(" ")
                self.mlist=map(lambda x: float(x), s)
            else:
                self.mlist = list(args[0])
        # 4 arguments (sequences)
        elif len(args)==4:
            # Each argument supplies one column of the row-major list.
            a,b,c,d=args
            self.mlist = [a[0], b[0], c[0], d[0],
                          a[1], b[1], c[1], d[1],
                          a[2], b[2], c[2], d[2],
                          a[3], b[3], c[3], d[3]]
        # 16 arguments
        elif len(args)==16:
            self.mlist = list(args)
        else:
            raise TypeError,"mat4() arg can't be converted to mat4"

        # Check if there are really 16 elements in the list
        if len(self.mlist)!=16:
            raise TypeError, "mat4(): Wrong number of matrix elements ("+`len(self.mlist)`+" instead of 16)"
    def __repr__(self):
        # eval()-able form, e.g. 'mat4(1.0, 0.0, ...)' (backticks = repr).
        return 'mat4('+`self.mlist`[1:-1]+')'
    def __str__(self):
        """Return a human-readable 4-row layout with %9.4f fields."""
        fmt="%9.4f"
        m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist
        return ('['+fmt%m11+', '+fmt%m12+', '+fmt%m13+', '+fmt%m14+']\n'+
                '['+fmt%m21+', '+fmt%m22+', '+fmt%m23+', '+fmt%m24+']\n'+
                '['+fmt%m31+', '+fmt%m32+', '+fmt%m33+', '+fmt%m34+']\n'+
                '['+fmt%m41+', '+fmt%m42+', '+fmt%m43+', '+fmt%m44+']')
def __eq__(self, other):
"""== operator"""
if isinstance(other, mat4):
return self.mlist==other.mlist
else:
return 0
def __ne__(self, other):
"""!= operator"""
if isinstance(other, mat4):
return self.mlist!=other.mlist
else:
return 1
def __add__(self, other):
"""Matrix addition.
>>> M=mat4(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16)
>>> print M+M
[ 2.0000, 4.0000, 6.0000, 8.0000]
[ 10.0000, 12.0000, 14.0000, 16.0000]
[ 18.0000, 20.0000, 22.0000, 24.0000]
[ 26.0000, 28.0000, 30.0000, 32.0000]
"""
if isinstance(other, mat4):
return mat4(map(lambda x,y: x+y, self.mlist, other.mlist))
else:
raise TypeError, "unsupported operand type for +"
def __sub__(self, other):
"""Matrix subtraction.
>>> M=mat4(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16)
>>> print M-M
[ 0.0000, 0.0000, 0.0000, 0.0000]
[ 0.0000, 0.0000, 0.0000, 0.0000]
[ 0.0000, 0.0000, 0.0000, 0.0000]
[ 0.0000, 0.0000, 0.0000, 0.0000]
"""
if isinstance(other, mat4):
return mat4(map(lambda x,y: x-y, self.mlist, other.mlist))
else:
raise TypeError, "unsupported operand type for -"
    def __mul__(self, other):
        """Multiplication.

        >>> M=mat4(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16)
        >>> print M*2.0
        [   2.0000,    4.0000,    6.0000,    8.0000]
        [  10.0000,   12.0000,   14.0000,   16.0000]
        [  18.0000,   20.0000,   22.0000,   24.0000]
        [  26.0000,   28.0000,   30.0000,   32.0000]
        >>> print 2.0*M
        [   2.0000,    4.0000,    6.0000,    8.0000]
        [  10.0000,   12.0000,   14.0000,   16.0000]
        [  18.0000,   20.0000,   22.0000,   24.0000]
        [  26.0000,   28.0000,   30.0000,   32.0000]
        >>> print M*M
        [  90.0000,  100.0000,  110.0000,  120.0000]
        [ 202.0000,  228.0000,  254.0000,  280.0000]
        [ 314.0000,  356.0000,  398.0000,  440.0000]
        [ 426.0000,  484.0000,  542.0000,  600.0000]
        >>> print M*_vec3(1,2,3)
        (0.1765, 0.4510, 0.7255)
        >>> print _vec3(1,2,3)*M
        (0.7083, 0.8056, 0.9028)
        """
        T = type(other)
        # mat4*scalar: element-wise scaling
        if T==types.FloatType or T==types.IntType or T==types.LongType:
            return mat4(map(lambda x,other=other: x*other, self.mlist))
        # mat4*vec3: the vec3 is treated as a point (w=1), result is
        # dehomogenized by w (raises ZeroDivisionError when w == 0)
        if isinstance(other, _vec3):
            m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist
            w = float(m41*other.x + m42*other.y + m43*other.z + m44)
            return _vec3(m11*other.x + m12*other.y + m13*other.z + m14,
                         m21*other.x + m22*other.y + m23*other.z + m24,
                         m31*other.x + m32*other.y + m33*other.z + m34)/w
        # mat4*vec4: plain matrix-vector product, no dehomogenization
        if isinstance(other, _vec4):
            m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist
            return _vec4(m11*other.x + m12*other.y + m13*other.z + m14*other.w,
                         m21*other.x + m22*other.y + m23*other.z + m24*other.w,
                         m31*other.x + m32*other.y + m33*other.z + m34*other.w,
                         m41*other.x + m42*other.y + m43*other.z + m44*other.w)
        # mat4*mat4: fully unrolled 4x4 product (row i of self dotted with
        # column j of other)
        if isinstance(other, mat4):
            m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist
            n11,n12,n13,n14,n21,n22,n23,n24,n31,n32,n33,n34,n41,n42,n43,n44 = other.mlist
            return mat4( m11*n11+m12*n21+m13*n31+m14*n41,
                         m11*n12+m12*n22+m13*n32+m14*n42,
                         m11*n13+m12*n23+m13*n33+m14*n43,
                         m11*n14+m12*n24+m13*n34+m14*n44,
                         m21*n11+m22*n21+m23*n31+m24*n41,
                         m21*n12+m22*n22+m23*n32+m24*n42,
                         m21*n13+m22*n23+m23*n33+m24*n43,
                         m21*n14+m22*n24+m23*n34+m24*n44,
                         m31*n11+m32*n21+m33*n31+m34*n41,
                         m31*n12+m32*n22+m33*n32+m34*n42,
                         m31*n13+m32*n23+m33*n33+m34*n43,
                         m31*n14+m32*n24+m33*n34+m34*n44,
                         m41*n11+m42*n21+m43*n31+m44*n41,
                         m41*n12+m42*n22+m43*n32+m44*n42,
                         m41*n13+m42*n23+m43*n33+m44*n43,
                         m41*n14+m42*n24+m43*n34+m44*n44)
        # unsupported
        else:
            raise TypeError, "unsupported operand type for *"
    def __rmul__(self, other):
        """Right multiplication: scalar*mat4, vec4*mat4, vec3*mat4."""
        T = type(other)
        # scalar*mat4: element-wise scaling
        if T==types.FloatType or T==types.IntType or T==types.LongType:
            return mat4(map(lambda x,other=other: other*x, self.mlist))
        # vec4*mat4: row vector times matrix
        if isinstance(other, _vec4):
            m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist
            return _vec4(other.x*m11 + other.y*m21 + other.z*m31 + other.w*m41,
                         other.x*m12 + other.y*m22 + other.z*m32 + other.w*m42,
                         other.x*m13 + other.y*m23 + other.z*m33 + other.w*m43,
                         other.x*m14 + other.y*m24 + other.z*m34 + other.w*m44)
        # vec3*mat4: row point (w=1) times matrix, dehomogenized by w
        # (raises ZeroDivisionError when w == 0)
        if isinstance(other, _vec3):
            m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist
            w = float(other.x*m14 + other.y*m24 + other.z*m34 + m44)
            return _vec3(other.x*m11 + other.y*m21 + other.z*m31 + m41,
                         other.x*m12 + other.y*m22 + other.z*m32 + m42,
                         other.x*m13 + other.y*m23 + other.z*m33 + m43)/w
        # mat4*mat4: delegate to __mul__
        if isinstance(other, mat4):
            return self.__mul__(other)
        # unsupported
        else:
            raise TypeError, "unsupported operand type for *"
def __div__(self, other):
"""Division
>>> M=mat4(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16)
>>> print M/2.0
[ 0.5000, 1.0000, 1.5000, 2.0000]
[ 2.5000, 3.0000, 3.5000, 4.0000]
[ 4.5000, 5.0000, 5.5000, 6.0000]
[ 6.5000, 7.0000, 7.5000, 8.0000]
"""
T = type(other)
# mat4/scalar
if T==types.FloatType or T==types.IntType or T==types.LongType:
return mat4(map(lambda x,other=other: x/other, self.mlist))
# unsupported
else:
raise TypeError, "unsupported operand type for /"
def __mod__(self, other):
"""Modulo.
>>> M=mat4(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16)
>>> print M%5.0
[ 1.0000, 2.0000, 3.0000, 4.0000]
[ 0.0000, 1.0000, 2.0000, 3.0000]
[ 4.0000, 0.0000, 1.0000, 2.0000]
[ 3.0000, 4.0000, 0.0000, 1.0000]
"""
T = type(other)
# mat4%scalar
if T==types.FloatType or T==types.IntType or T==types.LongType:
return mat4(map(lambda x,other=other: x%other, self.mlist))
# unsupported
else:
raise TypeError, "unsupported operand type for %"
def __neg__(self):
"""Negation.
>>> M=mat4(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16)
>>> print -M
[ -1.0000, -2.0000, -3.0000, -4.0000]
[ -5.0000, -6.0000, -7.0000, -8.0000]
[ -9.0000, -10.0000, -11.0000, -12.0000]
[ -13.0000, -14.0000, -15.0000, -16.0000]
"""
return mat4(map(lambda x: -x, self.mlist))
def __pos__(self):
"""
>>> M=mat4(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16)
>>> print +M
[ 1.0000, 2.0000, 3.0000, 4.0000]
[ 5.0000, 6.0000, 7.0000, 8.0000]
[ 9.0000, 10.0000, 11.0000, 12.0000]
[ 13.0000, 14.0000, 15.0000, 16.0000]
"""
return mat4(map(lambda x: +x, self.mlist))
    def __len__(self):
        # A mat4 always exposes four rows/columns.
        return 4
def __getitem__(self, key):
if type(key)==types.IntType:
if key<0 or key>3:
raise IndexError,"index out of range"
m=self.mlist
if key==0: return [m[0],m[4],m[8],m[12]]
elif key==1: return [m[1],m[5],m[9],m[13]]
elif key==2: return [m[2],m[6],m[10],m[14]]
elif key==3: return [m[3],m[7],m[11],m[15]]
elif type(key)==types.TupleType:
i,j=key
if i<0 or i>3 or j<0 or j>3:
raise IndexError, "index out of range"
return self.mlist[i*4+j]
else:
raise TypeError,"index must be integer or 2-tuple"
def __setitem__(self, key, value):
if type(key)==types.IntType:
if key<0 or key>3:
raise IndexError,"index out of range"
m=self.mlist
if key==0: m[0],m[4],m[8],m[12]=value
elif key==1: m[1],m[5],m[9],m[13]=value
elif key==2: m[2],m[6],m[10],m[14]=value
elif key==3: m[3],m[7],m[11],m[15]=value
elif type(key)==types.TupleType:
i,j=key
if i<0 or i>3 or j<0 or j>3:
raise IndexError, "index out of range"
self.mlist[i*4+j] = value
else:
raise TypeError,"index must be integer or 2-tuple"
def getRow(self, idx):
"""Return row (as vec4)."""
m=self.mlist
if idx==0: return _vec4(m[0], m[1], m[2], m[3])
elif idx==1: return _vec4(m[4], m[5], m[6], m[7])
elif idx==2: return _vec4(m[8], m[9], m[10], m[11])
elif idx==3: return _vec4(m[12], m[13], m[14], m[15])
else:
raise IndexError,"index out of range"
def setRow(self, idx, value):
"""Set row."""
m=self.mlist
if idx==0: m[0],m[1],m[2],m[3] = value
elif idx==1: m[4],m[5],m[6],m[7] = value
elif idx==2: m[8],m[9],m[10],m[11] = value
elif idx==3: m[12],m[13],m[14],m[15] = value
else:
raise IndexError,"index out of range"
def getColumn(self, idx):
"""Return column (as vec4)."""
m=self.mlist
if idx==0: return _vec4(m[0], m[4], m[8], m[12])
elif idx==1: return _vec4(m[1], m[5], m[9], m[13])
elif idx==2: return _vec4(m[2], m[6], m[10], m[14])
elif idx==3: return _vec4(m[3], m[7], m[11], m[15])
else:
raise IndexError,"index out of range"
def setColumn(self, idx, value):
"""Set column."""
m=self.mlist
if idx==0: m[0],m[4],m[8],m[12] = value
elif idx==1: m[1],m[5],m[9],m[13] = value
elif idx==2: m[2],m[6],m[10],m[14] = value
elif idx==3: m[3],m[7],m[11],m[15] = value
else:
raise IndexError,"index out of range"
def toList(self, rowmajor=0):
"""Return a list containing the matrix elements.
By default the list is in column-major order (which can directly be
used in OpenGL or RenderMan). If you set the optional argument
rowmajor to 1, you'll get the list in row-major order.
>>> M=mat4(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16)
>>> print M.toList()
[1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15, 4, 8, 12, 16]
>>> print M.toList(rowmajor=1)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
"""
if rowmajor:
return copy.copy(self.mlist)
else:
return self.transpose().mlist
def identity(self):
"""Return identity matrix.
>>> print mat4().identity()
[ 1.0000, 0.0000, 0.0000, 0.0000]
[ 0.0000, 1.0000, 0.0000, 0.0000]
[ 0.0000, 0.0000, 1.0000, 0.0000]
[ 0.0000, 0.0000, 0.0000, 1.0000]
"""
return mat4(1.0, 0.0, 0.0, 0.0,
0.0, | |
'''
@author: <NAME>
'''
from domeniu.entitati import Numar
from infrastrctura.repos import Repo
from erori.exceptii import RepoError, ValidError
from business.services import ServiceNumere
from valid.validatoare import Validator
class Teste(object):
    def __init__(self):
        '''
        Constructor for the test suite; it keeps no state.
        Input: -
        Output: -
        '''
        pass
def __test_creeaza_numar(self):
'''
Functie de tip test care verifica daca un obiect de tip Numar a fost creat cu succes
'''
numar = Numar('120', 3)
assert numar.get_valoare() == '120'
assert numar.get_baza() == 3
numar.set_valoare('122')
assert numar.get_valoare() == '122'
numar.set_baza(4)
assert numar.get_baza() == 4
    def __test_repo_add(self):
        '''
        Check that adding two numbers in a given base yields the expected
        value (and base) for base-3, base-16 and base-10 inputs, including
        carries and an empty operand.
        '''
        repo = Repo()
        # Base 3, digit-wise addition without growing the number.
        numarA = Numar('120', 3)
        numarB = Numar('102', 3)
        rezultat = repo.add(numarA, numarB, 3)
        assert rezultat.get_valoare() == '222'
        assert rezultat.get_baza() == 3
        # Base 3, carry extends the result by one digit.
        numarA = Numar('120', 3)
        numarB = Numar('110', 3)
        rezultat = repo.add(numarA, numarB, 3)
        assert rezultat.get_valoare() == '1000'
        # Base 16 with letter digits.
        numarA = Numar('2A', 16)
        numarB = Numar('14', 16)
        rezultat = repo.add(numarA, numarB, 16)
        assert rezultat.get_valoare() == '3E'
        assert rezultat.get_baza() == 16
        numarA = Numar('2A', 16)
        numarB = Numar('17', 16)
        rezultat = repo.add(numarA, numarB, 16)
        assert rezultat.get_valoare() == '41'
        # Base 10, adding zero is the identity.
        numarA = Numar('1235', 10)
        numarB = Numar('0', 10)
        rezultat = repo.add(numarA, numarB, 10)
        assert rezultat.get_valoare() == '1235'
        assert rezultat.get_baza() == 10
        numarA = Numar('1235', 10)
        numarB = Numar('65', 10)
        rezultat = repo.add(numarA, numarB, 10)
        assert rezultat.get_valoare() == '1300'
        # An empty value behaves like zero.
        numarA = Numar('', 10)
        numarB = Numar('6', 10)
        rezultat = repo.add(numarA, numarB, 10)
        assert rezultat.get_valoare() == '6'
    def __test_repo_multiply(self):
        '''
        Check that multiplying two numbers in a given base yields the
        expected value (and base) across bases 2, 5, 6, 10 and 16.
        '''
        repo = Repo()
        # Base 6, no carry.
        numarA = Numar('120', 6)
        numarB = Numar('2', 6)
        rezultat = repo.multiply(numarA, numarB, 6)
        assert rezultat.get_valoare() == '240'
        assert rezultat.get_baza() == 6
        # Base 6, with carry propagation.
        numarA = Numar('120', 6)
        numarB = Numar('3', 6)
        rezultat = repo.multiply(numarA, numarB, 6)
        assert rezultat.get_valoare() == '400'
        # Base 16 with letter digits in the result.
        numarA = Numar('52', 16)
        numarB = Numar('3', 16)
        rezultat = repo.multiply(numarA, numarB, 16)
        assert rezultat.get_valoare() == 'F6'
        numarA = Numar('57', 16)
        numarB = Numar('7', 16)
        rezultat = repo.multiply(numarA, numarB, 16)
        assert rezultat.get_valoare() == '261'
        # Base 10, multiplying by zero yields zero.
        numarA = Numar('130', 10)
        numarB = Numar('0', 10)
        rezultat = repo.multiply(numarA, numarB, 10)
        assert rezultat.get_valoare() == '0'
        numarA = Numar('10', 10)
        numarB = Numar('10', 10)
        rezultat = repo.multiply(numarA, numarB, 10)
        assert rezultat.get_valoare() == '100'
        # Multi-digit multipliers in bases 5 and 2.
        numarA = Numar('34', 5)
        numarB = Numar('12', 5)
        rezultat = repo.multiply(numarA, numarB, 5)
        assert rezultat.get_valoare() == '1013'
        numarA = Numar('1010', 2)
        numarB = Numar('11101', 2)
        rezultat = repo.multiply(numarA, numarB, 2)
        assert rezultat.get_valoare() == '100100010'
    def __test_repo_substract(self):
        '''
        Check that subtracting two numbers in a given base yields the
        expected value (and base), including borrows and a zero result.
        '''
        repo = Repo()
        # Base 6, no borrow.
        numarA = Numar('54', 6)
        numarB = Numar('32', 6)
        rezultat = repo.substract(numarA, numarB, 6)
        assert rezultat.get_valoare() == '22'
        assert rezultat.get_baza() == 6
        # Base 6, single borrow.
        numarA = Numar('54', 6)
        numarB = Numar('35', 6)
        rezultat = repo.substract(numarA, numarB, 6)
        assert rezultat.get_valoare() == '15'
        numarA = Numar('54', 6)
        numarB = Numar('45', 6)
        rezultat = repo.substract(numarA, numarB, 6)
        assert rezultat.get_valoare() == '5'
        # Result loses the leading digit.
        numarA = Numar('154', 6)
        numarB = Numar('55', 6)
        rezultat = repo.substract(numarA, numarB, 6)
        assert rezultat.get_valoare() == '55'
        # Base 16 with letter digits.
        numarA = Numar('AB', 16)
        numarB = Numar('3C', 16)
        rezultat = repo.substract(numarA, numarB, 16)
        assert rezultat.get_valoare() == '6F'
        assert rezultat.get_baza() == 16
        # Base 10, including the x - x == 0 case.
        numarA = Numar('127', 10)
        numarB = Numar('30', 10)
        rezultat = repo.substract(numarA, numarB, 10)
        assert rezultat.get_valoare() == '97'
        assert rezultat.get_baza() == 10
        numarA = Numar('13', 10)
        numarB = Numar('13', 10)
        rezultat = repo.substract(numarA, numarB, 10)
        assert rezultat.get_valoare() == '0'
        assert rezultat.get_baza() == 10
    def __test_repo_divide(self):
        '''
        Check that dividing two numbers in a given base yields the expected
        quotient and remainder (value and base) across bases 2, 5, 10, 16.
        NOTE(review): local naming mixes Romanian (numarA) and English
        (numberA); consider unifying in a follow-up.
        '''
        repo = Repo()
        # Base 5 with a non-zero remainder.
        numarA = Numar('243', 5)
        numarB = Numar('3', 5)
        cat, rest = repo.divide(numarA, numarB, 5)
        assert cat.get_valoare() == '44'
        assert cat.get_baza() == 5
        assert rest.get_valoare() == '1'
        assert rest.get_baza() == 5
        # Base 10, multi-digit quotient.
        numarA = Numar('8140', 10)
        numarB = Numar('3', 10)
        cat, rest = repo.divide(numarA, numarB, 10)
        assert cat.get_valoare() == '2713'
        assert cat.get_baza() == 10
        assert rest.get_valoare() == '1'
        assert rest.get_baza() == 10
        # Base 16 with letter digits.
        numarA = Numar('A5', 16)
        numarB = Numar('D', 16)
        cat, rest = repo.divide(numarA, numarB, 16)
        assert cat.get_valoare() == 'C'
        assert cat.get_baza() == 16
        assert rest.get_valoare() == '9'
        assert rest.get_baza() == 16
        # Exact division: remainder is zero.
        numberA = Numar('100', 10)
        numberB = Numar('10', 10)
        quotient, remainder = repo.divide(numberA, numberB, 10)
        assert quotient.get_valoare() == '10'
        assert quotient.get_baza() == 10
        assert remainder.get_valoare() == '0'
        assert remainder.get_baza() == 10
        numberA = Numar('325', 10)
        numberB = Numar('17', 10)
        quotient, remainder = repo.divide(numberA, numberB, 10)
        assert quotient.get_valoare() == '19'
        assert remainder.get_valoare() == '2'
        # Binary division.
        numberA = Numar('1111', 2)
        numberB = Numar('100', 2)
        quotient, remainder = repo.divide(numberA, numberB, 2)
        assert quotient.get_valoare() == '11'
        assert quotient.get_baza() == 2
        assert remainder.get_valoare() == '11'
        assert remainder.get_baza() == 2
def __test_repo_subtitutie(self):
'''
Functie de tip test care verifica daca conversia prin metoda subtitutiei a unui numar a fost realizata cu succes
'''
repo = Repo()
numar = Numar('1010', 2)
rezultat = repo.convert_subtitutie(numar)
assert rezultat.get_valoare() == '10'
assert rezultat.get_baza() == 10
numar = Numar('153', 6)
rezultat = repo.convert_subtitutie(numar)
assert rezultat.get_valoare() == '69'
numar = Numar('A3', 16)
rezultat = repo.convert_subtitutie(numar)
assert rezultat.get_valoare() == '163'
numar = Numar('132', 10)
try:
rezultat = repo.convert_subtitutie(numar)
assert False
except RepoError as re:
assert str(re) == "Baza nevalida!\n"
    def __test_repo_impartiri_succesive(self):
        '''
        Check conversion out of base 10 via successive divisions for target
        bases 2, 3 and 16, and that a non-base-10 input is rejected.
        '''
        repo = Repo()
        numar = Numar('34', 10)
        rezultat = repo.convert_impartiri_succesive(numar, 2)
        assert rezultat.get_valoare() == '100010'
        assert rezultat.get_baza() == 2
        numar = Numar('63', 10)
        rezultat = repo.convert_impartiri_succesive(numar, 16)
        assert rezultat.get_valoare() == '3F'
        assert rezultat.get_baza() == 16
        numar = Numar('43', 10)
        rezultat = repo.convert_impartiri_succesive(numar, 3)
        assert rezultat.get_valoare() == '1121'
        assert rezultat.get_baza() == 3
        # Only base-10 sources are accepted by this method.
        numar = Numar('17', 9)
        try:
            rezultat = repo.convert_impartiri_succesive(numar, 3)
            assert False
        except RepoError as re:
            assert str(re) == "Baza nevalida!\n"
    def __test_repo_convert_to_another_base(self):
        '''
        Check general base-to-base conversion, including conversions to and
        from base 10, an identity conversion, and power-of-two bases.
        '''
        repo = Repo()
        numar = Numar('120', 3)
        rezultat = repo.convert_to_another_base(numar, 7)
        assert rezultat.get_valoare() == '21'
        assert rezultat.get_baza() == 7
        numar = Numar('A7', 16)
        rezultat = repo.convert_to_another_base(numar, 5)
        assert rezultat.get_valoare() == '1132'
        assert rezultat.get_baza() == 5
        numar = Numar('35', 10)
        rezultat = repo.convert_to_another_base(numar, 2)
        assert rezultat.get_valoare() == '100011'
        assert rezultat.get_baza() == 2
        numar = Numar('163', 9)
        rezultat = repo.convert_to_another_base(numar, 10)
        assert rezultat.get_valoare() == '138'
        assert rezultat.get_baza() == 10
        # Converting to the same base must be the identity.
        numar = Numar('150', 10)
        rezultat = repo.convert_to_another_base(numar, 10)
        assert rezultat.get_valoare() == '150'
        assert rezultat.get_baza() == 10
        # Power-of-two bases (16 -> 2, 2 -> 8).
        numar = Numar('1EF', 16)
        rezultat = repo.convert_to_another_base(numar, 2)
        assert rezultat.get_valoare() == '111101111'
        assert rezultat.get_baza() == 2
        numar = Numar('10101011', 2)
        rezultat = repo.convert_to_another_base(numar, 8)
        assert rezultat.get_valoare() == '253'
        assert rezultat.get_baza() == 8
def __test_repo_convert_base4_to_base2(self):
'''
Functie care verifica daca conversia unui numar din baza 4 in baza 2 a fost realizata cu succes
'''
repo = Repo()
numar = Numar('31', 4)
rezultat = repo.convert_base4_to_base2(numar)
assert rezultat.get_valoare() == '1101'
assert rezultat.get_baza() == 2
numar = Numar('1203', 4)
rezultat = repo.convert_base4_to_base2(numar)
assert rezultat.get_valoare() == '1100011'
numar = Numar('21', 6)
try:
rezultat = repo.convert_base4_to_base2(numar)
assert False
except RepoError as re:
assert str(re) == "Baza nevalida!\n"
def __test_repo_convert_base8_to_base2(self):
'''
Functie care verifica daca conversia unui numar din baza 8 in baza 2 a fost realizata cu succes
'''
repo = Repo()
numar = Numar('1430', 8)
rezultat = repo.convert_base8_to_base2(numar)
assert rezultat.get_valoare() == '1100011000'
assert rezultat.get_baza() == 2
numar = Numar('7256', 8)
rezultat = repo.convert_base8_to_base2(numar)
assert rezultat.get_valoare() == '111010101110'
numar = Numar('23', 7)
try:
rezultat = repo.convert_base8_to_base2(numar)
assert False
except RepoError as re:
assert str(re) == "Baza nevalida!\n"
def __test_repo_convert_base16_to_base2(self):
'''
Functie care | |
"""
Abstract method for sending/receiving requests for forecast weather
data from an api service
:param location: location for which to query the remote api
:return: list of dictionaries containing weather data corresponding
to forecast timestamp
"""
# @RPC.export
# def get_hourly_historical(self, locations, start_date, end_date):
# data = []
# service_name = "get_hourly_historical"
# start_datetime = datetime.datetime.combine(start_date,
# datetime.time())
# end_datetime = datetime.datetime.combine(end_date, datetime.time())
# + \
# (datetime.timedelta(days=1))
# for location in locations:
# if not self.validate_location(service_name, location):
# raise ValueError("Invalid Location:{}".format(location))
# current = start_datetime
# while current <= end_datetime:
# records = []
# cached_history = self.get_cached_historical_data(
# service_name, location, current)
# if cached_history:
# for item in cached_history:
# observation_time = format_timestamp(item[0])
# record = [location, observation_time,
# jsonapi.loads(item[1])]
# records.append(record)
# if not len(records):
# response = self.query_hourly_historical(location, current)
# storage_records = []
# for item in response:
# records.append(item)
# observation_time = parse_timestamp_string(item[0])
# s_record = [location, observation_time,
# jsonapi.dumps(item[1])]
# storage_records.append(s_record)
# record = [location,
# format_timestamp(observation_time),
# jsonapi.dumps(item[1])]
# self.store_weather_records(service_name, storage_records)
# for record in records:
# data.append(record)
# current = current + datetime.timedelta(hours=1)
# return data
    @abstractmethod
    def query_hourly_historical(self, location, start_date, end_date):
        """
        Abstract method for sending/receiving requests for historical
        weather data from an api service (the previous docstring said
        "forecast"; this endpoint is historical per its parameters).
        :param location: location for which to query the remote api
        :param start_date: timestamp indicating the start of a historical
        period for which to query the api
        :param end_date: timestamp indicating the end of a historical period
        for which to query the api
        :return: list of dictionaries containing historical weather data
        corresponding to a historical timestamp
        """
def poll_for_locations(self):
"""
Called periodically by core.period to get_current_weather with the
agent's polling_locations list as a
parameter. Publishes to the corresponding entry in the agent's
poll_topic_suffixes, or to /all if none
are specified.
"""
_log.debug("polling for locations")
results = self.get_current_weather(self.poll_locations)
if self.poll_topic_suffixes is None:
_log.debug("publishing results to single topic")
self.publish_response(POLL_TOPIC.format("all"), results)
else:
for i in range(0, len(results)):
_log.debug("publishing results to location specific topic")
poll_topic = POLL_TOPIC.format(self.poll_topic_suffixes[i])
self.publish_response(poll_topic, results[i])
def publish_response(self, topic, publish_item):
"""
Publishes a response with the correct headers and topic to the
Volttron message bus.
:param topic: topic string to send with the message bus publish for
the message
:param publish_item: message contents to be sent in the message bus
publish.
"""
publish_headers = {
HEADER_NAME_DATE: format_timestamp(get_aware_utc_now()),
HEADER_NAME_CONTENT_TYPE: headers.CONTENT_TYPE}
self.vip.pubsub.publish(peer="pubsub", topic=topic,
message=publish_item,
headers=publish_headers)
def manage_unit_conversion(self, from_units, value, to_units):
"""
Used to convert units from a query response to the expected
standard units
:param from_units: pint formatted unit string for the current value
:param value: magnitude of a measurement prior to conversion
:param to_units: pint formatted unit string for the output value
:return: magnitude of measurement in the desired units
"""
if self.unit_registry.parse_expression(
from_units) == self.unit_registry.parse_expression(to_units):
return value
else:
starting_quantity = self.unit_registry.Quantity(value, from_units)
updated_value = starting_quantity.to(to_units).magnitude
return updated_value
def get_cached_historical_data(self, request_name, location,
date_timestamp):
"""
Utility method to retrieve cached historical data without direct
interface with the cache.
:param request_name: name of the api service function for which to
retrieve cached data.
:param location: location of the weather data to return
:param date_timestamp: date for which to retrieve cached data.
:return: list of dictionaries of historical weather data for the date
and location.
"""
return self._cache.get_historical_data(request_name,
jsonapi.dumps(location),
date_timestamp)
    def store_weather_records(self, service_name, records):
        """
        Generically stores weather records returned from the api into the
        corresponding service table in the cache database, raising health
        alerts when the cache is full or unwritable.
        :param service_name: name of the api service function, for which
        records will be put into the corresponding table.
        :param records: list of records to put into the insert query.
        :raises: re-raises any exception from the cache write after
        flagging bad health.
        """
        try:
            cache_full = self._cache.store_weather_records(service_name,
                                                           records)
            # A full cache is survivable but must be alerted on.
            if cache_full:
                self.vip.health.set_status(STATUS_BAD,
                                           "Weather agent cache is full")
                status = Status.from_json(self.vip.health.get_status_json())
                self.vip.health.send_alert(CACHE_FULL, status)
        except Exception as error:
            # Any write failure: alert, remember it, and propagate.
            err_msg = "Weather agent failed to write to cache"
            _log.error("{}. Exception:{}".format(err_msg, error))
            self.vip.health.set_status(STATUS_BAD, err_msg)
            status = Status.from_json(self.vip.health.get_status_json())
            self.vip.health.send_alert(CACHE_WRITE_ERROR, status)
            self.cache_write_error = True
            raise error
        else:
            # First successful write after a failure clears the bad status.
            if self.cache_write_error:
                self.vip.health.set_status(STATUS_GOOD)
                self.cache_write_error = False
@Core.receiver("onstart")
def starting(self, sender, **kwargs):
for service_name in self._api_services:
if not self._api_services[service_name]["type"] == "history":
interval = self.get_update_interval(service_name)
if interval:
self.set_update_interval(service_name, interval)
description = self.get_api_description(service_name)
if description:
self.set_api_description(service_name, description)
    @Core.receiver("onstop")
    def stopping(self, sender, **kwargs):
        # Release the sqlite connection held by the cache on shutdown.
        self._cache.close()
class WeatherCache:
"""Caches data to help reduce the number of requests to the API"""
    def __init__(self,
                 database_file,
                 api_services=None,
                 calls_limit=None,
                 calls_period=None,
                 max_size_gb=1,
                 check_same_thread=True):
        """
        :param database_file: path sqlite file to use for cache
        :param api_services: dictionary from BaseAgent, used to determine
        table names
        :param calls_limit: maximum number of remote api calls allowed per
        calls_period (None disables call tracking)
        :param calls_period: length of the rate-limit window that
        calls_limit applies to
        :param max_size_gb: maximum size in gigabytes of the sqlite database
        file, useful for deployments with limited
        storage capacity
        :param check_same_thread: True to allow multiple threads to connect
        to the sqlite object, else false (see
        https://docs.python.org/3/library/sqlite3.html)
        """
        self._calls_limit = calls_limit
        self._calls_period = calls_period
        self._db_file_path = database_file
        self._api_services = api_services
        self._max_size_gb = max_size_gb
        self._sqlite_conn = None
        # Page-count cap derived from max_size_gb in _setup_cache.
        self._max_pages = None
        self._setup_cache(check_same_thread)
        # Timestamps of outstanding api calls for rate limiting.
        self.pending_calls = []
# cache setup methods
    def _setup_cache(self, check_same_thread):
        """
        Prepares the cache to begin processing weather data: opens the
        sqlite connection, creates the service tables, and derives the
        page-count cap that enforces the configured size limit.
        :param check_same_thread: True to allow multiple threads to connect
        to the sqlite object, else false (see
        https://docs.python.org/3/library/sqlite3.html)
        """
        _log.debug("Setting up backup DB.")
        _log.debug(self._db_file_path)
        self._sqlite_conn = sqlite3.connect(
            self._db_file_path,
            detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES,
            check_same_thread=check_same_thread)
        _log.info("connected to database, sqlite version: {}".format(
            sqlite3.version))
        self.create_tables()
        cursor = self._sqlite_conn.cursor()
        if self._max_size_gb is not None:
            # Translate the GB limit into a page count using the DB's own
            # page size, then trim the cache down to fit.
            cursor.execute("PRAGMA page_size")
            page_size = cursor.fetchone()[0]
            max_storage_bytes = self._max_size_gb * 1024 ** 3
            self._max_pages = int(max_storage_bytes / page_size)
            self.manage_cache_size()
        cursor.close()
def create_tables(self):
    """
    Creates the necessary tables for the weather agent's services and
    ensures proper structure.

    Creates the API_CALLS tracking table (only when a call limit is
    configured) plus one table per registered service; tables that
    already exist are validated and rebuilt if their columns are wrong.
    :raises RuntimeError: if the API_CALLS table cannot be created
    """
    cursor = self._sqlite_conn.cursor()
    if self._calls_limit:
        try:
            cursor.execute(CREATE_STMT_API_CALLS)
            self._sqlite_conn.commit()
            _log.debug(CREATE_STMT_API_CALLS)
        except sqlite3.OperationalError as o:
            if str(o).startswith("table") and str(o).endswith("already "
                                                              "exists"):
                # table already present: make sure its columns match
                self.validate_and_fix_cache_tables("API_CALLS", None)
            else:
                # chain the sqlite error so the root cause is preserved
                raise RuntimeError("Unable to create api_call table") from o
    for service_name in self._api_services:
        table_exists = False
        table_type = None
        try:
            table_type = self._api_services[service_name]["type"]
            if table_type == "forecast":
                create_table = CREATE_STMT_FORECAST.format(
                    table=service_name)
            elif table_type in ("current", "history"):
                create_table = CREATE_STMT_CURRENT.format(
                    table=service_name)
            else:
                raise ValueError("Invalid table type {} "
                                 "for table {}.".format(table_type,
                                                        service_name))
            _log.debug(create_table)
            cursor.execute(create_table)
            self._sqlite_conn.commit()
        except sqlite3.OperationalError as o:
            if str(o).startswith("table") and str(o).endswith("already "
                                                              "exists"):
                table_exists = True
        except sqlite3.Error as err:
            _log.error("Unable to create database table: {}".format(err))
        if table_exists:
            self.validate_and_fix_cache_tables(service_name, table_type)
    cursor.close()
def validate_and_fix_cache_tables(self, service_name, table_type):
    """
    Ensures that the proper columns are in the service's table; if any
    expected column is missing the table is dropped and recreated.
    :param service_name: api service function name to be used as the
                         table name
    :param table_type: indicates the expected columns for the service (
                       must be forecast, history, or current)
    """
    if service_name == 'API_CALLS':
        expected_columns = ["CALL_TIME"]
    elif table_type == "forecast":
        expected_columns = ["ID", "LOCATION", "GENERATION_TIME",
                            "FORECAST_TIME", "POINTS"]
    else:
        expected_columns = ["ID", "LOCATION", "OBSERVATION_TIME", "POINTS"]
    column_names = []
    cursor = self._sqlite_conn.cursor()
    # NOTE: sqlite cannot parameterize identifiers, hence str.format;
    # service_name comes from agent configuration, not untrusted input.
    table_info = cursor.execute(
        "PRAGMA table_info({})".format(service_name)).fetchall()
    for row in table_info:
        column_names.append(row[1])
    for column_name in expected_columns:
        if column_name not in column_names:
            # schema mismatch: drop and rebuild the whole table
            delete_query = "DROP TABLE {};".format(service_name)
            cursor.execute(delete_query)
            self._sqlite_conn.commit()
            _log.debug(delete_query)
            create_table = ""
            if service_name == "API_CALLS":
                create_table = CREATE_STMT_API_CALLS
            elif table_type == "forecast":
                create_table = CREATE_STMT_FORECAST.format(
                    table=service_name)
            elif table_type == "current" or table_type == "history":
                create_table = CREATE_STMT_CURRENT.format(
                    table=service_name)
            if create_table:
                _log.debug(create_table)
                cursor.execute(create_table)
                self._sqlite_conn.commit()
            break
    # BUG FIX: the cursor was previously never closed (sibling methods
    # close theirs consistently).
    cursor.close()
def api_calls_available(self, num_calls=1):
    """
    :param num_calls: Number of calls requested by the agent to fulfill the
                      user's request
    :return: True if the number of calls within the call period is less
             than the api calls limit, false otherwise
    :raises ValueError: if num_calls is less than 1
    """
    if num_calls < 1:
        raise ValueError('Invalid quantity for API calls')
    # BUG FIX: _calls_limit defaults to None in __init__; comparing
    # None < 0 raises TypeError on Python 3.  Treat None the same as a
    # negative limit: unlimited calls.
    if self._calls_limit is None or self._calls_limit < 0:
        return True
    try:
        cursor = self._sqlite_conn.cursor()
        if self._calls_period:
            # expire tracked calls that fall outside the rolling window
            current_time = get_aware_utc_now()
            expiry_time = current_time - self._calls_period
            delete_query = """DELETE FROM API_CALLS WHERE CALL_TIME <= ?;"""
            cursor.execute(delete_query, (expiry_time,))
            self._sqlite_conn.commit()
        active_calls_query = """SELECT COUNT(*) FROM API_CALLS;"""
        cursor.execute(active_calls_query)
        active_calls = cursor.fetchone()[0]
        cursor.close()
        return active_calls + num_calls <= self._calls_limit
    except AttributeError as error:
        _log.error("Error getting available API calls: {}".format(error))
        # Add a call to the pending queue so we can track it later
        self.pending_calls.append(get_aware_utc_now())
        return True
def add_api_call(self):
"""
If an API call is available given the constraints, adds an entry to
the table for tracking
:return: True if an entry was made successfully, false otherwise
"""
try:
cursor = self._sqlite_conn.cursor()
insert_query = """INSERT INTO API_CALLS
(CALL_TIME) VALUES (?);"""
# | |
# # print ("plane_now.look")
# plane_now.look()
# # game.look()
check, new_pos = self.check_plane_action(plane_now.position, joint_action[2 + self.cars + i][0].index(1))
if (check):
plane_now.position = new_pos
# # print ("plane_now.look")
# plane_now.look()
# print ("plane%d end:" % i)
# print("update materials points")
'''
if((self.step_cnt+1)%self.K==0):
for i in range(self.materials):
# print ("material%d:"%i)
m_now=self.material_points[i]
# m_now.look()
m_now.update()
'''
# m_now.look()
# print("load and unload cars and planes")
vehicle_list = self.cars_list + self.planes_list
for i in range(self.cars + self.planes):
vehicle = vehicle_list[i]
pos = vehicle.position
x = pos[0]
y = pos[1]
# vehicle.look()
if (self.map[x][y] == 3): # material points
index = self.map_mate_index[(x, y)]
material_now = self.material_points[index]
# material_now.look()
material_num = material_now.number
if (material_num > 0): # load
exchange = min(material_num, vehicle.max_num - vehicle.number)
vehicle.number = vehicle.number + exchange
material_num = material_num - exchange
material_now.number = material_num
# self.reward += exchange
else:
if (material_num < 0): # unload
exchange = min(-material_num, vehicle.number)
vehicle.number = vehicle.number - exchange
material_num = material_num + exchange
self.reward += exchange
self.reward_per[i] += exchange
material_now.number = material_num
# material_now.look()
# vehicle.look()
self.step_cnt += 1
# if self.is_a_g_terminal():
# self.reset()
# else:
for i in range(self.board_height):
for j in range(self.board_width):
# print(self.current_state)
self.current_state[i][j][0] = self.map[i][j]
for i in range(self.planes):
pos = self.planes_list[i].position
x = pos[0]
y = pos[1]
if (self.current_state[x][y][0] == 0):
self.current_state[x][y][0] = 5
info_after = '' # to add later
return self.current_state, self.get_observation_s(self.current_state)
def get_observation_s(self, s):  # from old current state to state update
    """Flatten the grid plus car and material info into a 1-D observation.

    Layout: every cell's type code, then (load, x, y) per car, then each
    material point's count normalised by 40.
    """
    obs = [s[row][col][0]
           for row in range(self.board_height)
           for col in range(self.board_width)]
    for car in self.cars_list[:self.cars]:
        obs.extend([car.number, car.position[0], car.position[1]])
    for idx in range(self.materials):
        # material counts are scaled down by 40
        obs.append(self.material_points[idx].number / 40)
    return obs
def get_observation_space(self):
    """Return the fixed dimensionality of the observation vector."""
    # TODO: derive this from board size and entity counts instead of
    # hard-coding it
    obs_dim = 124
    return obs_dim
def get_observation_add(self, s, agent_id):  # from old current state to state update
    """Map the raw grid state to a flat observation vector.

    Returns the flattened cell codes, then each car's current load, then
    each material point's remaining count.  ``agent_id`` is currently
    unused — every agent receives the same observation.
    TODO: different obs per agent
    """
    obs = []
    for row in range(self.board_height):
        for col in range(self.board_width):
            obs.append(s[row][col][0])
    # one load value per car (positions intentionally omitted)
    for car in self.cars_list:
        obs.append(car.number)
    # one count per material point (positions intentionally omitted)
    for point in self.material_points[:self.materials]:
        obs.append(point.number)
    return obs
def check_car_action(self, position, action):
    """Validate a car move.

    Returns ``(1, new_position)`` when the move stays on the board and
    avoids obstacles, else ``(0, old_position)``.
    """
    new_x = position[0] + self.actionx[action]
    new_y = position[1] + self.actiony[action]
    inside = 0 <= new_x < self.board_height and 0 <= new_y < self.board_width
    if not inside:
        # car would drive off the map
        return 0, position
    if self.map[new_x][new_y] in (1, 2):
        # cell codes 1 and 2 are obstacles cars cannot enter
        return 0, position
    return 1, [new_x, new_y]
def check_plane_action(self, position, action):
    """Validate a plane move; planes only need to stay on the board
    (they fly over obstacles)."""
    new_x = position[0] + self.actionx[action]
    new_y = position[1] + self.actiony[action]
    if 0 <= new_x < self.board_height and 0 <= new_y < self.board_width:
        return 1, [new_x, new_y]
    # off the map: reject the move and keep the old position
    return 0, position
def get_terminal_actions(self):
    """Read a joint action from stdin for interactive play.

    Three lines are expected: the opponent barrier coordinate "x y",
    then space-separated car actions, then space-separated plane
    actions (0 up, 1 right, 2 down, 3 left, 4 stay).
    """
    actions = []
    # opponent barrier coordinate
    x, y = input().split(' ')
    actions.append([int(x), int(y)])
    # one action per car
    actions.append([int(a) for a in input().split(' ')])
    # one action per plane
    actions.append([int(a) for a in input().split(' ')])
    return actions
def is_terminal(self):
    """Episode ends when the step limit is exceeded or all checked
    material points are empty.

    NOTE(review): the loop range is ``self.cars``, not ``self.materials``;
    this only checks every material point when the two counts are equal —
    confirm against the environment setup.
    """
    all_cleared = all(self.material_points[i].number == 0
                      for i in range(self.cars))
    return self.step_cnt > self.max_step or all_cleared
# def look(self):
# # print ("game look")
# for i in range(self.board_height):
# # print(self.map[i])
def set_action_space(self):  # add
    """Action space: the opponent's barrier coordinate (height, width),
    followed by one 5-way move per car and per plane."""
    vehicle_spaces = [[5] for _ in range(self.cars + self.planes)]
    return [[self.board_height], [self.board_width]] + vehicle_spaces
def step_before_info(self, info=''):  # add
    """Build a human-readable report of the opponent's barriers and the
    material points.  The ``info`` argument is ignored and overwritten."""
    barriers = self.opponent.barriers_list
    barrier_pos = [b.position for b in barriers]
    barrier_total = [b.total for b in barriers]
    barrier_age = [b.age for b in barriers]
    info = "当前敌方设置障碍物位置:%s" % str(barrier_pos)
    info += "\n当前敌方设置障碍物年限:%s" % str(barrier_total)
    info += "\n当前敌方设置障碍物年龄:%s" % str(barrier_age)
    points = self.material_points[:self.materials]
    material_pos = [p.position for p in points]
    material_num = [p.number for p in points]
    info += "\n当前物资集散点位置:%s" % str(material_pos)
    info += "\n当前物资集散点物资数量:%s" % str(material_num)
    return info
def get_reward(self, joint_action):  # add
    """Return the per-car rewards accumulated since the last call as a
    (1, cars) array, then reset the accumulators.  ``joint_action`` is
    unused."""
    per_car = [self.reward_per[i] for i in range(self.cars)]
    reward = np.array([per_car])
    # reset accumulators for the next step
    self.reward = 0
    self.reward_per = np.zeros(self.cars)
    return reward
def check_win(self):  #
    """Return 1 if our side leads, -1 if the opponent leads, 0 for a draw."""
    if self.reward > 0:
        return 1
    return -1 if self.reward < 0 else 0
@staticmethod
def _render_board(state, board, colors, unit, fix, extra_info):  ##add
    """Render the base board via the parent class, then overlay each
    extra_info label at its (row, col) cell."""
    image = GridGame._render_board(state, board, colors, unit, fix)
    draw = ImageDraw.Draw(image)
    for (row, col), label in extra_info.items():
        # note: draw coordinates are (x, y) = (col, row) in pixels
        draw.text(((col + 1.0 / 4) * unit, (row + 1.0 / 4) * unit),
                  label,
                  fill=(0, 0, 0))
    return image
def render_board(self):  ##add
    """Draw the current state with text overlays for fixed barriers,
    opponent barriers, material points, cars and planes; append the
    frame to the game tape and return it as an ndarray."""
    extra_info = {}

    def add_label(pos, text):
        # stack multiple labels on the same cell, newline separated
        key = (pos[0], pos[1])
        if key in extra_info:
            extra_info[key] += '\n' + text
        else:
            extra_info[key] = text

    for i in range(self.barriers):
        add_label(self.fixed_barrier[i], 'F_B')
    for i in range(self.opponent.barriers):
        add_label(self.opponent.barriers_list[i].position, 'Set_B')
    for i in range(self.materials):
        point = self.material_points[i]
        add_label(point.position, 'M' + str(point.number))
    for i in range(self.cars):
        car = self.cars_list[i]
        add_label(car.position, 'C' + str(i) + ':' + str(car.number))
    for i in range(self.planes):
        plane = self.planes_list[i]
        add_label(plane.position, 'P' + str(i) + ':' + str(plane.number))
    im_data = np.array(
        Transport._render_board(self.get_render_data(self.current_state),
                                self.grid, self.colors, self.grid_unit,
                                self.grid_unit_fix, extra_info))
    self.game_tape.append(im_data)
    return im_data
def _close_view(self):
if self.root:
self.root.destory()
self.root = None
self.canvas = None
# self.done = True
def _render(self):
    """Draw the map and the cars on a tkinter canvas, lazily creating
    the window on the first call."""
    grid = np.array(self.map)  # renamed: avoid shadowing builtin 'map'
    scale = 50
    width = grid.shape[0] * scale
    height = grid.shape[1] * scale
    if self.root is None:
        # first render: build the window and canvas
        self.root = tkinter.Tk()
        self.root.title("escalator env")
        self.root.protocol("WM_DELETE_WINDOW", self._close_view)
        self.canvas = tkinter.Canvas(self.root, width=width, height=height)
        self.canvas.pack()
    self.canvas.delete(tkinter.ALL)
    self.canvas.create_rectangle(0, 0, width, height, fill="black")

    def fill_cell(x, y, color):
        self.canvas.create_rectangle(x * scale,
                                     y * scale,
                                     (x + 1) * scale,
                                     (y + 1) * scale,
                                     fill=color)

    cell_colors = {1: 'Grey', 2: 'Green', 3: 'Red'}
    for x in range(grid.shape[0]):
        for y in range(grid.shape[1]):
            color = cell_colors.get(int(grid[x, y]))
            if color is not None:
                fill_cell(x, y, color)
    for car in self.cars_list:
        # empty cars render blue, loaded cars pink
        fill_cell(car.position[0], car.position[1],
                  "Blue" if car.number == 0 else "Pink")
    self.root.update()
def render_game(g, fps=1):
    """Run the environment with a pygame display until it is terminal,
    saving each frame under ./image/.

    :param g: the game/environment instance to step and render
    :param fps: display frame rate
    """
    import pygame
    pygame.init()
    screen = pygame.display.set_mode(g.grid.size)
    pygame.display.set_caption(g.game_name)
    clock = pygame.time.Clock()
    while not g.is_terminal():
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # BUG FIX: previously only pygame.quit() was called and the
                # loop kept running, crashing on the next pygame call.
                pygame.quit()
                return
        # NOTE(review): my_agent is expected to be defined elsewhere in
        # this module — confirm it is in scope at call time.
        joint_act = my_agent(g.current_state, g.joint_action_space)
        next_state, reward, done, info_before, info_after = g.step(joint_act)
        pygame.surfarray.blit_array(screen, g.render_board().transpose(1, 0, 2))
        pygame.display.flip()
        if info_after:
            print(info_after)
        # throttle to the requested frame rate
        clock.tick(fps)
        fname = "./image/" + str(g.step_cnt) + ".png"  # save image
        pygame.image.save(screen, fname)
def create_video(step):
import cv2
img_root = './image/'
fps = 1
image = cv2.imread('./image/1.png')
a = image.shape
size = (a[0], a[1]) # | |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from datetime import time
import _init_paths
import os
import pprint
import argparse
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
import torch.optim as optim
from tqdm import tqdm
import datasets
import models
from core.config import config, update_config
from core.engine import Engine
from core.utils import AverageMeter
from core import eval
from core.utils import create_logger
import models.loss as loss
import math
import torch.distributed as dist
import time
from torch.autograd import profiler
from prefetch_generator import BackgroundGenerator
torch.manual_seed(0)
torch.cuda.manual_seed(0)
class MTimmer:
    """Simple wall-clock timer for coarse profiling of distributed workers.

    Only the worker with ``local_rank == 2`` prints, to avoid interleaved
    output from every process; every worker still resets its timer.
    """

    def __init__(self, local_rank):
        self.last_time = time.time()  # timestamp of the previous click
        self.local_rank = local_rank

    def click(self, message='No message'):
        """Print elapsed seconds since the last click (rank 2 only) and
        restart the timer.

        BUG FIX: the default message previously read 'No massage'.
        """
        if self.local_rank == 2:
            print()
            print('=', self.local_rank, '= ', time.asctime(time.localtime(time.time())), '----',
                  time.time() - self.last_time)
            print(message)
        self.last_time = time.time()
class DataLoaderX(DataLoader):
    """DataLoader whose iterator prefetches batches on a background thread."""

    def __iter__(self):
        base_iterator = super().__iter__()
        return BackgroundGenerator(base_iterator)
def parse_args():
    """Parse CLI options for training.

    ``--cfg`` is consumed first (via parse_known_args) so the global
    config can be updated before the remaining options are declared.
    """
    parser = argparse.ArgumentParser(description='Train localization network')
    # general
    parser.add_argument('--cfg', help='experiment configure file name', required=True, type=str)
    known, _unused = parser.parse_known_args()
    # update config
    update_config(known.cfg)
    # training options
    parser.add_argument('--gpus', help='gpus', type=str)
    parser.add_argument('--workers', help='num of dataloader workers', type=int)
    parser.add_argument('--dataDir', help='data path', type=str)
    parser.add_argument('--modelDir', help='model path', type=str)
    parser.add_argument('--logDir', help='log path', type=str)
    parser.add_argument('--verbose', default=False, action="store_true", help='print progress bar')
    parser.add_argument('--tag', help='tags shown in log', type=str)
    parser.add_argument('--local_rank', help='local rank', type=int, default=0)
    parser.add_argument('--tensorboardDir', help='tensorboard path', type=str)
    parser.add_argument('--debug', default=False, action='store_true', help='enable assert')
    return parser.parse_args()
def reset_config(config, args):
    """Overwrite config fields with any truthy command-line overrides."""
    overrides = [
        ('gpus', 'GPUS'),
        ('workers', 'WORKERS'),
        ('dataDir', 'DATA_DIR'),
        ('modelDir', 'MODEL_DIR'),
        ('logDir', 'LOG_DIR'),
        ('verbose', 'VERBOSE'),
        ('tag', 'TAG'),
        ('tensorboardDir', 'TENSORBOARD_DIR'),
        ('debug', 'DEBUG'),
    ]
    for arg_name, cfg_name in overrides:
        value = getattr(args, arg_name)
        # falsy values (None, '', False, 0) leave the config untouched,
        # matching the original per-field `if args.x:` checks
        if value:
            setattr(config, cfg_name, value)
def synchronize(verbose=False):
    """
    Helper function to synchronize (barrier) among all processes when
    using distributed training.  No-op when the process group is not
    available, not initialized, or has a single member.
    """
    if not dist.is_available() or not dist.is_initialized():
        return
    if dist.get_world_size() == 1:
        return
    if verbose:
        print('waiting={}'.format(dist.get_rank()))
    dist.barrier()
    if verbose:
        print('waiting finished={}'.format(dist.get_rank()))
def gather_tensor(data: torch.Tensor, dim=0, dst: int = None):
    """Gather variable-length tensors along ``dim`` across all ranks.

    Each rank's tensor is zero-padded to the maximum size along ``dim``,
    gathered, then trimmed back before concatenation.

    :param data: this rank's tensor
    :param dim: dimension along which sizes may differ and results are
        concatenated
    :param dst: destination rank; None performs an all_gather so every
        rank gets the result, otherwise only ``dst`` gets it
    :return: the concatenated tensor on destination rank(s), else None
    """
    N = dist.get_world_size()
    if N == 1:
        # single process: nothing to gather
        return data
    is_dst = dst is None or dst == dist.get_rank()
    # get tensor size
    size = torch.tensor(data.shape[dim], device=data.device)
    size_list = [size.clone() for _ in range(N)] if is_dst else None
    if dst is None:
        torch.distributed.all_gather(size_list, size)
    else:
        torch.distributed.gather(tensor=size, gather_list=size_list, dst=dst)
    # NOTE(review): when dst is set, non-dst ranks have size_list = None, so
    # the max() below raises TypeError on those ranks — confirm how the
    # gather-to-dst path is used by callers.
    max_size = max(size.item() for size in size_list)
    shape = list(data.shape)
    shape[dim] = max_size
    tensor_list = [data.new_empty(shape) for _ in range(N)] if is_dst else None
    # pad to same shape
    if data.shape[dim] != max_size:
        shape[dim] = max_size - data.shape[dim]
        tensor = torch.cat([data, data.new_zeros(shape)], dim=dim)
    else:
        tensor = data
    if dst is None:
        torch.distributed.all_gather(tensor_list, tensor)
    else:
        torch.distributed.gather(tensor, tensor_list, dst)
    if is_dst:
        # trim each rank's padding back off before concatenating
        return torch.cat([x.narrow(dim, 0, n.item()) for n, x in zip(size_list, tensor_list)], dim=dim)
    else:
        return None
def main():
args = parse_args()
reset_config(config, args)
# cudnn related setting
cudnn.benchmark = config.CUDNN.BENCHMARK
torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
torch.backends.cudnn.enabled = config.CUDNN.ENABLED
torch.multiprocessing.set_sharing_strategy('file_system')
if config.DEBUG:
torch.autograd.set_detect_anomaly(True)
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
num_gpus = torch.cuda.device_count()
if num_gpus > 1:
distribute = True
else:
distribute = False
if 0 == args.local_rank:
main_work = True
else:
main_work = False
dataset_name = config.DATASET.NAME
model_name = config.MODEL.NAME
torch.cuda.set_device(args.local_rank)
# logger tensorboard
if main_work:
logger, final_output_dir, time_str, tb_writer = create_logger(config, args.cfg, config.TAG)
logger.info('\n' + pprint.pformat(args))
logger.info('\n' + pprint.pformat(config))
train_dataset = getattr(datasets, dataset_name)('train', training=True)
if config.TEST.EVAL_TRAIN:
eval_train_dataset = getattr(datasets, dataset_name)('train')
if not config.DATASET.NO_VAL:
val_dataset = getattr(datasets, dataset_name)('val')
test_dataset = getattr(datasets, dataset_name)('test')
model = getattr(models, model_name)()
if distribute:
torch.distributed.init_process_group(backend="nccl")
synchronize()
model = model.cuda(args.local_rank)
if distribute:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank)
if hasattr(model.module, 'get_parameters'):
params = model.module.get_parameters()
else:
params = model.parameters()
optimizer = optim.Adam(params, lr=config.TRAIN.LR, betas=(0.9, 0.999), eps=1e-8,
weight_decay=config.TRAIN.WEIGHT_DECAY)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=config.TRAIN.FACTOR,
patience=config.TRAIN.PATIENCE, verbose=config.VERBOSE)
def iterator(split):
def get_sampler(i_dataset, shuffle=True):
if distribute:
return torch.utils.data.distributed.DistributedSampler(i_dataset, shuffle=shuffle)
elif shuffle:
return torch.utils.data.RandomSampler(i_dataset)
else:
return torch.utils.data.SequentialSampler(i_dataset)
if split == 'train':
sampler = get_sampler(train_dataset)
dataloader = DataLoader(train_dataset,
batch_size=config.TRAIN.BATCH_SIZE // num_gpus,
num_workers=config.WORKERS // num_gpus,
pin_memory=True,
sampler=sampler,
collate_fn=train_dataset.get_collate_fn())
elif split == 'val':
sampler = get_sampler(val_dataset, shuffle=False)
dataloader = DataLoader(val_dataset,
batch_size=config.TEST.BATCH_SIZE // num_gpus,
num_workers=config.WORKERS // num_gpus,
pin_memory=False,
sampler=sampler,
collate_fn=val_dataset.get_collate_fn())
elif split == 'test':
sampler = get_sampler(test_dataset, shuffle=False)
dataloader = DataLoader(test_dataset,
batch_size=config.TEST.BATCH_SIZE // num_gpus,
num_workers=config.WORKERS // num_gpus,
pin_memory=True,
sampler=sampler,
collate_fn=test_dataset.get_collate_fn())
elif split == 'train_no_shuffle':
sampler = get_sampler(eval_train_dataset, shuffle=False)
dataloader = DataLoader(eval_train_dataset,
batch_size=config.TEST.BATCH_SIZE // num_gpus,
num_workers=config.WORKERS // num_gpus,
pin_memory=True,
sampler=sampler,
collate_fn=eval_train_dataset.get_collate_fn())
else:
raise NotImplementedError
return dataloader
def network(sample, epoch=0):
if model_name == 'LEORN_F':
visual_input = sample['batch_vis_input'].cuda(non_blocking=True)
textual_input = sample['batch_word_vectors'].cuda(non_blocking=True)
textual_mask = sample['batch_txt_mask'].cuda(non_blocking=True)
rcnn_input = sample['batch_rcnn_input'].cuda(non_blocking=True)
rcnn_mask = sample['batch_rcnn_mask'].cuda(non_blocking=True)
rcnn_bbox = sample['batch_rcnn_bbox'].cuda(non_blocking=True)
map_gt = sample['batch_map_gt'].cuda(non_blocking=True)
duration = sample['batch_duration']
if model_name == 'LEORN':
prediction, map_mask = model(textual_input, textual_mask, rcnn_input, rcnn_mask, rcnn_bbox)
else:
prediction, map_mask = model(textual_input, textual_mask, visual_input, rcnn_input, rcnn_mask, rcnn_bbox)
loss_value, joint_prob = getattr(loss, config.LOSS.NAME)(prediction, map_mask, map_gt, config.LOSS.PARAMS)
if model.training:
return loss_value, None
else:
sorted_times = get_proposal_results(joint_prob, duration)
return loss_value, torch.stack(sorted_times) # batchsize * num_clips * 2
def get_proposal_results(scores, durations):
# batchsize * 1 * 16 * 16 , batchsize
# assume all valid scores are larger than one
out_sorted_times = []
for score, duration in zip(scores, durations):
T = score.shape[-1]
sorted_indexs = np.dstack(
np.unravel_index(np.argsort(score.cpu().detach().numpy().ravel())[::-1], (T, T))).tolist()
sorted_indexs = np.array([item for item in sorted_indexs[0] if item[0] <= item[1]]).astype(float)
sorted_indexs[:, 1] = sorted_indexs[:, 1] + 1
sorted_indexs = torch.from_numpy(sorted_indexs).cuda()
target_size = config.DATASET.NUM_SAMPLE_CLIPS // config.DATASET.TARGET_STRIDE
out_sorted_times.append((sorted_indexs.float() / target_size * duration))
return out_sorted_times
def get_reg_proposal_results(scores, durations, reg_map):
# batchsize * 1 * 16 * 16 , batchsize
# reg_map : batchsize * 2 * 16 * 16
# assume all valid scores are larger than one
out_sorted_times = []
for score, duration, reg in zip(scores, durations, reg_map):
T = score.shape[-1]
sorted_index = np.dstack(
np.unravel_index(np.argsort(score.cpu().detach().numpy().ravel())[::-1], (T, T))).tolist()
sorted_indexs = np.array([item for item in sorted_index[0] if item[0] <= item[1]]).astype(float)
sorted_reg = torch.stack([reg[:, s, e] for s, e in sorted_index[0] if s <= e], dim=0)
sorted_indexs[:, 1] = sorted_indexs[:, 1] + 1
sorted_indexs = torch.from_numpy(sorted_indexs).cuda()
target_size = config.DATASET.NUM_SAMPLE_CLIPS // config.DATASET.TARGET_STRIDE
sorted_time = (sorted_indexs.float() / target_size + sorted_reg)
sorted_time[:, 0] = sorted_time[:, 0].masked_fill(sorted_time[:, 0] < 0, 0)
sorted_time[:, 1] = sorted_time[:, 1].masked_fill(sorted_time[:, 1] > 1, 1)
sorted_time = sorted_time * duration
out_sorted_times.append(sorted_time)
return out_sorted_times
def on_start(state):
state['test_interval'] = math.ceil(len(train_dataset) / config.TRAIN.BATCH_SIZE * config.TEST.INTERVAL)
if config.TRAIN.FP16:
state['scaler'] = torch.cuda.amp.GradScaler()
if config.TRAIN.FINE_TUNE and not config.TRAIN.CONTINUE:
loc = 'cuda:{}'.format(args.local_rank)
checkpoint = torch.load(config.MODEL.CHECKPOINT, map_location=loc)
model.module.load_object_params(checkpoint['model'])
if config.MODEL.CHECKPOINT and config.TRAIN.CONTINUE:
loc = 'cuda:{}'.format(args.local_rank)
checkpoint = torch.load(config.MODEL.CHECKPOINT, map_location=loc)
if 'optimizer' in checkpoint:
model.load_state_dict(checkpoint['model'])
state['optimizer'].load_state_dict(checkpoint['optimizer'])
state['scheduler'].load_state_dict(checkpoint['scheduler'])
state['t'] = checkpoint['step'] + 1
if ('scaler' in checkpoint) and (state['scaler'] is not None) and (checkpoint['scaler'] is not None):
state['scaler'].load_state_dict(checkpoint['scaler'])
state['epoch'] = state['t'] // state['test_interval']
else:
if distribute:
model.module.load_state_dict(checkpoint)
else:
model.load_state_dict(checkpoint)
state['loss_meter'] = AverageMeter()
tious = [float(i) for i in config.TEST.TIOU.split(',')] if isinstance(config.TEST.TIOU, str) else [
config.TEST.TIOU]
recalls = [int(i) for i in config.TEST.RECALL.split(',')] if isinstance(config.TEST.RECALL, str) else [
config.TEST.RECALL]
state['best'] = [[0 for _ in recalls] for _ in tious]
state['best_miou'] = 0
model.train()
if config.VERBOSE and main_work:
state['progress_bar'] = tqdm(total=state['test_interval'])
def on_start_epoch(state):
if distribute:
state['iterator'].sampler.set_epoch(state['epoch'])
def on_forward(state):
if state['t'] % state['step_accumulate'] == 0 or state['t'] % state['test_interval'] == 0:
if state['scaler'] is not None:
state['scaler'].unscale_(state['optimizer'])
torch.nn.utils.clip_grad_norm_(model.parameters(), 10)
if distribute:
dist.all_reduce(state['loss'], op=dist.ReduceOp.SUM)
state['loss_meter'].update(state['loss'].item() / num_gpus, 1)
# update the lr of transformer
if hasattr(model, 'adjust_lr'):
model.adjust_lr(state['optimizer'], state['t'])
def on_update(state): # Save All
# state['scheduler'].step()
if config.VERBOSE and main_work:
state['progress_bar'].update(1)
if state['t'] % state['test_interval'] == 0:
model.eval()
if distribute:
synchronize()
if config.VERBOSE and main_work:
state['progress_bar'].close()
loss_message = '\nepoch: {} iter: {} train loss {:.4f}'.format(state['epoch'], state['t'],
state['loss_meter'].avg)
tb_writer.add_scalars("LOSS", {'train': state['loss_meter'].avg}, state['t'])
table_message = ''
if config.TEST.EVAL_TRAIN and state['t'] % (state['test_interval'] * 2) == 0:
if distribute:
synchronize()
train_state = engine.test(network, iterator('train_no_shuffle'), 'train')
if main_work:
train_table = eval.display_results(train_state['Rank@N,mIoU@M'], train_state['miou'],
'performance on training set')
eval.write2tensorboard(tb_writer, train_state['Rank@N,mIoU@M'], train_state['miou'], state['t'],
'train')
table_message += '\n' + train_table
if not config.DATASET.NO_VAL:
if distribute:
synchronize()
val_state = engine.test(network, iterator('val'), 'val')
state['scheduler'].step(-val_state['loss_meter'].avg)
if main_work:
loss_message += ' val loss {:.4f}'.format(val_state['loss_meter'].avg)
tb_writer.add_scalars("LOSS", {'val': val_state['loss_meter'].avg}, state['t'])
val_state['loss_meter'].reset()
val_table = eval.display_results(val_state['Rank@N,mIoU@M'], val_state['miou'],
'performance on validation set')
eval.write2tensorboard(tb_writer, val_state['Rank@N,mIoU@M'], val_state['miou'], state['t'], 'val')
table_message += '\n' + val_table
if distribute:
synchronize()
test_state = engine.test(network, iterator('test'), 'test')
if main_work:
loss_message += ' test loss {:.4f}'.format(test_state['loss_meter'].avg)
tb_writer.add_scalars('LOSS', {'test': test_state['loss_meter'].avg}, state['t'])
test_state['loss_meter'].reset()
test_table = eval.display_results(test_state['Rank@N,mIoU@M'], test_state['miou'],
'performance on testing set')
eval.write2tensorboard(tb_writer, test_state['Rank@N,mIoU@M'], test_state['miou'], state['t'], 'test')
table_message += '\n' + test_table
message = loss_message + table_message + '\n'
logger.info(message)
tb_writer.flush()
# assert if better result
save_checkpoint = False
if test_state['miou'] > state['best_miou']:
state['best_miou'] = test_state['miou']
| |
machine scale set id.
:type recovery_virtual_machine_scale_set_id: str
"""
# Serializer validation metadata (autogenerated): marks required fields
# and fields the service reports as read-only.
_validation = {
    'instance_type': {'required': True},
    'initial_primary_zone': {'readonly': True},
    'initial_primary_fabric_location': {'readonly': True},
    'initial_recovery_zone': {'readonly': True},
    'initial_recovery_fabric_location': {'readonly': True},
    'agent_certificate_expiry_date': {'readonly': True},
    'vm_encryption_type': {'readonly': True},
    'recovery_azure_generation': {'readonly': True},
}
# Maps Python attribute names to REST payload keys and msrest wire types
# (autogenerated serialization metadata).
_attribute_map = {
    'instance_type': {'key': 'instanceType', 'type': 'str'},
    'fabric_object_id': {'key': 'fabricObjectId', 'type': 'str'},
    'initial_primary_zone': {'key': 'initialPrimaryZone', 'type': 'str'},
    'initial_primary_fabric_location': {'key': 'initialPrimaryFabricLocation', 'type': 'str'},
    'initial_recovery_zone': {'key': 'initialRecoveryZone', 'type': 'str'},
    'initial_recovery_fabric_location': {'key': 'initialRecoveryFabricLocation', 'type': 'str'},
    'multi_vm_group_id': {'key': 'multiVmGroupId', 'type': 'str'},
    'multi_vm_group_name': {'key': 'multiVmGroupName', 'type': 'str'},
    'multi_vm_group_create_option': {'key': 'multiVmGroupCreateOption', 'type': 'str'},
    'management_id': {'key': 'managementId', 'type': 'str'},
    'protected_disks': {'key': 'protectedDisks', 'type': '[A2AProtectedDiskDetails]'},
    'unprotected_disks': {'key': 'unprotectedDisks', 'type': '[A2AUnprotectedDiskDetails]'},
    'protected_managed_disks': {'key': 'protectedManagedDisks', 'type': '[A2AProtectedManagedDiskDetails]'},
    'recovery_boot_diag_storage_account_id': {'key': 'recoveryBootDiagStorageAccountId', 'type': 'str'},
    'primary_fabric_location': {'key': 'primaryFabricLocation', 'type': 'str'},
    'recovery_fabric_location': {'key': 'recoveryFabricLocation', 'type': 'str'},
    'os_type': {'key': 'osType', 'type': 'str'},
    'recovery_azure_vm_size': {'key': 'recoveryAzureVMSize', 'type': 'str'},
    'recovery_azure_vm_name': {'key': 'recoveryAzureVMName', 'type': 'str'},
    'recovery_azure_resource_group_id': {'key': 'recoveryAzureResourceGroupId', 'type': 'str'},
    'recovery_cloud_service': {'key': 'recoveryCloudService', 'type': 'str'},
    'recovery_availability_set': {'key': 'recoveryAvailabilitySet', 'type': 'str'},
    'selected_recovery_azure_network_id': {'key': 'selectedRecoveryAzureNetworkId', 'type': 'str'},
    'selected_tfo_azure_network_id': {'key': 'selectedTfoAzureNetworkId', 'type': 'str'},
    'vm_nics': {'key': 'vmNics', 'type': '[VMNicDetails]'},
    'vm_synced_config_details': {'key': 'vmSyncedConfigDetails', 'type': 'AzureToAzureVmSyncedConfigDetails'},
    'monitoring_percentage_completion': {'key': 'monitoringPercentageCompletion', 'type': 'int'},
    'monitoring_job_type': {'key': 'monitoringJobType', 'type': 'str'},
    'last_heartbeat': {'key': 'lastHeartbeat', 'type': 'iso-8601'},
    'agent_version': {'key': 'agentVersion', 'type': 'str'},
    'agent_expiry_date': {'key': 'agentExpiryDate', 'type': 'iso-8601'},
    'is_replication_agent_update_required': {'key': 'isReplicationAgentUpdateRequired', 'type': 'bool'},
    'agent_certificate_expiry_date': {'key': 'agentCertificateExpiryDate', 'type': 'iso-8601'},
    'is_replication_agent_certificate_update_required': {'key': 'isReplicationAgentCertificateUpdateRequired', 'type': 'bool'},
    'recovery_fabric_object_id': {'key': 'recoveryFabricObjectId', 'type': 'str'},
    'vm_protection_state': {'key': 'vmProtectionState', 'type': 'str'},
    'vm_protection_state_description': {'key': 'vmProtectionStateDescription', 'type': 'str'},
    'lifecycle_id': {'key': 'lifecycleId', 'type': 'str'},
    'test_failover_recovery_fabric_object_id': {'key': 'testFailoverRecoveryFabricObjectId', 'type': 'str'},
    'rpo_in_seconds': {'key': 'rpoInSeconds', 'type': 'long'},
    'last_rpo_calculated_time': {'key': 'lastRpoCalculatedTime', 'type': 'iso-8601'},
    'primary_availability_zone': {'key': 'primaryAvailabilityZone', 'type': 'str'},
    'recovery_availability_zone': {'key': 'recoveryAvailabilityZone', 'type': 'str'},
    'vm_encryption_type': {'key': 'vmEncryptionType', 'type': 'str'},
    'tfo_azure_vm_name': {'key': 'tfoAzureVMName', 'type': 'str'},
    'recovery_azure_generation': {'key': 'recoveryAzureGeneration', 'type': 'str'},
    'recovery_proximity_placement_group_id': {'key': 'recoveryProximityPlacementGroupId', 'type': 'str'},
    'auto_protection_of_data_disk': {'key': 'autoProtectionOfDataDisk', 'type': 'str'},
    'recovery_virtual_machine_scale_set_id': {'key': 'recoveryVirtualMachineScaleSetId', 'type': 'str'},
}
def __init__(
    self,
    *,
    fabric_object_id: Optional[str] = None,
    multi_vm_group_id: Optional[str] = None,
    multi_vm_group_name: Optional[str] = None,
    multi_vm_group_create_option: Optional[Union[str, "MultiVmGroupCreateOption"]] = None,
    management_id: Optional[str] = None,
    protected_disks: Optional[List["A2AProtectedDiskDetails"]] = None,
    unprotected_disks: Optional[List["A2AUnprotectedDiskDetails"]] = None,
    protected_managed_disks: Optional[List["A2AProtectedManagedDiskDetails"]] = None,
    recovery_boot_diag_storage_account_id: Optional[str] = None,
    primary_fabric_location: Optional[str] = None,
    recovery_fabric_location: Optional[str] = None,
    os_type: Optional[str] = None,
    recovery_azure_vm_size: Optional[str] = None,
    recovery_azure_vm_name: Optional[str] = None,
    recovery_azure_resource_group_id: Optional[str] = None,
    recovery_cloud_service: Optional[str] = None,
    recovery_availability_set: Optional[str] = None,
    selected_recovery_azure_network_id: Optional[str] = None,
    selected_tfo_azure_network_id: Optional[str] = None,
    vm_nics: Optional[List["VMNicDetails"]] = None,
    vm_synced_config_details: Optional["AzureToAzureVmSyncedConfigDetails"] = None,
    monitoring_percentage_completion: Optional[int] = None,
    monitoring_job_type: Optional[str] = None,
    last_heartbeat: Optional[datetime.datetime] = None,
    agent_version: Optional[str] = None,
    agent_expiry_date: Optional[datetime.datetime] = None,
    is_replication_agent_update_required: Optional[bool] = None,
    is_replication_agent_certificate_update_required: Optional[bool] = None,
    recovery_fabric_object_id: Optional[str] = None,
    vm_protection_state: Optional[str] = None,
    vm_protection_state_description: Optional[str] = None,
    lifecycle_id: Optional[str] = None,
    test_failover_recovery_fabric_object_id: Optional[str] = None,
    rpo_in_seconds: Optional[int] = None,
    last_rpo_calculated_time: Optional[datetime.datetime] = None,
    primary_availability_zone: Optional[str] = None,
    recovery_availability_zone: Optional[str] = None,
    tfo_azure_vm_name: Optional[str] = None,
    recovery_proximity_placement_group_id: Optional[str] = None,
    auto_protection_of_data_disk: Optional[Union[str, "AutoProtectionOfDataDisk"]] = None,
    recovery_virtual_machine_scale_set_id: Optional[str] = None,
    **kwargs
):
    """Initialize A2A replication details.

    Every keyword argument maps one-to-one onto a REST payload field (see the
    class ``_attribute_map``).  Attributes assigned ``None`` below and not
    accepted as arguments are populated by the service, never by the caller.
    """
    super(A2AReplicationDetails, self).__init__(**kwargs)
    # Polymorphic discriminator for provider specific details.
    self.instance_type = 'A2A' # type: str
    self.fabric_object_id = fabric_object_id
    # Server-populated (read-only) fields: start unset.
    self.initial_primary_zone = None
    self.initial_primary_fabric_location = None
    self.initial_recovery_zone = None
    self.initial_recovery_fabric_location = None
    self.multi_vm_group_id = multi_vm_group_id
    self.multi_vm_group_name = multi_vm_group_name
    self.multi_vm_group_create_option = multi_vm_group_create_option
    self.management_id = management_id
    self.protected_disks = protected_disks
    self.unprotected_disks = unprotected_disks
    self.protected_managed_disks = protected_managed_disks
    self.recovery_boot_diag_storage_account_id = recovery_boot_diag_storage_account_id
    self.primary_fabric_location = primary_fabric_location
    self.recovery_fabric_location = recovery_fabric_location
    self.os_type = os_type
    self.recovery_azure_vm_size = recovery_azure_vm_size
    self.recovery_azure_vm_name = recovery_azure_vm_name
    self.recovery_azure_resource_group_id = recovery_azure_resource_group_id
    self.recovery_cloud_service = recovery_cloud_service
    self.recovery_availability_set = recovery_availability_set
    self.selected_recovery_azure_network_id = selected_recovery_azure_network_id
    self.selected_tfo_azure_network_id = selected_tfo_azure_network_id
    self.vm_nics = vm_nics
    self.vm_synced_config_details = vm_synced_config_details
    self.monitoring_percentage_completion = monitoring_percentage_completion
    self.monitoring_job_type = monitoring_job_type
    self.last_heartbeat = last_heartbeat
    self.agent_version = agent_version
    self.agent_expiry_date = agent_expiry_date
    self.is_replication_agent_update_required = is_replication_agent_update_required
    # Server-populated (read-only).
    self.agent_certificate_expiry_date = None
    self.is_replication_agent_certificate_update_required = is_replication_agent_certificate_update_required
    self.recovery_fabric_object_id = recovery_fabric_object_id
    self.vm_protection_state = vm_protection_state
    self.vm_protection_state_description = vm_protection_state_description
    self.lifecycle_id = lifecycle_id
    self.test_failover_recovery_fabric_object_id = test_failover_recovery_fabric_object_id
    self.rpo_in_seconds = rpo_in_seconds
    self.last_rpo_calculated_time = last_rpo_calculated_time
    self.primary_availability_zone = primary_availability_zone
    self.recovery_availability_zone = recovery_availability_zone
    # Server-populated (read-only).
    self.vm_encryption_type = None
    self.tfo_azure_vm_name = tfo_azure_vm_name
    # Server-populated (read-only).
    self.recovery_azure_generation = None
    self.recovery_proximity_placement_group_id = recovery_proximity_placement_group_id
    self.auto_protection_of_data_disk = auto_protection_of_data_disk
    self.recovery_virtual_machine_scale_set_id = recovery_virtual_machine_scale_set_id
class ReplicationProtectionIntentProviderSpecificSettings(msrest.serialization.Model):
    """Replication provider specific settings.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: A2AReplicationIntentDetails.

    All required parameters must be populated in order to send to Azure.

    :param instance_type: Required. Gets the Instance type.Constant filled by server.
    :type instance_type: str
    """

    # Fields the service requires on the wire.
    _validation = {'instance_type': {'required': True}}

    # Python attribute -> REST key / serialized type.
    _attribute_map = {'instance_type': {'key': 'instanceType', 'type': 'str'}}

    # Discriminator mapping for polymorphic deserialization.
    _subtype_map = {'instance_type': {'A2A': 'A2AReplicationIntentDetails'}}

    def __init__(self, **kwargs):
        super(ReplicationProtectionIntentProviderSpecificSettings, self).__init__(**kwargs)
        # Filled in by the server (or by a concrete subclass), so start unset.
        self.instance_type = None  # type: Optional[str]
class A2AReplicationIntentDetails(ReplicationProtectionIntentProviderSpecificSettings):
    """A2A provider specific settings.

    All required parameters must be populated in order to send to Azure.

    :param instance_type: Required. Gets the Instance type.Constant filled by server.
    :type instance_type: str
    :param fabric_object_id: The fabric specific object Id of the virtual machine.
    :type fabric_object_id: str
    :param primary_location: The primary location for the virtual machine.
    :type primary_location: str
    :param recovery_location: The recovery location for the virtual machine.
    :type recovery_location: str
    :param recovery_subscription_id: The recovery subscription Id of the virtual machine.
    :type recovery_subscription_id: str
    :param vm_disks: The list of vm disk details.
    :type vm_disks:
     list[~azure.mgmt.recoveryservicessiterecovery.models.A2AProtectionIntentDiskInputDetails]
    :param vm_managed_disks: The list of vm managed disk details.
    :type vm_managed_disks:
     list[~azure.mgmt.recoveryservicessiterecovery.models.A2AProtectionIntentManagedDiskInputDetails]
    :param recovery_resource_group_id: The recovery resource group id.
    :type recovery_resource_group_id: str
    :param protection_profile: The protection profile custom details.
    :type protection_profile:
     ~azure.mgmt.recoveryservicessiterecovery.models.ProtectionProfileCustomDetails
    :param primary_staging_storage_account: The primary staging storage account details.
    :type primary_staging_storage_account:
     ~azure.mgmt.recoveryservicessiterecovery.models.StorageAccountCustomDetails
    :param recovery_availability_set: The recovery availability set details.
    :type recovery_availability_set:
     ~azure.mgmt.recoveryservicessiterecovery.models.RecoveryAvailabilitySetCustomDetails
    :param recovery_virtual_network: The recovery virtual network details.
    :type recovery_virtual_network:
     ~azure.mgmt.recoveryservicessiterecovery.models.RecoveryVirtualNetworkCustomDetails
    :param recovery_proximity_placement_group: The recovery proximity placement group custom
     details.
    :type recovery_proximity_placement_group:
     ~azure.mgmt.recoveryservicessiterecovery.models.RecoveryProximityPlacementGroupCustomDetails
    :param auto_protection_of_data_disk: A value indicating whether the auto protection is enabled.
     Possible values include: "Disabled", "Enabled".
    :type auto_protection_of_data_disk: str or
     ~azure.mgmt.recoveryservicessiterecovery.models.AutoProtectionOfDataDisk
    :param multi_vm_group_name: The multi vm group name.
    :type multi_vm_group_name: str
    :param multi_vm_group_id: The multi vm group id.
    :type multi_vm_group_id: str
    :param recovery_boot_diag_storage_account: The boot diagnostic storage account.
    :type recovery_boot_diag_storage_account:
     ~azure.mgmt.recoveryservicessiterecovery.models.StorageAccountCustomDetails
    :param disk_encryption_info: The recovery disk encryption information (for two pass flows).
    :type disk_encryption_info: ~azure.mgmt.recoveryservicessiterecovery.models.DiskEncryptionInfo
    :param recovery_availability_zone: The recovery availability zone.
    :type recovery_availability_zone: str
    :param recovery_availability_type: Required. The recovery availability type of the virtual
     machine.
    :type recovery_availability_type: str
    """

    # Fields the service requires on the wire.
    _validation = {
        'instance_type': {'required': True},
        'recovery_availability_type': {'required': True},
    }

    # Python attribute -> REST key / serialized type.
    _attribute_map = {
        'instance_type': {'key': 'instanceType', 'type': 'str'},
        'fabric_object_id': {'key': 'fabricObjectId', 'type': 'str'},
        'primary_location': {'key': 'primaryLocation', 'type': 'str'},
        'recovery_location': {'key': 'recoveryLocation', 'type': 'str'},
        'recovery_subscription_id': {'key': 'recoverySubscriptionId', 'type': 'str'},
        'vm_disks': {'key': 'vmDisks', 'type': '[A2AProtectionIntentDiskInputDetails]'},
        'vm_managed_disks': {'key': 'vmManagedDisks', 'type': '[A2AProtectionIntentManagedDiskInputDetails]'},
        'recovery_resource_group_id': {'key': 'recoveryResourceGroupId', 'type': 'str'},
        'protection_profile': {'key': 'protectionProfile', 'type': 'ProtectionProfileCustomDetails'},
        'primary_staging_storage_account': {'key': 'primaryStagingStorageAccount', 'type': 'StorageAccountCustomDetails'},
        'recovery_availability_set': {'key': 'recoveryAvailabilitySet', 'type': 'RecoveryAvailabilitySetCustomDetails'},
        'recovery_virtual_network': {'key': 'recoveryVirtualNetwork', 'type': 'RecoveryVirtualNetworkCustomDetails'},
        'recovery_proximity_placement_group': {'key': 'recoveryProximityPlacementGroup', 'type': 'RecoveryProximityPlacementGroupCustomDetails'},
        'auto_protection_of_data_disk': {'key': 'autoProtectionOfDataDisk', 'type': 'str'},
        'multi_vm_group_name': {'key': 'multiVmGroupName', 'type': 'str'},
        'multi_vm_group_id': {'key': 'multiVmGroupId', 'type': 'str'},
        'recovery_boot_diag_storage_account': {'key': 'recoveryBootDiagStorageAccount', 'type': 'StorageAccountCustomDetails'},
        'disk_encryption_info': {'key': 'diskEncryptionInfo', 'type': 'DiskEncryptionInfo'},
        'recovery_availability_zone': {'key': 'recoveryAvailabilityZone', 'type': 'str'},
        'recovery_availability_type': {'key': 'recoveryAvailabilityType', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        recovery_availability_type: str,
        fabric_object_id: Optional[str] = None,
        primary_location: Optional[str] = None,
        recovery_location: Optional[str] = None,
        recovery_subscription_id: Optional[str] = None,
        vm_disks: Optional[List["A2AProtectionIntentDiskInputDetails"]] = None,
        vm_managed_disks: Optional[List["A2AProtectionIntentManagedDiskInputDetails"]] = None,
        recovery_resource_group_id: Optional[str] = None,
        protection_profile: Optional["ProtectionProfileCustomDetails"] = None,
        primary_staging_storage_account: Optional["StorageAccountCustomDetails"] = None,
        recovery_availability_set: Optional["RecoveryAvailabilitySetCustomDetails"] = None,
        recovery_virtual_network: Optional["RecoveryVirtualNetworkCustomDetails"] = None,
        recovery_proximity_placement_group: Optional["RecoveryProximityPlacementGroupCustomDetails"] = None,
        auto_protection_of_data_disk: Optional[Union[str, "AutoProtectionOfDataDisk"]] = None,
        multi_vm_group_name: Optional[str] = None,
        multi_vm_group_id: Optional[str] = None,
        recovery_boot_diag_storage_account: Optional["StorageAccountCustomDetails"] = None,
        disk_encryption_info: Optional["DiskEncryptionInfo"] = None,
        recovery_availability_zone: Optional[str] = None,
        **kwargs
    ):
        super(A2AReplicationIntentDetails, self).__init__(**kwargs)
        # Polymorphic discriminator: this subclass always serializes as 'A2A'.
        self.instance_type = 'A2A'  # type: str
        self.fabric_object_id = fabric_object_id
        self.primary_location = primary_location
        self.recovery_location = recovery_location
        self.recovery_subscription_id = recovery_subscription_id
        self.vm_disks = vm_disks
        self.vm_managed_disks = vm_managed_disks
        self.recovery_resource_group_id = recovery_resource_group_id
        self.protection_profile = protection_profile
        self.primary_staging_storage_account = primary_staging_storage_account
        self.recovery_availability_set = recovery_availability_set
        self.recovery_virtual_network = recovery_virtual_network
        self.recovery_proximity_placement_group = recovery_proximity_placement_group
        self.auto_protection_of_data_disk = auto_protection_of_data_disk
        self.multi_vm_group_name = multi_vm_group_name
        self.multi_vm_group_id = multi_vm_group_id
        self.recovery_boot_diag_storage_account = recovery_boot_diag_storage_account
        self.disk_encryption_info = disk_encryption_info
        self.recovery_availability_zone = recovery_availability_zone
        self.recovery_availability_type = recovery_availability_type
class ReverseReplicationProviderSpecificInput(msrest.serialization.Model):
"""Provider specific reverse replication input.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: A2AReprotectInput, HyperVReplicaAzureReprotectInput, InMageReprotectInput, InMageAzureV2ReprotectInput, InMageRcmReprotectInput, InMageRcmFailbackReprotectInput.
All required parameters must be populated in order to send to Azure.
:param instance_type: Required. The class type.Constant filled by server.
:type instance_type: str
"""
_validation = {
'instance_type': {'required': True},
}
_attribute_map = {
'instance_type': {'key': 'instanceType', 'type': 'str'},
}
_subtype_map = {
'instance_type': | |
#!/usr/bin/env python3
""" Tests for the path database """
# Standard lib
import pathlib
import unittest
# Our own imports
from deep_hipsc_tracking import utils
from .. import helpers
# Tests
class TestIsNonemptyDir(helpers.FileSystemTestCase):
    """utils.is_nonempty_dir: hidden entries do not count as content."""

    def test_returns_false_for_empty_dirs(self):
        target = self.tempdir / 'foo'
        target.mkdir()
        (target / '.hidden').touch()
        # Only a hidden file inside: still treated as empty.
        self.assertFalse(utils.is_nonempty_dir(target))

    def test_returns_true_for_dirs_with_things(self):
        target = self.tempdir / 'foo'
        target.mkdir()
        (target / 'not_hidden').touch()
        self.assertTrue(utils.is_nonempty_dir(target))
class TestParseTrainingDir(unittest.TestCase):
    """utils.parse_training_dir: split AI training dirs into their fields."""

    def assert_parses_to(self, path, **expected):
        # Shared assertion: parse the path and compare the whole record.
        self.assertEqual(utils.parse_training_dir(pathlib.Path(path)), expected)

    def test_parses_basedir_no_run(self):
        self.assert_parses_to(
            'ai-upsample-peaks-composite-d3-opt',
            detector='composite-d3-opt', run=None, num_iters=None,
            training_set='peaks')

    def test_parses_basedir_with_run(self):
        self.assert_parses_to(
            'ai-upsample-peaks-fcrn_a_wide-run003',
            detector='fcrn_a_wide', run=3, num_iters=None,
            training_set='peaks')

    def test_parses_iterdir_with_run(self):
        self.assert_parses_to(
            'ai-upsample-peaks-fcrn_a_wide-run003/ai-upsample-peaks-n75000',
            detector='fcrn_a_wide', run=3, num_iters=75000,
            training_set='peaks')

    def test_parses_iterdir_with_run_different_set(self):
        self.assert_parses_to(
            'ai-upsample-confocal-fcrn_a_wide-run003/ai-upsample-confocal-n75000',
            detector='fcrn_a_wide', run=3, num_iters=75000,
            training_set='confocal')

    def test_errors_on_iterdir_rundir_mismatch(self):
        # Training set differs between run dir and iteration dir.
        path = pathlib.Path('ai-upsample-confocal-fcrn_a_wide-run003/ai-upsample-peaks-n75000')
        with self.assertRaises(KeyError):
            utils.parse_training_dir(path)
class TestGetRootdir(helpers.FileSystemTestCase):
    """utils.get_rootdir: walk up from any path to the experiment root dir."""

    def _make_rootdir(self, relpath):
        # Create (and return) a directory tree under the temp dir.
        rootdir = self.tempdir / relpath
        rootdir.mkdir(parents=True)
        return rootdir

    def test_no_rootdir(self):
        inpath = self._make_rootdir('foo/bar')
        self.assertIsNone(utils.get_rootdir(inpath))

    def test_gets_rootdir_from_imagefile_example(self):
        rootdir = self._make_rootdir('foo/bar/example_confocal')
        inpath = rootdir / 'EGFP/s04-bees/foo-s04t003.tif'
        self.assertEqual(utils.get_rootdir(inpath), rootdir)

    def test_gets_rootdir_from_imagefile_config_file(self):
        rootdir = self._make_rootdir('foo/bar/baz')
        # The config file marks an otherwise unnamed dir as a root dir.
        (rootdir / 'deep_tracking.ini').touch()
        inpath = rootdir / 'EGFP/s04-bees/foo-s04t003.tif'
        self.assertEqual(utils.get_rootdir(inpath), rootdir)

    def test_gets_rootdir_from_rootdir(self):
        inpath = self._make_rootdir('foo/bar/2017-02-12')
        self.assertEqual(utils.get_rootdir(inpath), inpath)

    def test_gets_rootdir_from_imagefile(self):
        rootdir = self._make_rootdir('foo/bar/2017-02-12')
        inpath = rootdir / 'EGFP/s04-bees/foo-s04t003.tif'
        self.assertEqual(utils.get_rootdir(inpath), rootdir)

    def test_gets_rootdir_from_tiledir(self):
        rootdir = self._make_rootdir('foo/bar/2017-02-12')
        inpath = rootdir / 'EGFP/s04-bees'
        self.assertEqual(utils.get_rootdir(inpath), rootdir)

    def test_gets_rootdir_from_channeldir(self):
        rootdir = self._make_rootdir('foo/bar/2017-02-12')
        inpath = rootdir / 'EGFP/'
        self.assertEqual(utils.get_rootdir(inpath), rootdir)

    def test_ignores_weird_rootdir(self):
        # Date-with-suffix names are not valid root dirs.
        rootdir = self._make_rootdir('foo/bar/2017-02-12-bees')
        inpath = rootdir / 'EGFP/'
        self.assertIsNone(utils.get_rootdir(inpath))

    def test_ignores_non_existant_rootdir(self):
        rootdir = self.tempdir / 'foo/bar/2017-02-12'
        self.assertFalse(rootdir.is_dir())
        inpath = rootdir / 'EGFP/'
        self.assertIsNone(utils.get_rootdir(inpath))
class TestFindCommonBasedir(helpers.FileSystemTestCase):
    """utils.find_common_basedir: locate the ancestor shared by root dirs."""

    def test_find_basedir_no_idea(self):
        # With no hints at all, there is nothing to return.
        self.assertIsNone(utils.find_common_basedir())

    def test_find_basedir_no_rootdirs(self):
        basedir = self.tempdir / 'base'
        self.assertEqual(utils.find_common_basedir(basedir=basedir), basedir)

    def test_find_basedir_rootdirs_and_basedir(self):
        basedir = self.tempdir / 'base'
        rootdirs = [basedir / '2016-10-01', basedir / '2017-01-23']
        res = utils.find_common_basedir(rootdirs=rootdirs, basedir=basedir)
        self.assertEqual(res, basedir)

    def test_find_basedir_rootdirs_same_level(self):
        basedir = self.tempdir / 'base'
        rootdirs = [basedir / '2016-10-01', basedir / '2017-01-23']
        self.assertEqual(utils.find_common_basedir(rootdirs=rootdirs), basedir)

    def test_find_basedir_rootdirs_nested_levels(self):
        basedir = self.tempdir / 'base'
        rootdirs = [basedir / 'p1' / '2016-10-01', basedir / 'p2' / '2017-01-23']
        self.assertEqual(utils.find_common_basedir(rootdirs=rootdirs), basedir)

    def test_find_basedir_rootdirs_staggered_levels(self):
        basedir = self.tempdir / 'base'
        rootdirs = [basedir / '2016-10-01', basedir / 'p2' / '2017-01-23']
        self.assertEqual(utils.find_common_basedir(rootdirs=rootdirs), basedir)
class TestFindTimepoints(helpers.FileSystemTestCase):
    """utils.find_timepoints: yield (timepoint, path) pairs from a tile dir."""

    def test_no_tiledir(self):
        missing = self.tempdir / 's03'
        # The tile directory does not exist, so iteration must fail.
        with self.assertRaises(OSError):
            list(utils.find_timepoints(missing, timepoints=3, suffix='.tif'))

    def test_one_timepoint(self):
        tiledir = self.tempdir / 's03'
        tiledir.mkdir()
        f1 = tiledir / 'foo_s03t003.tif'
        f1.touch()
        res = list(utils.find_timepoints(tiledir, timepoints=3, suffix='.tif'))
        self.assertEqual(res, [(3, f1)])

    def test_three_timepoints(self):
        tiledir = self.tempdir / 's03'
        tiledir.mkdir()
        f1 = tiledir / 'foo_s03t003.tif'
        f3 = tiledir / 'foo_s03t099.tif'
        f4 = tiledir / 'foo_s03t100.tif'
        distractors = (
            tiledir / 'foo_s03t005.tif',  # timepoint not requested
            tiledir / 'foo_s03t099.npz',  # wrong suffix
        )
        for path in (f1, f3, f4) + distractors:
            path.touch()
        res = list(utils.find_timepoints(tiledir, timepoints=[3, 99, 100], suffix='.tif'))
        self.assertEqual(res, [(3, f1), (99, f3), (100, f4)])
class TestFindTimepoint(helpers.FileSystemTestCase):
    """utils.find_timepoint: locate a single tile/timepoint image file."""

    def _write_image(self, tiledir, name):
        # Create a dummy image file with throwaway content.
        path = tiledir / name
        with path.open('wt') as fp:
            fp.write('BAD')
        return path

    def test_no_tiledir(self):
        tiledir = self.tempdir / 's03'
        self.assertIsNone(utils.find_timepoint(tiledir, tile=3, timepoint=5))

    def test_tiledir_empty(self):
        tiledir = self.tempdir / 's03'
        tiledir.mkdir(parents=True)
        self.assertIsNone(utils.find_timepoint(tiledir, tile=3, timepoint=5))

    def test_tiledir_no_match(self):
        tiledir = self.tempdir / 's03'
        tiledir.mkdir(parents=True)
        self._write_image(tiledir, 'grr_s03t001.jpg')
        self.assertIsNone(utils.find_timepoint(tiledir, tile=3, timepoint=5))

    def test_tiledir_match(self):
        tiledir = self.tempdir / 's03'
        tiledir.mkdir(parents=True)
        match = self._write_image(tiledir, 'grr_s03t005.jpg')
        self.assertEqual(utils.find_timepoint(tiledir, tile=3, timepoint=5), match)

    def test_tiledir_matches_first(self):
        tiledir = self.tempdir / 's03'
        tiledir.mkdir(parents=True)
        first = self._write_image(tiledir, 'a_s03t005.jpg')
        self._write_image(tiledir, 'b_s03t005.jpg')
        # With two candidates, the first one is returned.
        self.assertEqual(utils.find_timepoint(tiledir, tile=3, timepoint=5), first)

    def test_tiledir_matches_prefix(self):
        tiledir = self.tempdir / 's03'
        tiledir.mkdir(parents=True)
        self._write_image(tiledir, 'a_s03t005.jpg')
        second = self._write_image(tiledir, 'b_s03t005.jpg')
        # An explicit prefix overrides the default choice.
        res = utils.find_timepoint(tiledir, tile=3, timepoint=5, prefix='b')
        self.assertEqual(res, second)
class TestFindTiledirs(helpers.FileSystemTestCase):
    """utils.find_tiledirs: list and filter sNN[-condition] tile directories."""

    def _make_dirs(self, *names):
        # Create one subdirectory per name, returned in argument order.
        made = []
        for name in names:
            subdir = self.tempdir / name
            subdir.mkdir(parents=True)
            made.append(subdir)
        return made

    def test_finds_no_tiles(self):
        self.assertEqual(list(utils.find_tiledirs(self.tempdir)), [])

    def test_finds_one_tile_no_condition(self):
        r1, = self._make_dirs('s03')
        self.assertEqual(list(utils.find_tiledirs(self.tempdir)), [(3, r1)])

    def test_finds_one_tile_with_condition(self):
        r1, = self._make_dirs('s03-ponies')
        self.assertEqual(list(utils.find_tiledirs(self.tempdir)), [(3, r1)])

    def test_finds_three_tiles(self):
        # Results come back sorted by tile number, not creation order.
        r5, r1, r2 = self._make_dirs('s05', 's01-toast', 's02-grr')
        res = list(utils.find_tiledirs(self.tempdir))
        self.assertEqual(res, [(1, r1), (2, r2), (5, r5)])

    def test_ignores_files(self):
        r5, r1, r2 = self._make_dirs('s05', 's01-toast', 's02-grr')
        bad = self.tempdir / 's04-bad'
        with bad.open('wt') as fp:
            fp.write('bad')
        res = list(utils.find_tiledirs(self.tempdir))
        self.assertEqual(res, [(1, r1), (2, r2), (5, r5)])

    def test_ignores_unparsable_dirs(self):
        r5, r1, r2, _ = self._make_dirs('s05', 's01-toast', 's02-grr',
                                        'agkjslakdhjfld')
        res = list(utils.find_tiledirs(self.tempdir))
        self.assertEqual(res, [(1, r1), (2, r2), (5, r5)])

    def test_works_with_str_basedir(self):
        r5, r1, r2 = self._make_dirs('s05', 's01-toast', 's02-grr')
        res = list(utils.find_tiledirs(str(self.tempdir)))
        self.assertEqual(res, [(1, r1), (2, r2), (5, r5)])

    def test_can_filter_on_tile_numbers(self):
        r5, r1, r2 = self._make_dirs('s05', 's01-toast', 's02-grr')
        res = list(utils.find_tiledirs(self.tempdir, tiles=[2, 5]))
        self.assertEqual(res, [(2, r2), (5, r5)])

    def test_can_filter_on_tile_number_strings(self):
        r5, r1, r2 = self._make_dirs('s05', 's01-toast', 's02-grr')
        res = list(utils.find_tiledirs(self.tempdir, tiles=['2', 5]))
        self.assertEqual(res, [(2, r2), (5, r5)])

    def test_can_filter_on_single_tile_number(self):
        r5, r1, r2 = self._make_dirs('s05', 's01-toast', 's02-grr')
        res = list(utils.find_tiledirs(self.tempdir, tiles=5))
        self.assertEqual(res, [(5, r5)])

    def test_can_filter_on_single_tile_number_string(self):
        r5, r1, r2 = self._make_dirs('s05', 's01-toast', 's02-grr')
        res = list(utils.find_tiledirs(self.tempdir, tiles='2'))
        self.assertEqual(res, [(2, r2)])

    def test_can_filter_on_single_condition(self):
        r5, r1, r2 = self._make_dirs('s05-h7-1', 's01-h7-2', 's02-wtc-2')
        res = list(utils.find_tiledirs(self.tempdir, conditions=['h7']))
        self.assertEqual(res, [(1, r1), (5, r5)])

    def test_can_filter_on_multiple_conditions(self):
        r5, r1, r2 = self._make_dirs('s05-h7-1', 's01-wtb-2', 's02-wtc-2')
        res = list(utils.find_tiledirs(self.tempdir, conditions=['wtb', 'h7']))
        self.assertEqual(res, [(1, r1), (5, r5)])

    def test_condition_filter_case_insensitive(self):
        r5, r1, r2 = self._make_dirs('s05-H7-1', 's01-wTb-2', 's02-wtc-2')
        res = list(utils.find_tiledirs(self.tempdir, conditions=['WTB', 'h7']))
        self.assertEqual(res, [(1, r1), (5, r5)])
class TestPairTiledirs(helpers.FileSystemTestCase):
def test_pair_zero_dirs(self):
    # With no directories there is nothing to pair up.
    self.assertEqual(utils.pair_tiledirs(), [])
def test_pair_one_dir(self):
    basedir = self.tempdir / 'bees'
    tile = basedir / 's01'
    tile.mkdir(parents=True)
    # A single base directory yields one-element tuples.
    self.assertEqual(utils.pair_tiledirs(basedir), [(tile, )])
def test_pair_two_dirs_one_match(self):
    base_a = self.tempdir / 'bees'
    base_b = self.tempdir / 'toast'
    tile_a = base_a / 's01'
    tile_b = base_b / 's01-bees'
    tile_a.mkdir(parents=True)
    tile_b.mkdir(parents=True)
    # Tile s01 exists under both bases, so they pair by tile number.
    self.assertEqual(utils.pair_tiledirs(base_a, base_b), [(tile_a, tile_b)])
def test_pair_two_dirs_two_matches(self):
    base_a = self.tempdir / 'bees'
    base_b = self.tempdir / 'toast'
    tiles_a = [base_a / 's01', base_a / 's02-grr']
    tiles_b = [base_b / 's01-bees', base_b / 's02']
    for tiledir in tiles_a + tiles_b:
        tiledir.mkdir(parents=True)
    exp = [(tiles_a[0], tiles_b[0]), (tiles_a[1], tiles_b[1])]
    self.assertEqual(utils.pair_tiledirs(base_a, base_b), exp)
def test_pair_three_dirs_two_matches(self):
    base_a = self.tempdir / 'bees'
    base_b = self.tempdir / 'toast'
    base_c = self.tempdir / 'buzz'
    tiles_a = [base_a / 's01', base_a / 's02-grr']
    tiles_b = [base_b / 's01-bees', base_b / 's02']
    tiles_c = [base_c / 's01-bees', base_c / 's02-grr']
    for tiledir in tiles_a + tiles_b + tiles_c:
        tiledir.mkdir(parents=True)
    exp = [(tiles_a[0], tiles_b[0], tiles_c[0]),
           (tiles_a[1], tiles_b[1], tiles_c[1])]
    self.assertEqual(utils.pair_tiledirs(base_a, base_b, base_c), exp)
def test_pair_three_dirs_missing_tiles(self):
    base_a = self.tempdir / 'bees'
    base_b = self.tempdir / 'toast'
    base_c = self.tempdir / 'buzz'
    # s02 is missing under bees and s01 is missing under buzz.
    for tiledir in (base_a / 's01', base_b / 's01-bees',
                    base_b / 's02', base_c / 's02-grr'):
        tiledir.mkdir(parents=True)
    with self.assertRaises(OSError):
        utils.pair_tiledirs(base_a, base_b, base_c)
def test_pair_three_dirs_can_allow_unpaired(self):
r1 = self.tempdir / 'bees'
r2 = self.tempdir / 'toast'
r3 = self.tempdir / 'buzz'
t1_1 = r1 / 's01'
t1_1.mkdir(parents=True)
t2_1 = r2 / 's01-bees'
| |
import functools
import os
import random
import threading
import time
from dataclasses import dataclass
from datetime import datetime, timedelta
from enum import Enum
from typing import Dict, Iterator, Optional
from uuid import UUID, uuid4
import requests
from fastapi import FastAPI, HTTPException
from fastapi.exception_handlers import request_validation_exception_handler
from fastapi.exceptions import RequestValidationError
from pydantic import BaseModel
from uvicorn import Config, Server
import asyncio
from contextlib import suppress
# TCP port the HTTP server binds to; overridable through the PORT env var
# (Heroku-style deployment).
PORT = int(os.getenv("PORT", "8000"))
# Single FastAPI application instance all route decorators below attach to.
app = FastAPI()
class ResolverBody(BaseModel):
    """Request body for POST /resolver: an operation name plus its string arguments."""
    operacao: str
    arguments: Dict[str, str]
@dataclass
class RecursoBody:
    """Body for the /recurso endpoints: an access code plus lease data.

    NOTE(review): the class is decorated with @dataclass but also defines its
    own __init__, so the generated initializer is discarded.  The manual
    __init__ stores the access code into ``_uuid`` and never assigns
    ``self.codigo_de_acesso`` — confirm against the endpoints, which read
    ``body.codigo_de_acesso`` directly.
    """
    codigo_de_acesso: str
    _uuid: Optional[UUID] = None
    valor: Optional[int] = None
    validade: Optional[datetime] = None
    # Hand-written constructor; shadows the dataclass-generated one.
    def __init__(self, codigo_de_acesso: UUID, valor: int, validade: datetime) -> None:
        self._uuid = codigo_de_acesso
        self.valor = valor
        self.validade = validade
# Lifetime of a leased resource, in seconds (see recurso_expirou below).
EXPIRACAO = 5
@dataclass
class InfoBody:
    """Service metadata served by the /info endpoints.

    NOTE(review): the hand-written __init__ below shadows the one @dataclass
    would generate, so every instance carries these hard-coded values.
    """
    server_name: str
    server_endpoint: str
    descricao: str
    versao: float
    status: str
    tipo_de_eleicao_ativa: str
    def __init__(self) -> None:
        self.server_name = "sd_microservice"
        self.server_endpoint = "https://pratica-sd.herokuapp.com/"
        self.descricao = "Projeto de SD. Os seguintes serviços estão implementados. GET: [/, /info, /peers, /peers/{id}, /fruits, /clients]. POST: [/resolver, /peers, /echo]. PUT: [/info, /peers/{id}]. DELETE: [/peer/{id}]"
        self.versao = 0.1
        self.status = "online"
        self.tipo_de_eleicao_ativa = "anel"
    def get_atts(self):
        """Return the metadata fields as a list, in declaration order."""
        return [
            self.server_name,
            self.server_endpoint,
            self.descricao,
            self.versao,
            self.status,
            self.tipo_de_eleicao_ativa,
        ]
class Valid(Enum):
    """Validation result for a peer record (see is_peer_valid).

    Fix: the original decorated this Enum with @dataclass.  With no fields,
    @dataclass injects an __eq__ that compares empty tuples — making every
    member compare equal to every other — and sets __hash__ to None, making
    members unhashable.  Plain Enum restores identity-based equality/hashing.
    """
    VALID = 0b00
    INVALID = 0b01
    DUPLICATE = 0b10
class EleicaoBody(BaseModel):
    """Request body for POST /eleicao: election id plus data forwarded to the handler."""
    id: str
    dados: Optional[list[str]] = None
@dataclass
class CoordenadorBody:
    """Request body for POST /eleicao/coordenador: announces an election winner."""
    # Id of the peer elected coordinator.
    coordenador: str
    # Id of the election round being closed.
    id_eleicao: str
# Leased resources, keyed by their access code.
recursos: dict[UUID, RecursoBody] = {}
# Metadata served by /info.
glInfo = InfoBody()
# Known peers in the distributed system; commented entries are currently
# out of rotation.
glPeers = [
    # {
    #     "id": "201810665",
    #     "nome": "<NAME>",
    #     "url": "https://jenilsonramos-sd-20211.herokuapp.com/",
    # },
    # {
    #     "id": "201720308",
    #     "nome": "<NAME>",
    #     "url": "https://sd-victor-20212.herokuapp.com/",
    # },
    {
        "id": "201720295",
        "nome": "<NAME>",
        "url": "https://sd-ascampos-20212.herokuapp.com/",
    },
    {
        "id": "201710396",
        "nome": "<NAME>",
        "url": "https://pratica-sd.herokuapp.com/",
    },
    {
        "id": "201710377",
        "nome": "<NAME>",
        "url": "https://sd-api-uesc.herokuapp.com/",
    },
    {
        "id": "201710376",
        "nome": "<NAME>",
        "url": "https://nodejs-sd-guilhermesenna.herokuapp.com/",
    },
    # {
    #     "id": "201710375",
    #     "nome": "<NAME>",
    #     "url": "https://sd-emmanuel.herokuapp.com/",
    # },
    # {
    #     "id": "201620400",
    #     "nome": "<NAME>",
    #     "url": "https://sd-nassimrihan-2021-2.herokuapp.com/",
    # },
    # {
    #     "id": "201610337",
    #     "nome": "<NAME>",
    #     "url": "https://sd-20212-luiscarlos.herokuapp.com/",
    # },
    # {
    #     "id": "201610327",
    #     "nome": "<NAME>",
    #     "url": "https://sd-joaopedrop-20212.herokuapp.com/",
    # },
    # {
    #     "id": "201512136",
    #     "nome": "<NAME>",
    #     "url": "https://sd-annyaourives-20212.herokuapp.com/hello/",
    # },
]
# This node's own identity within glPeers.
myUrl = "https://pratica-sd.herokuapp.com/"
myId = "201710396"
# Coordinator election state: whether this node leads and who currently does.
coordenador = {
    "coordenador": False,
    "coordenador_atual": "",
}
id_eleicao_atual = ""
# Ids of elections currently in progress on this node.
eleicoes: set[str] = set()
# Polling interval in seconds -- presumably for a background health check;
# the consumer is not visible here, confirm before changing.
interval_check = 2.0
def is_peer_valid(peer: dict[str, str]) -> Valid:
    """Classify a peer record: INVALID on bad keys/values, DUPLICATE when it
    is already registered in glPeers, VALID otherwise."""
    for key, value in peer.items():
        if key not in ("id", "nome", "url"):
            # Unknown field: reject outright.
            return Valid.INVALID
        if key == "nome" and value.isdigit():
            return Valid.INVALID
        if key == "url" and not value.startswith("http"):
            return Valid.INVALID
    # Well-formed; now distinguish new peers from already-known ones.
    return Valid.DUPLICATE if peer in glPeers else Valid.VALID
@app.get("/")
def index():
    """Describe the routes this service exposes (static help payload)."""
    return {
        "routes": {
            "GET": {
                "/": "This page",
                "/fruits": "List of fruits",
                "/clients": "List of Clients",
            },
            "POST": {
                "/echo": "Echoes the passed parameter",
                "/resolver": {
                    "body": {
                        "resolver": "operacao",
                        "nome": "name of the person to match a service url",
                    },
                    "response": {"url": "url of the service of the matched name"},
                },
            },
        },
    }
def recurso_expirou(validade: datetime) -> bool:
    """Return True once *validade* lies at least EXPIRACAO seconds in the past."""
    elapsed = datetime.now() - validade
    return elapsed >= timedelta(seconds=EXPIRACAO)
def log(sev: str, comment: str, msg: str):
    """Ship a structured log entry to the shared class log server."""
    payload = {
        "from": myUrl,
        "severity": sev,
        "comment": comment,
        "body": msg,
    }
    requests.post("https://sd-log-server.herokuapp.com/log", json=payload)
@app.exception_handler(RequestValidationError)
async def validation_exception_handler(request, exc: RequestValidationError):
    """Log invalid request bodies, then delegate to FastAPI's default handler."""
    print(f"OMG! The client sent invalid data!: {exc.body}")
    return await request_validation_exception_handler(request, exc)
@app.put("/tipo_eleicao")
def update_tipo_eleicao(tipo_eleicao: str):
    """Switch the election algorithm advertised in the service info (e.g. "anel")."""
    global glInfo  # NOTE(review): redundant — only an attribute is mutated, not rebound
    glInfo.tipo_de_eleicao_ativa = tipo_eleicao
    return {"tipo_eleicao": tipo_eleicao}
@app.post("/offline/{body}")
def post_offline(body: bool = True):
    """Toggle this node's simulated availability and report the new status."""
    global glInfo
    glInfo.status = "offline" if body else "online"
    return {"status": glInfo.status}
@app.get("/eleicao")
def get_eleicao():
    """Report the active election algorithm and the ids of elections in progress."""
    return {
        "tipo_de_eleicao_ativa": glInfo.tipo_de_eleicao_ativa,
        "eleicoes_em_andamento": eleicoes,
    }
@app.post("/eleicao")
def post_eleicao(body: EleicaoBody):
    """Join an election announced by a peer.

    Ring ("anel") elections always propagate; other algorithms only spawn a
    worker thread for election ids not seen before. Raises 404 while this
    node is offline.
    """
    if glInfo.status == "offline":
        raise HTTPException(status_code=404, detail="Servidor offline")
    # (An unused `global coordenador` declaration was removed — nothing here
    # rebinds the module-level name.)
    if glInfo.status == "online" and (
        glInfo.tipo_de_eleicao_ativa == "anel" or body.id not in eleicoes
    ):
        eleicoes.add(body.id)
        # recebido=True: this node is joining an election started elsewhere.
        threading.Thread(target=eleicao, args=(body.id, body.dados, True)).start()
    return body.dict()
@app.post("/eleicao/coordenador")
def post_eleicao_coordenador(body: CoordenadorBody):
    """Record the winner announced for an election and close that election.

    Raises 404 while this node is offline.
    """
    if glInfo.status == "offline":
        raise HTTPException(status_code=404, detail="Servidor offline")
    global coordenador
    coordenador["coordenador_atual"] = body.coordenador
    # This node is coordinator only when the announced winner is itself.
    coordenador["coordenador"] = body.coordenador == myId
    log(
        "Success",
        "Eleicao finalizada",  # was an f-string with no placeholders
        f"Eleicao: {body.id_eleicao}. Novo coordenador: {body.coordenador}",
    )
    print(f"Eleicao {body.id_eleicao} finalizada. Novo coordenador: {body.coordenador}")
    eleicoes.discard(body.id_eleicao)
@app.post("/resetar")
def resetar_coord():
    """Clear coordinator state and forget every in-progress election."""
    global coordenador
    coordenador.update(coordenador=False, coordenador_atual="")
    eleicoes.clear()
    return {"status": "ok"}
@app.get("/recurso")
def get_recurso(body: RecursoBody):
    """Return the value of a leased resource; 401 on a bad key or expired lease."""
    try:
        chave = UUID(body.codigo_de_acesso)
    except ValueError:
        raise HTTPException(status_code=401, detail="Chave inválida")
    recurso = recursos.get(chave)
    if recurso is None or recurso_expirou(recurso.validade):
        raise HTTPException(
            status_code=401, detail="Recurso expirado ou não encontrado"
        )
    return {"valor": recurso.valor}
@app.delete("/recurso")
def delete_recurso(body: RecursoBody):
    """Release a leased resource.

    Raises 401 for a malformed access key (consistent with GET /recurso),
    410 when the resource does not exist or had already expired.
    """
    try:
        chave = UUID(body.codigo_de_acesso)
    except ValueError:
        # Previously an unhandled ValueError surfaced as a 500.
        raise HTTPException(status_code=401, detail="Chave inválida")
    recurso = recursos.get(chave)
    if recurso is None:
        raise HTTPException(status_code=410, detail="Recurso não existe")
    if recurso._uuid is not None:
        del recursos[recurso._uuid]
    # The resource is removed either way; 410 additionally tells the caller
    # the lease had already lapsed.
    if recurso_expirou(recurso.validade):
        raise HTTPException(status_code=410, detail="Recurso expirado")
@app.put("/recurso")
def put_recurso(body: RecursoBody):
    """Update the value of a leased resource and renew its lease.

    Raises 401 for a malformed key, an unknown resource, or an expired lease.
    """
    try:
        chave = UUID(body.codigo_de_acesso)
    except ValueError:
        # Previously an unhandled ValueError surfaced as a 500.
        raise HTTPException(status_code=401, detail="Chave inválida")
    recurso = recursos.get(chave)
    if recurso is None or recurso_expirou(recurso.validade):
        raise HTTPException(
            status_code=401, detail="Recurso expirado ou não encontrado"
        )
    recurso.valor = body.valor
    # Renew the lease: recurso_expirou() measures EXPIRACAO from this instant.
    recurso.validade = datetime.now()
    if recurso._uuid is not None:
        recursos[recurso._uuid] = recurso
    return {"codigo_de_acesso": recurso._uuid, "valor": recurso.valor}
def _criar_recurso():
    """Create a fresh resource with a random value and a full lease window."""
    uid = uuid4()
    validade = datetime.now() + timedelta(seconds=EXPIRACAO)
    recursos[uid] = RecursoBody(
        codigo_de_acesso=uid,
        valor=random.randint(1, 1000),
        validade=validade,
    )
    return {"codigo_de_acesso": uid, "validade": validade}


@app.post("/recurso")
def post_recurso(body: RecursoBody = None):
    """Acquire a resource lease.

    Without a body — or with a key that matches no resource — a new resource
    is created (the two previously duplicated branches now share
    _criar_recurso). With a known key: 409 while the lease is still active
    (the holder's lease is renewed), otherwise the resource is re-leased to
    this caller, optionally updating its value.
    """
    if body is None:
        return _criar_recurso()
    recurso = recursos.get(UUID(body.codigo_de_acesso))
    if recurso is None:
        return _criar_recurso()
    if recurso.validade is not None and not recurso_expirou(recurso.validade):
        # Still leased: renew the current lease and signal the conflict.
        recurso.validade = datetime.now()
        if recurso._uuid is not None:
            recursos[recurso._uuid] = recurso
        raise HTTPException(status_code=409, detail="Recurso em uso")
    if body.valor is not None:
        recurso.valor = body.valor
    # Re-lease the expired resource to this caller.
    recurso.validade = datetime.now()
    if recurso._uuid is not None:
        recursos[recurso._uuid] = recurso
    return {
        "codigo_de_acesso": recurso._uuid,
        "validade": recurso.validade,
    }
@app.get("/info")
def get_info():
    """Expose this node's status/configuration object (glInfo)."""
    return glInfo
@app.put("/info")
def update_info(body: InfoBody):
    """Replace glInfo after validating that every attribute is set.

    Raises 400 when any attribute is falsy (or a non-positive float).
    A node transitioning from offline to online triggers a new election.
    """
    if any(
        not att or (isinstance(att, float) and att <= 0.0) for att in body.get_atts()
    ):
        raise HTTPException(
            status_code=400, detail="A requisição não contem os dados necessários"
        )
    global glInfo
    if glInfo.status == "offline" and body.status == "online":
        # BUG FIX: args=("") is an *empty iterable*, so eleicao() was invoked
        # with no arguments and the thread died with a TypeError; Thread.args
        # must be a tuple — here a 1-tuple holding the empty election id.
        threading.Thread(target=eleicao, args=("",)).start()
    glInfo = body
@app.get("/peers")
def get_peers():
    """List every registered peer ({id, nome, url} dicts)."""
    return glPeers
@app.get("/peers/{id}")
def get_peer(id: str):
    """Return the peer registered under *id*; 404 when absent."""
    peer = next((p for p in glPeers if p.get("id") == id), None)
    if peer is None:
        raise HTTPException(404, f"Não encontrado peer com id: {id}")
    return peer
@app.post("/peers")
def add_peer(body: dict[str, str]):
    """Register a new peer; 400 for malformed data, 409 for a duplicate."""
    valid = is_peer_valid(body)
    # Compare enum members directly instead of unwrapping .value.
    if valid is Valid.VALID:
        glPeers.append(body)
    elif valid is Valid.INVALID:
        raise HTTPException(400, "Dados mal formatados")
    else:
        raise HTTPException(409, "Já existe um peer com esse id ou nome")
@app.put("/peers/{id}")
def update_peer(id: str, body: dict[str, str]):
    """Merge *body* into the peer registered under *id*.

    Raises 422 for malformed data and 404 for an unknown id.
    """
    # Compare enum members directly instead of unwrapping .value.
    if is_peer_valid(body) is Valid.INVALID:
        raise HTTPException(422, "Dados invalidos")  # f-prefix was extraneous
    for peer in glPeers:
        if peer.get("id") == id:
            peer.update(body)
            return body
    raise HTTPException(404, f"Não encontrado peer com id: {id}")
@app.delete("/peers/{id}")
def delete_peer(id: str):
    """Remove the peer registered under *id*; 404 when absent."""
    target_idx = next(
        (i for i, peer in enumerate(glPeers) if peer.get("id") == id), -1
    )
    if target_idx == -1:
        raise HTTPException(404, f"Não encontrado peer com id: {id}")
    glPeers.pop(target_idx)
@app.post("/resolver")
def resolver(body: ResolverBody):
    """Resolve a person's name to a service URL (only "robert" is known)."""
    if body.operacao != "resolver":
        return None
    nome = body.arguments.get("nome")
    if nome is not None and nome.lower() == "robert":
        return {"url": "https://pratica-sd.herokuapp.com/"}
@app.get("/coordenador")
def get_coordenador():
    """Report whether this node is coordinator and who the current one is."""
    return coordenador
@app.get("/fruits")
def app_get():
    """Static sample list used by the exercises."""
    return ["Apple", "Banana", "Orange"]
@app.get("/clients")
def app_clientes_get():
    """Static sample client list used by the exercises."""
    return ["Mathias", "José", "Thiago"]
@app.post("/echo")
def app_post(echo=None):
    """Echo the given parameter back.

    BUG FIX: the formatted message was built but never returned, so the
    endpoint answered null whenever *echo* was provided.
    """
    if echo is None:
        return "Echo."
    return f"Echo {echo}."
def main():
    """Start the HTTP server, then run the startup election and periodic check.

    NOTE(review): Server.run() normally blocks until shutdown, so the log
    call, eleicao("") and the Periodic coordinator check below would only
    execute after the server stops — confirm whether they were meant to run
    before/alongside it.
    """
    config = Config(app=app, host="0.0.0.0", port=PORT, debug=True)
    server = Server(config=config)
    server.run()
    global glInfo
    if glInfo.status != "offline":
        log(
            "Attention",
            "Servico iniciado",
            "O servico foi inicializado, uma eleicao ira ocorrer em breve",
        )
        # "" asks eleicao() to mint a fresh election id.
        eleicao("")
    # setInterval(interval_check, check_coordenador)
    p = Periodic(check_coordenador, 2)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(p.start())
def eleicao(
    id_eleicao_atual: str, dados: Optional[list[str]] = None, recebido: bool = False
):
    """Run (or join) an election, dispatching on the configured algorithm.

    An empty id means this node is starting a fresh election; a non-empty id
    means it is participating in one announced by a peer.
    """
    if glInfo.status == "offline":
        return
    if id_eleicao_atual == "":
        # Fresh election started locally: mint an id and track it.
        id_eleicao_atual = str(uuid4())
        eleicoes.add(id_eleicao_atual)
    log(
        "Success",
        f"{'Recebido' if recebido else 'Iniciado nova'} eleicao",
        f"Eleicao: {id_eleicao_atual}. Tipo de eleicao: {glInfo.tipo_de_eleicao_ativa}",
    )
    print(
        f"Iniciando nova eleicao: {id_eleicao_atual}. Tipo: {glInfo.tipo_de_eleicao_ativa}"
    )
    if glInfo.tipo_de_eleicao_ativa == "anel":
        eleicao_ring(id_eleicao_atual, dados)
    else:
        eleicao_bully(id_eleicao_atual)
def cycle(iterable: Iterator):
    """Yield the peers that come after this node, wrapping around forever.

    Entries up to and including the one whose "id" equals myId are skipped;
    the remaining entries are then yielded indefinitely, restarting from the
    beginning of *iterable* each time it is exhausted.

    BUG FIX (PEP 479): a bare next() exhausting the iterator inside a
    generator raises RuntimeError. If myId is absent the search phase now
    simply falls through to cycling the whole sequence, and an empty
    iterable ends the generator instead of crashing.
    """
    it = iter(iterable)
    for entry in it:
        if entry.get("id") == myId:
            break
    while True:
        for entry in it:
            yield entry
        # Wrapped around: restart from the beginning.
        it = iter(iterable)
        first = next(it, None)
        if first is None:
            return  # empty iterable — nothing to cycle
        yield first
def eleicao_ring(id_eleicao_atual: str, dados: Optional[list[str]] = None):
global glPeers
print(f"dados: {dados}")
if dados is not None and dados.__contains__(myId):
maxId = functools.reduce(
lambda acc, id: max(int(acc), int(id) if id != "" else 0), dados, 0
)
print("maxId: ", maxId)
end_election(id_eleicao_atual, str(maxId))
else:
if dados is None:
dados = [myId]
else:
dados.append(myId)
peers = glPeers.copy()
peers.sort(key=lambda x: int(x.get("id")))
print(peers)
for peer in cycle(peers):
# if peer["id"] == myId:
# continue
res = requests.post(
f"{peer['url']}eleicao", json={"id": id_eleicao_atual, "dados": dados}
)
if res.status_code == 200:
print(f"Enviado eleicao para {peer['id']}({peer['nome']})")
break
else:
log(
"Error",
f"Erro ao | |
w : the height of every gaussian
centers: list of tuple optional (default=([1,1],[-1,-1]))
The bounding box for each cluster center when centers are
generated at random.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, is the RandomState instance used by np.random.
Attributes
----------
class_type : 'blobs-regression'
"""
__class_type = 'blobs-regression'
    def __init__(self, n_samples=100, cluster_std=(1.0, 1.0),
                 bias=1.0, w=(2.0, -1.0), centers=([0, 0], [-1, -1]),
                 random_state=None):
        """Store the generation parameters; data is only created by load()."""
        self.n_samples = n_samples
        self.bias = bias
        self.w = w
        self.centers = centers
        self.cluster_std = cluster_std
        self.random_state = random_state
    def _dts_function(self, X):
        """Return the regression target for each pattern in X.

        The target is `bias` plus a weighted sum of isotropic Gaussian pdfs,
        one per entry of `centers`, with weight `w[i]` and covariance
        `cluster_std[i] * I`.
        """
        from secml.ml.stats import CDistributionGaussian
        d = X.shape[1]  # number of features
        Y = self.bias
        for gauss_idx in range(len(self.centers)):
            Y += self.w[gauss_idx] * \
                CDistributionGaussian(mean=self.centers[gauss_idx],
                                      cov=self.cluster_std[gauss_idx] *
                                      CArray.eye(d, d)).pdf(X)
        return Y
    def load(self):
        """Loads the dataset.

        Returns
        -------
        dataset : CDataset
            The randomly generated dataset.
        """
        from sklearn.datasets import make_blobs
        # The cluster labels from make_blobs are discarded ([0]); regression
        # targets come from the Gaussian-mixture function instead.
        patterns = make_blobs(
            n_samples=self.n_samples, n_features=2, centers=self.centers,
            cluster_std=self.cluster_std, random_state=self.random_state)[0]
        return CDataset(patterns, self._dts_function(CArray(patterns)))
class CDLRandomCircles(CDataLoader):
    """Make a large circle containing a smaller circle in 2d.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.
    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.
    factor : double < 1 (default=.8)
        Scale factor between inner and outer circle.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator for reproducible sampling; None uses np.random.

    Attributes
    ----------
    class_type : 'circles'

    """
    __class_type = 'circles'

    def __init__(self, n_samples=100, noise=None,
                 factor=0.8, random_state=None):
        self.n_samples = n_samples
        self.noise = noise
        self.factor = factor
        self.random_state = random_state

    def load(self):
        """Loads the dataset.

        Returns
        -------
        dataset : CDataset
            The randomly generated dataset.

        """
        from sklearn.datasets import make_circles
        patterns, labels = make_circles(n_samples=self.n_samples,
                                        noise=self.noise,
                                        factor=self.factor,
                                        random_state=self.random_state)
        return CDataset(patterns, labels)
class CDLRandomCircleRegression(CDataLoader):
    """Make a large circle containing a smaller circle in 2d.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.
    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.
    factor : double < 1 (default=.8)
        Scale factor between inner and outer circle.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, is the RandomState instance used by np.random.

    Attributes
    ----------
    class_type : 'circles-regression'

    """
    __class_type = 'circles-regression'

    def __init__(self, n_samples=100, noise=None,
                 factor=0.8, random_state=None):
        self.n_samples = n_samples
        self.noise = noise
        self.factor = factor
        self.random_state = random_state

    def _dts_function(self, X):
        """Return the regression target: squared distance of each 2d pattern
        from the origin (x0**2 + x1**2). Replaces a TODO placeholder."""
        return X[:, 0] ** 2 + X[:, 1] ** 2

    def load(self):
        """Loads the dataset.

        Returns
        -------
        dataset : CDataset
            The randomly generated dataset.

        """
        from sklearn.datasets import make_circles
        # Class labels ([1]) are discarded; targets come from _dts_function.
        patterns = make_circles(
            n_samples=self.n_samples,
            noise=self.noise,
            factor=self.factor,
            random_state=self.random_state)[0]
        return CDataset(patterns, self._dts_function(patterns))
class CDLRandomMoons(CDataLoader):
    """Make two interleaving half circles.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.
    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator for reproducible sampling; None uses np.random.

    Attributes
    ----------
    class_type : 'moons'

    """
    __class_type = 'moons'

    def __init__(self, n_samples=100, noise=None, random_state=None):
        self.n_samples = n_samples
        self.noise = noise
        self.random_state = random_state

    def load(self):
        """Loads the dataset.

        Returns
        -------
        dataset : CDataset
            The randomly generated dataset.

        """
        from sklearn.datasets import make_moons
        patterns, labels = make_moons(n_samples=self.n_samples,
                                      noise=self.noise,
                                      random_state=self.random_state)
        return CDataset(patterns, labels)
class CDLRandomBinary(CDataLoader):
    """Generate random binary data.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.
    n_features : int, optional (default=2)
        The total number of features

    Attributes
    ----------
    class_type : 'binary'

    """
    __class_type = 'binary'

    def __init__(self, n_samples=100, n_features=2):
        self.n_samples = n_samples
        self.n_features = n_features

    def load(self):
        """Loads the dataset.

        Returns
        -------
        dataset : CDataset
            The randomly generated dataset.

        """
        # Patterns: (n_samples, n_features) matrix of 0/1; labels: 0/1 row.
        pattern_shape = (self.n_samples, self.n_features)
        patterns = CArray.randint(2, shape=pattern_shape)
        labels = CArray.randint(2, shape=(1, self.n_samples))
        return CDataset(patterns, labels)
class CDLRandomToy(CDataLoader, metaclass=ABCMeta):
    """Loads a random toy dataset (abstract interface).

    Available toy datasets:
     - iris (classification) -> `CDLIris`
     - digits (classification) -> `CDLDigits`
     - boston (regression) -> `CDLBoston`
     - diabetes (regression) -> `CDLDiabetes`

    Parameters
    ----------
    class_list : list of string (default None)
        Each string is the name of data's class that we want
        in the new dataset. If None every class will be keep
    zero_one : bool
        If is true, and class list is equal to two, will be
        assigned 0 at the label with lower value, 1 to the other.

    """
    __lock = Lock()  # Lock to prevent multiple parallel download/extraction

    def __init__(self, class_list=None, zero_one=False):
        self.class_list = class_list
        self.zero_one = zero_one

    @property
    @abstractmethod
    def toy(self):
        """Identifier of the toy dataset."""
        raise NotImplementedError

    def _select_classes(self, class_list, patterns, labels):
        """Return a CDataset restricted to the classes in *class_list*.

        When `zero_one` is True (and exactly two classes are kept), labels
        are remapped to 0/1 in ascending order of the original class values.
        """
        sel_patterns = None
        sel_labels = None
        for single_class in class_list:
            this_class_pat_idx = labels.find(labels == single_class)
            if sel_patterns is None:
                sel_patterns = patterns[this_class_pat_idx, :]
                sel_labels = labels[this_class_pat_idx]
            else:
                sel_patterns = sel_patterns.append(
                    patterns[this_class_pat_idx, :], axis=0)
                sel_labels = sel_labels.append(
                    labels[this_class_pat_idx])
        if self.zero_one is True:
            if len(class_list) > 2:
                raise ValueError("you are try to convert to 0 1 label for a "
                                 "dataset with more than 2 classes")
            else:
                # NOTE: sorts the caller's list in place.
                class_list.sort()
                sel_labels[sel_labels == class_list[0]] = 0
                sel_labels[sel_labels == class_list[1]] = 1
        return CDataset(sel_patterns, sel_labels)

    def load(self):
        """Loads the dataset.

        Returns
        -------
        dataset : CDataset
            The randomly generated dataset.

        """
        with CDLRandomToy.__lock:
            if self.toy == 'iris':
                from sklearn.datasets import load_iris
                toy_data = load_iris()
            elif self.toy == 'digits':
                from sklearn.datasets import load_digits
                toy_data = load_digits()
            elif self.toy == 'boston':
                from sklearn.datasets import load_boston
                toy_data = load_boston()
            elif self.toy == 'diabetes':
                from sklearn.datasets import load_diabetes
                toy_data = load_diabetes()
            else:
                # BUG FIX: message previously read "... if not available."
                raise ValueError(
                    "toy dataset {:} is not available.".format(self.toy))
            # Returning a CDataset
            if self.class_list is None:
                return CDataset(CArray(toy_data.data), CArray(toy_data.target))
            else:
                return self._select_classes(self.class_list,
                                            CArray(toy_data.data),
                                            CArray(toy_data.target))
class CDLIris(CDLRandomToy):
    """Loads Iris dataset.
    The iris dataset is a classic and very easy multi-class
    classification dataset.
    ================= ==============
    Classes 3
    Samples per class 50
    Samples total 150
    Dimensionality 4
    Features real, positive
    ================= ==============
    Parameters
    ----------
    class_list : list of str (default None)
        Each string is the name of data's class that we want
        in the new dataset. If None every class will be keep
    zero_one : bool
        If is true, and class list is equal to two, will be
        assigned 0 at the label with lower value, 1 to the other.
    Attributes
    ----------
    class_type : 'iris'
    """
    # NOTE(review): presumably consumed by the loader-registry machinery
    # behind CDataLoader's class_type attribute -- confirm.
    __class_type = 'iris'
    # Selects the sklearn iris branch in CDLRandomToy.load().
    toy = 'iris'
class CDLDigits(CDLRandomToy):
    """Loads Digits dataset.
    The digits dataset is a classic and very easy multi-class
    classification dataset. Each datapoint is a 8x8 image of a digit.
    ================= ==============
    Classes 10
    Samples per class ~180
    Samples total 1797
    Dimensionality 64
    Features integers 0-16
    ================= ==============
    Parameters
    ----------
    class_list : list of str (default None)
        Each string is the name of data's class that we want
        in the new dataset. If None every class will be keep
    zero_one : bool
        If is true, and class list is equal to two, will be
        assigned 0 at the label with lower value, 1 to the other.
    Attributes
    ----------
    class_type : 'digits'
    """
    # NOTE(review): presumably consumed by the loader-registry machinery
    # behind CDataLoader's class_type attribute -- confirm.
    __class_type = 'digits'
    # Selects the sklearn digits branch in CDLRandomToy.load().
    toy = 'digits'
class CDLBoston(CDLRandomToy):
"""Loads Boston dataset.
Boston house-prices dataset, useful for regression.
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Parameters
----------
class_list : list of str (default None)
Each string is the name of data's class that we want
in the new dataset. If None every class will be keep
zero_one : bool
If is true, and class list is equal to two, will be
assigned 0 at the label with lower value, | |
checks
# in such case, it will error when accessing the .shape attribute.
is_param_lazy = torch.nn.parameter.is_lazy(param)
# Backward compatibility: loading 1-dim tensor from 0.3.* to version 0.4+
if not is_param_lazy and len(param.shape) == 0 and len(input_param.shape) == 1:
input_param = input_param[0]
if not is_param_lazy and input_param.shape != param.shape:
# local shape should match the one in checkpoint
error_msgs.append('size mismatch for {}: copying a param with shape {} from checkpoint, '
'the shape in current model is {}.'
.format(key, input_param.shape, param.shape))
continue
try:
with torch.no_grad():
param.copy_(input_param)
except Exception as ex:
error_msgs.append('While copying the parameter named "{}", '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}, '
'an exception occurred : {}.'
.format(key, param.size(), input_param.size(), ex.args))
elif strict:
missing_keys.append(key)
extra_state_key = prefix + _EXTRA_STATE_KEY_SUFFIX
if getattr(self.__class__, "set_extra_state", Module.set_extra_state) is not Module.set_extra_state:
if extra_state_key in state_dict:
self.set_extra_state(state_dict[extra_state_key])
elif strict:
missing_keys.append(extra_state_key)
elif strict and (extra_state_key in state_dict):
unexpected_keys.append(extra_state_key)
if strict:
for key in state_dict.keys():
if key.startswith(prefix) and key != extra_state_key:
input_name = key[len(prefix):]
input_name = input_name.split('.', 1)[0] # get the name of param/buffer/child
if input_name not in self._modules and input_name not in local_state:
unexpected_keys.append(key)
    def load_state_dict(self, state_dict: 'OrderedDict[str, Tensor]',
                        strict: bool = True):
        r"""Copies parameters and buffers from :attr:`state_dict` into
        this module and its descendants. If :attr:`strict` is ``True``, then
        the keys of :attr:`state_dict` must exactly match the keys returned
        by this module's :meth:`~torch.nn.Module.state_dict` function.
        Args:
            state_dict (dict): a dict containing parameters and
                persistent buffers.
            strict (bool, optional): whether to strictly enforce that the keys
                in :attr:`state_dict` match the keys returned by this module's
                :meth:`~torch.nn.Module.state_dict` function. Default: ``True``
        Returns:
            ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields:
            * **missing_keys** is a list of str containing the missing keys
            * **unexpected_keys** is a list of str containing the unexpected keys
        Note:
            If a parameter or buffer is registered as ``None`` and its corresponding key
            exists in :attr:`state_dict`, :meth:`load_state_dict` will raise a
            ``RuntimeError``.
        """
        missing_keys: List[str] = []
        unexpected_keys: List[str] = []
        error_msgs: List[str] = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            # mypy isn't aware that "_metadata" exists in state_dict
            state_dict._metadata = metadata  # type: ignore[attr-defined]
        # Recursive worker: loads this module's own entries, then descends
        # into children with their dotted prefix. The three result lists are
        # shared and mutated in place at every level of the recursion.
        def load(module, prefix=''):
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            # NOTE: always passes True here; overall strictness is applied
            # below from the accumulated key lists.
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        load(self)
        del load  # drop the local reference to the recursive closure
        if strict:
            if len(unexpected_keys) > 0:
                error_msgs.insert(
                    0, 'Unexpected key(s) in state_dict: {}. '.format(
                        ', '.join('"{}"'.format(k) for k in unexpected_keys)))
            if len(missing_keys) > 0:
                error_msgs.insert(
                    0, 'Missing key(s) in state_dict: {}. '.format(
                        ', '.join('"{}"'.format(k) for k in missing_keys)))
        if len(error_msgs) > 0:
            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
                self.__class__.__name__, "\n\t".join(error_msgs)))
        return _IncompatibleKeys(missing_keys, unexpected_keys)
def _named_members(self, get_members_fn, prefix='', recurse=True):
r"""Helper method for yielding various names + members of modules."""
memo = set()
modules = self.named_modules(prefix=prefix) if recurse else [(prefix, self)]
for module_prefix, module in modules:
members = get_members_fn(module)
for k, v in members:
if v is None or v in memo:
continue
memo.add(v)
name = module_prefix + ('.' if module_prefix else '') + k
yield name, v
def parameters(self, recurse: bool = True) -> Iterator[Parameter]:
r"""Returns an iterator over module parameters.
This is typically passed to an optimizer.
Args:
recurse (bool): if True, then yields parameters of this module
and all submodules. Otherwise, yields only parameters that
are direct members of this module.
Yields:
Parameter: module parameter
Example::
>>> for param in model.parameters():
>>> print(type(param), param.size())
<class 'torch.Tensor'> (20L,)
<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
"""
for name, param in self.named_parameters(recurse=recurse):
yield param
def named_parameters(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, Parameter]]:
r"""Returns an iterator over module parameters, yielding both the
name of the parameter as well as the parameter itself.
Args:
prefix (str): prefix to prepend to all parameter names.
recurse (bool): if True, then yields parameters of this module
and all submodules. Otherwise, yields only parameters that
are direct members of this module.
Yields:
(string, Parameter): Tuple containing the name and parameter
Example::
>>> for name, param in self.named_parameters():
>>> if name in ['bias']:
>>> print(param.size())
"""
gen = self._named_members(
lambda module: module._parameters.items(),
prefix=prefix, recurse=recurse)
for elem in gen:
yield elem
def buffers(self, recurse: bool = True) -> Iterator[Tensor]:
r"""Returns an iterator over module buffers.
Args:
recurse (bool): if True, then yields buffers of this module
and all submodules. Otherwise, yields only buffers that
are direct members of this module.
Yields:
torch.Tensor: module buffer
Example::
>>> for buf in model.buffers():
>>> print(type(buf), buf.size())
<class 'torch.Tensor'> (20L,)
<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
"""
for _, buf in self.named_buffers(recurse=recurse):
yield buf
def named_buffers(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, Tensor]]:
r"""Returns an iterator over module buffers, yielding both the
name of the buffer as well as the buffer itself.
Args:
prefix (str): prefix to prepend to all buffer names.
recurse (bool): if True, then yields buffers of this module
and all submodules. Otherwise, yields only buffers that
are direct members of this module.
Yields:
(string, torch.Tensor): Tuple containing the name and buffer
Example::
>>> for name, buf in self.named_buffers():
>>> if name in ['running_var']:
>>> print(buf.size())
"""
gen = self._named_members(
lambda module: module._buffers.items(),
prefix=prefix, recurse=recurse)
for elem in gen:
yield elem
def children(self) -> Iterator['Module']:
r"""Returns an iterator over immediate children modules.
Yields:
Module: a child module
"""
for name, module in self.named_children():
yield module
def named_children(self) -> Iterator[Tuple[str, 'Module']]:
r"""Returns an iterator over immediate children modules, yielding both
the name of the module as well as the module itself.
Yields:
(string, Module): Tuple containing a name and child module
Example::
>>> for name, module in model.named_children():
>>> if name in ['conv4', 'conv5']:
>>> print(module)
"""
memo = set()
for name, module in self._modules.items():
if module is not None and module not in memo:
memo.add(module)
yield name, module
def modules(self) -> Iterator['Module']:
r"""Returns an iterator over all modules in the network.
Yields:
Module: a module in the network
Note:
Duplicate modules are returned only once. In the following
example, ``l`` will be returned only once.
Example::
>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.modules()):
print(idx, '->', m)
0 -> Sequential(
(0): Linear(in_features=2, out_features=2, bias=True)
(1): Linear(in_features=2, out_features=2, bias=True)
)
1 -> Linear(in_features=2, out_features=2, bias=True)
"""
for _, module in self.named_modules():
yield module
def named_modules(self, memo: Optional[Set['Module']] = None, prefix: str = '', remove_duplicate: bool = True):
r"""Returns an iterator over all modules in the network, yielding
both the name of the module as well as the module itself.
Args:
memo: a memo to store the set of modules already added to the result
prefix: a prefix that will be added to the name of the module
remove_duplicate: whether to remove the duplicated module instances in the result
or not
Yields:
(string, Module): Tuple of name and module
Note:
Duplicate modules are returned only once. In the following
example, ``l`` will be returned only once.
Example::
>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.named_modules()):
print(idx, '->', m)
0 -> ('', Sequential(
(0): Linear(in_features=2, out_features=2, bias=True)
(1): Linear(in_features=2, out_features=2, bias=True)
))
1 -> ('0', Linear(in_features=2, out_features=2, bias=True))
"""
if memo is None:
memo = set()
if self not in memo:
if remove_duplicate:
memo.add(self)
yield prefix, self
for name, module in self._modules.items():
if module is None:
continue
submodule_prefix = prefix + ('.' if prefix else '') + name
for m in module.named_modules(memo, submodule_prefix, remove_duplicate):
yield m
def train(self: T, mode: bool = True) -> T:
r"""Sets the module in training mode.
This has any effect only on certain modules. See documentations of
particular modules for details of their behaviors in training/evaluation
mode, if they are affected, | |
#!/usr/bin/env python
"""
Test code for the BBox Object
"""
import numpy as np
import pytest
from geometry_utils.bound_box import (BBox,
asBBox,
NullBBox,
InfBBox,
fromBBArray,
from_points,
)
class TestConstructors():
    """Construction and validation rules of BBox objects."""

    def test_creates(self):
        B = BBox(((0, 0), (5, 5)))
        assert isinstance(B, BBox)

    def test_type(self):
        B = np.array(((0, 0), (5, 5)))
        assert not isinstance(B, BBox)

    def testDataType(self):
        B = BBox(((0, 0), (5, 5)))
        # np.float was a deprecated alias of the builtin float, removed in
        # NumPy 1.24; compare against np.float64, the actual array dtype.
        assert B.dtype == np.float64

    def testShape(self):
        B = BBox((0, 0, 5, 5))
        assert B.shape == (2, 2)

    def testShape2(self):
        with pytest.raises(ValueError):
            BBox((0, 0, 5))

    def testShape3(self):
        with pytest.raises(ValueError):
            BBox((0, 0, 5, 6, 7))

    def testArrayConstruction(self):
        # np.float_ was removed in NumPy 2.0; np.float64 is the same type.
        A = np.array(((4, 5), (10, 12)), np.float64)
        B = BBox(A)
        assert isinstance(B, BBox)

    def testMinMax(self):
        with pytest.raises(ValueError):
            BBox((0, 0, -1, 6))

    def testMinMax2(self):
        with pytest.raises(ValueError):
            BBox((0, 0, 1, -6))

    def testMinMax3(self):
        # OK to have a zero-sized BB
        B = BBox(((0, 0), (0, 5)))
        assert isinstance(B, BBox)

    def testMinMax4(self):
        # OK to have a zero-sized BB
        B = BBox(((10., -34), (10., -34.0)))
        assert isinstance(B, BBox)

    def testMinMax5(self):
        # OK to have a tiny BB
        B = BBox(((0, 0), (1e-20, 5)))
        assert isinstance(B, BBox)

    def testMinMax6(self):
        # Should catch tiny difference
        with pytest.raises(ValueError):
            BBox(((0, 0), (-1e-20, 5)))
class TestAsBBox():
    """Behaviour of the asBBox() converter (pass-through vs copy vs view)."""

    def testPassThrough(self):
        B = BBox(((0, 0), (5, 5)))
        C = asBBox(B)
        assert B is C

    def testPassThrough2(self):
        B = ((0, 0), (5, 5))
        C = asBBox(B)
        assert B is not C

    def testPassArray(self):
        # Different data type
        A = np.array(((0, 0), (5, 5)))
        C = asBBox(A)
        assert A is not C

    def testPassArray2(self):
        # same data type -- should be a view
        # (np.float_ was removed in NumPy 2.0; np.float64 is the same type)
        A = np.array(((0, 0), (5, 5)), np.float64)
        C = asBBox(A)
        A[0, 0] = -10
        assert C[0, 0] == A[0, 0]
class TestIntersect():
    """Overlap detection between two bounding boxes (BBox.Overlaps)."""

    def testSame(self):
        box_a = BBox(((-23.5, 456), (56, 532.0)))
        box_b = BBox(((-23.5, 456), (56, 532.0)))
        assert box_a.Overlaps(box_b)

    def testUpperLeft(self):
        box_a = BBox(((5, 10), (15, 25)))
        box_b = BBox(((0, 12), (10, 32.0)))
        assert box_a.Overlaps(box_b)

    def testUpperRight(self):
        box_a = BBox(((5, 10), (15, 25)))
        box_b = BBox(((12, 12), (25, 32.0)))
        assert box_a.Overlaps(box_b)

    def testLowerRight(self):
        box_a = BBox(((5, 10), (15, 25)))
        box_b = BBox(((12, 5), (25, 15)))
        assert box_a.Overlaps(box_b)

    def testLowerLeft(self):
        box_a = BBox(((5, 10), (15, 25)))
        box_b = BBox(((-10, 5), (8.5, 15)))
        assert box_a.Overlaps(box_b)

    def testBelow(self):
        box_a = BBox(((5, 10), (15, 25)))
        box_b = BBox(((-10, 5), (8.5, 9.2)))
        assert not box_a.Overlaps(box_b)

    def testAbove(self):
        box_a = BBox(((5, 10), (15, 25)))
        box_b = BBox(((-10, 25.001), (8.5, 32)))
        assert not box_a.Overlaps(box_b)

    def testLeft(self):
        box_a = BBox(((5, 10), (15, 25)))
        box_b = BBox(((4, 8), (4.95, 32)))
        assert not box_a.Overlaps(box_b)

    def testRight(self):
        box_a = BBox(((5, 10), (15, 25)))
        box_b = BBox(((17.1, 8), (17.95, 32)))
        assert not box_a.Overlaps(box_b)

    def testInside(self):
        box_a = BBox(((-15, -25), (-5, -10)))
        box_b = BBox(((-12, -22), (-6, -8)))
        assert box_a.Overlaps(box_b)

    def testOutside(self):
        box_a = BBox(((-15, -25), (-5, -10)))
        box_b = BBox(((-17, -26), (3, 0)))
        assert box_a.Overlaps(box_b)

    def testTouch(self):
        # Boxes that merely share an edge still count as overlapping.
        box_a = BBox(((5, 10), (15, 25)))
        box_b = BBox(((15, 8), (17.95, 32)))
        assert box_a.Overlaps(box_b)

    def testCorner(self):
        # Sharing a single corner point also counts as overlapping.
        box_a = BBox(((5, 10), (15, 25)))
        box_b = BBox(((15, 25), (17.95, 32)))
        assert box_a.Overlaps(box_b)

    def testZeroSize(self):
        box_a = BBox(((5, 10), (15, 25)))
        box_b = BBox(((15, 25), (15, 25)))
        assert box_a.Overlaps(box_b)

    def testZeroSize2(self):
        box_a = BBox(((5, 10), (5, 10)))
        box_b = BBox(((15, 25), (15, 25)))
        assert not box_a.Overlaps(box_b)

    def testZeroSize3(self):
        box_a = BBox(((5, 10), (5, 10)))
        box_b = BBox(((0, 8), (10, 12)))
        assert box_a.Overlaps(box_b)

    def testZeroSize4(self):
        box_a = BBox(((5, 1), (10, 25)))
        box_b = BBox(((8, 8), (8, 8)))
        assert box_a.Overlaps(box_b)
class TestEquality():
    """Equality comparisons between BBoxes and with plain numpy arrays."""

    def testSame(self):
        box_a = BBox(((1.0, 2.0), (5., 10.)))
        box_b = BBox(((1.0, 2.0), (5., 10.)))
        assert box_a == box_b

    def testIdentical(self):
        box_a = BBox(((1.0, 2.0), (5., 10.)))
        assert box_a == box_a

    def testNotSame(self):
        box_a = BBox(((1.0, 2.0), (5., 10.)))
        box_b = BBox(((1.0, 2.0), (5., 10.1)))
        assert not box_a == box_b

    def testWithArray(self):
        box_a = BBox(((1.0, 2.0), (5., 10.)))
        arr = np.array(((1.0, 2.0), (5., 10.)))
        assert box_a == arr

    def testWithArray2(self):
        # Comparison must also work with the array on the left-hand side.
        box_a = BBox(((1.0, 2.0), (5., 10.)))
        arr = np.array(((1.0, 2.0), (5., 10.)))
        assert arr == box_a

    def testWithArray3(self):
        box_a = BBox(((1.0, 2.0), (5., 10.)))
        arr = np.array(((1.01, 2.0), (5., 10.)))
        assert not arr == box_a
class TestInside():
    """Tests for BBox.Inside: is the argument box fully contained?"""
    def testSame(self):
        # A box contains an identical box.
        assert BBox(((1.0, 2.0), (5., 10.))).Inside(BBox(((1.0, 2.0), (5., 10.))))
    def testPoint(self):
        # A zero-area box in the interior is inside.
        assert BBox(((1.0, 2.0), (5., 10.))).Inside(BBox(((3.0, 4.0), (3.0, 4.0))))
    def testPointOutside(self):
        assert not BBox(((1.0, 2.0), (5., 10.))).Inside(BBox(((-3.0, 4.0), (0.10, 4.0))))
    def testUpperLeft(self):
        assert not BBox(((5, 10), (15, 25))).Inside(BBox(((0, 12), (10, 32.0))))
    def testUpperRight(self):
        assert not BBox(((5, 10), (15, 25))).Inside(BBox(((12, 12), (25, 32.0))))
    def testLowerRight(self):
        assert not BBox(((5, 10), (15, 25))).Inside(BBox(((12, 5), (25, 15))))
    def testLowerLeft(self):
        assert not BBox(((5, 10), (15, 25))).Inside(BBox(((-10, 5), (8.5, 15))))
    def testBelow(self):
        assert not BBox(((5, 10), (15, 25))).Inside(BBox(((-10, 5), (8.5, 9.2))))
    def testAbove(self):
        assert not BBox(((5, 10), (15, 25))).Inside(BBox(((-10, 25.001), (8.5, 32))))
    def testLeft(self):
        assert not BBox(((5, 10), (15, 25))).Inside(BBox(((4, 8), (4.95, 32))))
    def testRight(self):
        assert not BBox(((5, 10), (15, 25))).Inside(BBox(((17.1, 8), (17.95, 32))))
class TestPointInside():
    """Tests for BBox.PointInside: interior points, exterior points, and
    points lying exactly on the box edges (edges count as inside)."""
    def testPointIn(self):
        assert BBox(((1.0, 2.0), (5., 10.))).PointInside((3.0, 4.0))
    def testUpperLeft(self):
        assert not BBox(((5, 10), (15, 25))).PointInside((4, 30))
    def testUpperRight(self):
        assert not BBox(((5, 10), (15, 25))).PointInside((16, 30))
    def testLowerRight(self):
        assert not BBox(((5, 10), (15, 25))).PointInside((16, 4))
    def testLowerLeft(self):
        assert not BBox(((5, 10), (15, 25))).PointInside((-10, 5))
    def testBelow(self):
        assert not BBox(((5, 10), (15, 25))).PointInside((10, 5))
    def testAbove(self):
        assert not BBox(((5, 10), (15, 25))).PointInside((10, 25.001))
    def testLeft(self):
        assert not BBox(((5, 10), (15, 25))).PointInside((4, 12))
    def testRight(self):
        assert not BBox(((5, 10), (15, 25))).PointInside((17.1, 12.3))
    def testPointOnTopLine(self):
        # On the top edge counts as inside.
        assert BBox(((1.0, 2.0), (5., 10.))).PointInside((3.0, 10.))
    def testPointLeftTopLine(self):
        # On the top line's extension, but left of the box: outside.
        assert not BBox(((1.0, 2.0), (5., 10.))).PointInside((-3.0, 10.))
    def testPointOnBottomLine(self):
        assert BBox(((1.0, 2.0), (5., 10.))).PointInside((3.0, 5.))
    def testPointOnLeft(self):
        assert BBox(((-10., -10.), (-1.0, -1.0))).PointInside((-10, -5.))
    def testPointOnRight(self):
        assert BBox(((-10., -10.), (-1.0, -1.0))).PointInside((-1, -5.))
    def testPointOnBottomRight(self):
        # A corner point counts as inside.
        assert BBox(((-10., -10.), (-1.0, -1.0))).PointInside((-1, -10.))
class Test_from_points():
    """Tests for the from_points factory: the bounding box of a point set,
    stored as ((min_x, min_y), (max_x, max_y))."""
    def testCreate(self):
        B = from_points(np.array(((5, 2), (3, 4), (1, 6)), np.float64))
        assert (B[0, 0] == 1.0 and
                B[0, 1] == 2.0 and
                B[1, 0] == 5.0 and
                B[1, 1] == 6.0)
    def testCreateInts(self):
        # Integer input should produce the same (float-comparable) bounds.
        B = from_points(np.array(((5, 2), (3, 4), (1, 6))))
        assert (B[0, 0] == 1.0 and
                B[0, 1] == 2.0 and
                B[1, 0] == 5.0 and
                B[1, 1] == 6.0)
    def testSinglePoint(self):
        # Fix: np.float_ was removed in NumPy 2.0; np.float64 is the same
        # dtype and works on all NumPy versions.
        B = from_points(np.array((5, 2), np.float64))
        assert (B[0, 0] == 5. and
                B[0, 1] == 2.0 and
                B[1, 0] == 5. and
                B[1, 1] == 2.0)
    def testListTuples(self):
        B = from_points([(3, 6.5), (13, 43.2), (-4.32, -4), (65, -23),
                         (-0.0001, 23.432)])
        assert (B[0, 0] == -4.32 and
                B[0, 1] == -23.0 and
                B[1, 0] == 65.0 and
                B[1, 1] == 43.2)
class TestMerge():
    """Tests for BBox.Merge (in-place union of two boxes)."""
    A = BBox(((-23.5, 456), (56, 532.0)))
    B = BBox(((-20.3, 460), (54, 465)))   # completely inside A
    C = BBox(((-23.5, 456), (58, 540.)))  # extends A up and to the right
    D = BBox(((-26.5, 12), (56, 532.0)))  # extends A down and to the left
    def testInside(self):
        merged = self.A.copy()
        merged.Merge(self.B)
        assert merged == self.A
    def testFullOutside(self):
        merged = self.B.copy()
        merged.Merge(self.A)
        assert merged == self.A
    def testUpRight(self):
        merged = self.A.copy()
        merged.Merge(self.C)
        # Lower-left corner unchanged, upper-right taken from C.
        assert merged[0] == self.A[0] and merged[1] == self.C[1]
    def testDownLeft(self):
        merged = self.A.copy()
        merged.Merge(self.D)
        # Lower-left corner taken from D, upper-right unchanged.
        assert merged[0] == self.D[0] and merged[1] == self.A[1]
class TestWidthHeight():
B | |
# <gh_stars>1-10
import itertools
from field.point import Point
from field.field import Field
from move.move import Move
from move.move_type import MoveType
import numpy as np
from config import config
class GameState:
    """Abstract base for a 2-player, perfect-information, zero-sum game
    state, exposing the minimum surface needed to implement UCT search.

    By convention the players are numbered 1 and 2; player 1 moves first.
    Subclasses may speed search up, e.g. with a GetRandomMove() used during
    rollout.
    """
    def __init__(self):
        # At the root, pretend player 2 just moved so player 1 is to move.
        self.playerJustMoved = 2

    def Clone(self):
        """Return a deep copy of this state."""
        clone = GameState()
        clone.playerJustMoved = self.playerJustMoved
        return clone

    def DoMove(self, move):
        """Apply *move* to the state; must update playerJustMoved."""
        self.playerJustMoved = 3 - self.playerJustMoved

    def GetMoves(self):
        """Return all legal moves from this state (abstract stub)."""

    def GetResult(self, playerjm):
        """Return the game result from *playerjm*'s viewpoint (abstract stub)."""

    def __repr__(self):
        """Not required - but good style; subclasses render the board."""
        pass
class GOLADState(GameState):
    """A state of the game of GOLAD (Game of Life and Death).

    The board is held in ``self.field``; its ``cells`` grid uses
    '.' = empty, '0' = player 0, '1' = player 1.

    Fixes vs. the previous version: the class string is now a real
    docstring (it followed a stray ``pass`` before), ``Clone`` copies
    ``timestep``/``terminal``, and ``__repr__`` renders ``self.field``
    instead of the nonexistent ``self.size``/``self.board``.
    """
    def __init__(self, field):
        self.current_player = 0
        self.timestep = 0
        # 0 (not done), 1 (player0 wins), 2 (player1 wins), 3 (tie)
        self.terminal = 0
        self.field = field
        self.max_timestep = config.max_ep_length

    def Clone(self):
        """Create a deep clone of this game state."""
        st = GOLADState(self.field.Clone())
        st.current_player = self.current_player
        # Bug fix: timestep and terminal were not copied before, so a clone
        # of a mid-game or finished state looked like a brand new game.
        st.timestep = self.timestep
        st.terminal = self.terminal
        return st

    def Convert(self):
        """Convert to the (width, height, 3) array expected by nn/nn.py:
        channel 0 marks player-0 cells, channel 1 marks player-1 cells and
        channel 2 is filled with the index of the player to move."""
        cells = self.field.cells
        state = np.zeros((self.field.width, self.field.height, 3))
        for i in range(self.field.width):
            for j in range(self.field.height):
                if cells[i][j] == '0':
                    state[i, j, 0] = 1
                elif cells[i][j] == '1':
                    state[i, j, 1] = 1
        # Same value everywhere; vectorized instead of set per-cell.
        state[:, :, 2] = self.current_player
        return state

    def DoMove(self, move):
        """Apply *move*, simulate one Game-of-Life step, flip the turn
        player, and update ``self.terminal``."""
        # Apply the cell change requested by the move
        if move.move_type == MoveType.KILL:
            self.field.cells[move.target_point.x][move.target_point.y] = '.'
        elif move.move_type == MoveType.BIRTH:
            self.field.cells[move.target_point.x][move.target_point.y] = str(self.current_player)
            self.field.cells[move.sacrifice_points[0].x][move.sacrifice_points[0].y] = '.'
            self.field.cells[move.sacrifice_points[1].x][move.sacrifice_points[1].y] = '.'
        elif move.move_type == MoveType.PASS:
            pass
        # Simulate the game for 1 step
        cell_map = self.field.get_cell_mapping()
        dead_cells = cell_map.get('.', [])
        my_cells = cell_map.get(str(self.current_player), [])
        opp_cells = cell_map.get(str(1 - self.current_player), [])
        living_cells = my_cells + opp_cells
        new_field = self.field.Clone()
        for cell in living_cells:
            count = self.field.count_neighbors(cell.x, cell.y)
            # Under- or over-population kills the cell.
            if count[0] < 2 or count[0] > 3:
                new_field.cells[cell.x][cell.y] = '.'
        for cell in dead_cells:
            count = self.field.count_neighbors(cell.x, cell.y)
            if count[0] == 3:
                # Newborn cell belongs to the majority owner of its neighbors.
                new_field.cells[cell.x][cell.y] = '0' if count[1] > count[2] else '1'
        self.field = new_field
        # Only increment timestep counter after Player1's turn
        if self.current_player == 1:
            self.timestep += 1
        # Flip turn player
        self.current_player = 1 - self.current_player
        # Update self.terminal
        cell_map = self.field.get_cell_mapping()
        cells_0 = cell_map.get('0', [])
        cells_1 = cell_map.get('1', [])
        if (len(cells_0) > 0) and (len(cells_1) <= 0):
            self.terminal = 1
        elif (len(cells_0) <= 0) and (len(cells_1) > 0):
            self.terminal = 2
        elif (len(cells_0) <= 0) and (len(cells_1) <= 0):
            self.terminal = 3
        elif self.timestep >= self.max_timestep:
            self.terminal = 3

    def GetMoves(self, do_rand_birth=config.do_rand_birth):
        """Get all possible moves from this state.

        With *do_rand_birth*, a single random sacrifice pair is sampled per
        birth cell instead of enumerating every pair of own cells.
        """
        if self.terminal != 0:
            return []
        moves = []
        cell_map = self.field.get_cell_mapping()
        dead_cells = cell_map.get('.', [])
        my_cells = cell_map.get(str(self.current_player), [])
        opp_cells = cell_map.get(str(1 - self.current_player), [])
        living_cells = my_cells + opp_cells
        # Generate kill moves
        for kill_cell in living_cells:
            moves.append(Move(MoveType.KILL, kill_cell))
        # Generate birth moves (each birth sacrifices two own cells)
        for birth_cell in dead_cells:
            if do_rand_birth:
                if len(my_cells) > 1:
                    idx0, idx1 = np.random.choice(len(my_cells), 2, replace=False)
                    moves.append(Move(MoveType.BIRTH, birth_cell, [my_cells[idx0], my_cells[idx1]]))
            else:
                for sacrifice_cells in itertools.combinations(my_cells, 2):
                    moves.append(Move(MoveType.BIRTH, birth_cell, [sacrifice_cells[0], sacrifice_cells[1]]))
        # Generate pass move
        moves.append(Move(MoveType.PASS))
        return moves

    def IsOnBoard(self, x, y):
        """True when (x, y) lies within the field bounds."""
        return x >= 0 and x < self.field.width and y >= 0 and y < self.field.height

    def CoordsToIndex(self, x, y):
        """Flatten (x, y) to a single column-major index into the network output."""
        return x * self.field.height + y

    def GetP(self, net_probs, valid_moves):
        """Map each valid move to its probability under the network output
        (action head times the relevant location heads)."""
        action_logits, birth_logits, sac_logits, kill_logits, _ = net_probs
        p = {}
        for move in valid_moves:
            if move.move_type == MoveType.BIRTH:
                p[move] = np.squeeze(action_logits)[0]  # assuming batch size of 1
                birth_cell = move.target_point
                sac_cell1, sac_cell2 = move.sacrifice_points
                p[move] = p[move] * np.squeeze(birth_logits)[self.CoordsToIndex(birth_cell.x, birth_cell.y)]
                p[move] = p[move] * np.squeeze(sac_logits)[self.CoordsToIndex(sac_cell1.x, sac_cell1.y)]
                p[move] = p[move] * np.squeeze(sac_logits)[self.CoordsToIndex(sac_cell2.x, sac_cell2.y)]
            elif move.move_type == MoveType.KILL:
                p[move] = np.squeeze(action_logits[1])
                kill_cell = move.target_point
                p[move] = p[move] * np.squeeze(kill_logits)[self.CoordsToIndex(kill_cell.x, kill_cell.y)]
            else:
                p[move] = np.squeeze(action_logits[2])
        return p

    def GetV(self, net_probs):
        """Return the scalar value head of the network output."""
        _, _, _, _, v = net_probs
        return np.squeeze(v)  # assuming batch size of 1

    def GetResult(self, player=None):
        """Get the final game result from the viewpoint of *player* (or
        current_player, if None): 1.0 win, -1.0 loss, 0. tie/undecided."""
        if player is None:
            player = self.current_player
        cell_map = self.field.get_cell_mapping()
        my_cells = cell_map.get(str(player), [])
        opp_cells = cell_map.get(str(1 - player), [])
        if (len(my_cells) > 0) and (len(opp_cells) <= 0):
            return 1.0
        elif (len(my_cells) <= 0) and (len(opp_cells) > 0):
            return -1.0
        else:
            return 0.

    def __repr__(self):
        """Render the board as rows of '.', '0', '1' characters.

        Bug fix: the previous version referenced self.size/self.board,
        which do not exist on GOLADState (copied from OthelloState) and
        raised AttributeError.
        """
        s = ""
        for j in range(self.field.height):
            for i in range(self.field.width):
                s += self.field.cells[i][j]
            s += "\n"
        return s
class OthelloState(GameState):
pass
""" A state of the game of Othello, i.e. the game board.
The board is a 2D array where 0 = empty (.), 1 = player 1 (X), 2 = player 2 (O).
In Othello players alternately place pieces on a square board - each piece played
has to sandwich opponent pieces between the piece played and pieces already on the
board. Sandwiched pieces are flipped.
This implementation modifies the rules to allow variable sized square boards and
terminates the game as soon as the player about to move cannot make a move (whereas
the standard game allows for a pass move).
"""
def __init__(self, sz=8):
    """Create an sz x sz Othello board with the four standard center pieces.

    :param sz: board edge length; must be integral and even.
    """
    self.playerJustMoved = 2  # at the root pretend p2 just moved; p1 starts
    self.board = []  # 0 = empty, 1 = player 1, 2 = player 2
    self.size = sz
    assert sz == int(sz) and sz % 2 == 0  # size must be integral and even
    for y in range(sz):
        self.board.append([0] * sz)
    # Bug fix: use floor division. Under Python 3, sz / 2 is a float and
    # indexing a list with it raises TypeError.
    mid = sz // 2
    self.board[mid][mid] = self.board[mid - 1][mid - 1] = 1
    self.board[mid][mid - 1] = self.board[mid - 1][mid] = 2
def Clone(self):
    """Return a deep copy of this game state."""
    clone = OthelloState()
    clone.playerJustMoved = self.playerJustMoved
    clone.board = [row[:] for row in self.board]
    clone.size = self.size
    return clone
def DoMove(self, move):
    """Place a piece at *move* = (x, y), flipping all sandwiched counters.
    Must update playerJustMoved."""
    x, y = move[0], move[1]
    assert x == int(x) and y == int(y) and self.IsOnBoard(x, y) and self.board[x][y] == 0
    # Collect flips before switching players: GetAllSandwichedCounters
    # works relative to the current playerJustMoved.
    flipped = self.GetAllSandwichedCounters(x, y)
    self.playerJustMoved = 3 - self.playerJustMoved
    self.board[x][y] = self.playerJustMoved
    for a, b in flipped:
        self.board[a][b] = self.playerJustMoved
def GetMoves(self):
    """Return all empty squares where a sandwiching placement exists."""
    moves = []
    for x in range(self.size):
        for y in range(self.size):
            if self.board[x][y] == 0 and self.ExistsSandwichedCounter(x, y):
                moves.append((x, y))
    return moves
def AdjacentToEnemy(self, x, y):
    """Speeds up GetMoves by only considering squares which are adjacent
    to an enemy-occupied square."""
    directions = ((0, +1), (+1, +1), (+1, 0), (+1, -1),
                  (0, -1), (-1, -1), (-1, 0), (-1, +1))
    return any(self.IsOnBoard(x + dx, y + dy) and
               self.board[x + dx][y + dy] == self.playerJustMoved
               for dx, dy in directions)
def AdjacentEnemyDirections(self, x, y):
    """Return the (dx, dy) directions in which an enemy piece neighbors (x, y)."""
    directions = ((0, +1), (+1, +1), (+1, 0), (+1, -1),
                  (0, -1), (-1, -1), (-1, 0), (-1, +1))
    return [(dx, dy) for dx, dy in directions
            if self.IsOnBoard(x + dx, y + dy) and
            self.board[x + dx][y + dy] == self.playerJustMoved]
def ExistsSandwichedCounter(self,x,y):
""" Does there exist at least one counter | |
config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
foo.example.com:
- test.*
This configuration will only allow the minion foo.example.com to
execute commands from the test module
'''
if not self.__verify_minion_publish(load):
return {}
# Set up the publication payload
pub_load = {
'fun': load['fun'],
'arg': load['arg'],
'expr_form': load.get('tgt_type', 'glob'),
'tgt': load['tgt'],
'ret': load['ret'],
'id': load['id'],
}
if 'tgt_type' in load:
if load['tgt_type'].startswith('node'):
if load['tgt'] in self.opts['nodegroups']:
pub_load['tgt'] = self.opts['nodegroups'][load['tgt']]
pub_load['expr_form_type'] = 'compound'
pub_load['expr_form'] = load['tgt_type']
else:
return {}
else:
pub_load['expr_form'] = load['tgt_type']
ret = {}
ret['jid'] = self.local.cmd_async(**pub_load)
ret['minions'] = self.ckminions.check_minions(
load['tgt'],
pub_load['expr_form'])
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, str(ret['jid']))
with salt.utils.fopen(jid_fn, 'w+') as fp_:
fp_.write(load['id'])
return ret
def minion_publish(self, load):
    '''
    Publish a command initiated from a minion, this method executes minion
    restrictions so that the minion publication will only work if it is
    enabled in the config.
    The configuration on the master allows minions to be matched to
    salt functions, so the minions can only publish allowed salt functions
    The config will look like this:

    peer:
      .*:
        - .*

    This configuration will enable all minions to execute all commands.

    peer:
      foo.example.com:
        - test.*

    This configuration will only allow the minion foo.example.com to
    execute commands from the test module
    '''
    if not self.__verify_minion_publish(load):
        return {}
    # Set up the publication payload
    pub_load = {
        'fun': load['fun'],
        'arg': load['arg'],
        'expr_form': load.get('tgt_type', 'glob'),
        'tgt': load['tgt'],
        'ret': load['ret'],
        'id': load['id'],
    }
    # 'tmo' is the legacy timeout key; 'timeout' (checked second) wins
    # when both are supplied. Previously two copy-pasted blocks.
    for tmo_key in ('tmo', 'timeout'):
        if tmo_key in load:
            try:
                pub_load['timeout'] = int(load[tmo_key])
            except ValueError:
                msg = 'Failed to parse timeout value: {0}'.format(
                    load[tmo_key])
                log.warn(msg)
                return {}
    if 'tgt_type' in load:
        if load['tgt_type'].startswith('node'):
            # Nodegroup targeting: expand the group to a compound expression
            if load['tgt'] in self.opts['nodegroups']:
                pub_load['tgt'] = self.opts['nodegroups'][load['tgt']]
                pub_load['expr_form_type'] = 'compound'
            else:
                return {}
        else:
            pub_load['expr_form'] = load['tgt_type']
    pub_load['raw'] = True
    ret = {}
    for minion in self.local.cmd_iter(**pub_load):
        if load.get('form', '') == 'full':
            data = minion
            if 'jid' in minion:
                ret['__jid__'] = minion['jid']
            data['ret'] = data.pop('return')
            ret[minion['id']] = data
        else:
            ret[minion['id']] = minion['return']
            if 'jid' in minion:
                ret['__jid__'] = minion['jid']
    # Bug fix: when no minion responded, '__jid__' was never set and the
    # original raised KeyError both here and in the pop below.
    if '__jid__' in ret:
        for key, val in six.iteritems(self.local.get_cache_returns(ret['__jid__'])):
            if key not in ret:
                ret[key] = val
    if load.get('form', '') != 'full':
        ret.pop('__jid__', None)
    return ret
def revoke_auth(self, load):
    '''
    Allow a minion to request revocation of its own key
    '''
    if 'id' not in load:
        return False
    keyapi = salt.key.Key(self.opts)
    preserve = load.get('preserve_minion_cache', False)
    keyapi.delete_key(load['id'], preserve_minions=preserve)
    return True
class LocalFuncs(object):
'''
Set up methods for use only from the local system
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key):
    '''
    Stand up the salt subsystems LocalFuncs needs: event bus, local
    client, minion ACL checker, eauth loader, master minion and wheel.

    :param opts: the master configuration options dictionary
    :param key: auth key material  # NOTE(review): only stored here; usage not visible in this chunk — confirm
    '''
    self.opts = opts
    # Payload serializer matching the configured transport format
    self.serial = salt.payload.Serial(opts)
    self.key = key
    # Create the event manager
    self.event = salt.utils.event.get_event(
        'master',
        self.opts['sock_dir'],
        self.opts['transport'],
        opts=self.opts,
        listen=False)
    # Make a client
    self.local = salt.client.get_local_client(mopts=self.opts)
    # Make an minion checker object
    self.ckminions = salt.utils.minions.CkMinions(opts)
    # Make an Auth object
    self.loadauth = salt.auth.LoadAuth(opts)
    # Stand up the master Minion to access returner data
    self.mminion = salt.minion.MasterMinion(
        self.opts,
        states=False,
        rend=False)
    # Make a wheel object
    self.wheel_ = salt.wheel.Wheel(opts)
def runner(self, load):
    '''
    Send a master control function back to the runner system

    Two mutually exclusive auth paths: a pre-issued token ('token' in
    load) or inline eauth credentials. The token branch returns in every
    case, so the eauth handling below only runs when no token was given.
    Auth failures are returned as error dicts, never raised.
    '''
    if 'token' in load:
        try:
            token = self.loadauth.get_tok(load['token'])
        except Exception as exc:
            msg = 'Exception occurred when generating auth token: {0}'.format(
                exc)
            log.error(msg)
            return dict(error=dict(name='TokenAuthenticationError',
                                   message=msg))
        if not token:
            msg = 'Authentication failure of type "token" occurred.'
            log.warning(msg)
            return dict(error=dict(name='TokenAuthenticationError',
                                   message=msg))
        if token['eauth'] not in self.opts['external_auth']:
            msg = 'Authentication failure of type "token" occurred.'
            log.warning(msg)
            return dict(error=dict(name='TokenAuthenticationError',
                                   message=msg))
        # ACL of the token's user, falling back to the '*' catch-all entry
        good = self.ckminions.runner_check(
            self.opts['external_auth'][token['eauth']][token['name']]
            if token['name'] in self.opts['external_auth'][token['eauth']]
            else self.opts['external_auth'][token['eauth']]['*'],
            load['fun'])
        if not good:
            msg = ('Authentication failure of type "token" occurred for '
                   'user {0}.').format(token['name'])
            log.warning(msg)
            return dict(error=dict(name='TokenAuthenticationError',
                                   message=msg))
        try:
            fun = load.pop('fun')
            runner_client = salt.runner.RunnerClient(self.opts)
            return runner_client.async(
                fun,
                load.get('kwarg', {}),
                token['name'])
        except Exception as exc:
            log.error('Exception occurred while '
                      'introspecting {0}: {1}'.format(fun, exc))
            return dict(error=dict(name=exc.__class__.__name__,
                                   args=exc.args,
                                   message=str(exc)))
    if 'eauth' not in load:
        msg = ('Authentication failure of type "eauth" occurred for '
               'user {0}.').format(load.get('username', 'UNKNOWN'))
        log.warning(msg)
        return dict(error=dict(name='EauthAuthenticationError',
                               message=msg))
    if load['eauth'] not in self.opts['external_auth']:
        # The eauth system is not enabled, fail
        msg = ('Authentication failure of type "eauth" occurred for '
               'user {0}.').format(load.get('username', 'UNKNOWN'))
        log.warning(msg)
        return dict(error=dict(name='EauthAuthenticationError',
                               message=msg))
    try:
        name = self.loadauth.load_name(load)
        # NOTE: '|' binds tighter than 'not', so this parses as
        # "not (user has an ACL entry or '*' has one)".
        if not (name in self.opts['external_auth'][load['eauth']]) | ('*' in self.opts['external_auth'][load['eauth']]):
            msg = ('Authentication failure of type "eauth" occurred for '
                   'user {0}.').format(load.get('username', 'UNKNOWN'))
            log.warning(msg)
            return dict(error=dict(name='EauthAuthenticationError',
                                   message=msg))
        if not self.loadauth.time_auth(load):
            msg = ('Authentication failure of type "eauth" occurred for '
                   'user {0}.').format(load.get('username', 'UNKNOWN'))
            log.warning(msg)
            return dict(error=dict(name='EauthAuthenticationError',
                                   message=msg))
        good = self.ckminions.runner_check(
            self.opts['external_auth'][load['eauth']][name] if name in self.opts['external_auth'][load['eauth']] else self.opts['external_auth'][load['eauth']]['*'],
            load['fun'])
        if not good:
            msg = ('Authentication failure of type "eauth" occurred for '
                   'user {0}.').format(load.get('username', 'UNKNOWN'))
            log.warning(msg)
            return dict(error=dict(name='EauthAuthenticationError',
                                   message=msg))
        try:
            fun = load.pop('fun')
            runner_client = salt.runner.RunnerClient(self.opts)
            return runner_client.async(fun,
                                       load.get('kwarg', {}),
                                       load.get('username', 'UNKNOWN'))
        except Exception as exc:
            log.error('Exception occurred while '
                      'introspecting {0}: {1}'.format(fun, exc))
            return dict(error=dict(name=exc.__class__.__name__,
                                   args=exc.args,
                                   message=str(exc)))
    except Exception as exc:
        log.error(
            'Exception occurred in the runner system: {0}'.format(exc)
        )
        return dict(error=dict(name=exc.__class__.__name__,
                               args=exc.args,
                               message=str(exc)))
def wheel(self, load):
    '''
    Send a master control function back to the wheel system

    Mirrors :py:meth:`runner`: a pre-issued token path (which always
    returns) followed by an inline-eauth path. Auth failures are returned
    as error dicts, never raised.
    '''
    # All wheel ops pass through eauth
    if 'token' in load:
        try:
            token = self.loadauth.get_tok(load['token'])
        except Exception as exc:
            msg = 'Exception occurred when generating auth token: {0}'.format(
                exc)
            log.error(msg)
            return dict(error=dict(name='TokenAuthenticationError',
                                   message=msg))
        if not token:
            msg = 'Authentication failure of type "token" occurred.'
            log.warning(msg)
            return dict(error=dict(name='TokenAuthenticationError',
                                   message=msg))
        if token['eauth'] not in self.opts['external_auth']:
            msg = 'Authentication failure of type "token" occurred.'
            log.warning(msg)
            return dict(error=dict(name='TokenAuthenticationError',
                                   message=msg))
        # ACL of the token's user, falling back to the '*' catch-all entry
        good = self.ckminions.wheel_check(
            self.opts['external_auth'][token['eauth']][token['name']]
            if token['name'] in self.opts['external_auth'][token['eauth']]
            else self.opts['external_auth'][token['eauth']]['*'],
            load['fun'])
        if not good:
            msg = ('Authentication failure of type "token" occurred for '
                   'user {0}.').format(token['name'])
            log.warning(msg)
            return dict(error=dict(name='TokenAuthenticationError',
                                   message=msg))
        jid = salt.utils.jid.gen_jid()
        fun = load.pop('fun')
        tag = tagify(jid, prefix='wheel')
        data = {'fun': "wheel.{0}".format(fun),
                'jid': jid,
                'tag': tag,
                'user': token['name']}
        try:
            self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
            ret = self.wheel_.call_func(fun, **load)
            data['return'] = ret
            data['success'] = True
            self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
            return {'tag': tag,
                    'data': data}
        except Exception as exc:
            log.error(exc)
            log.error('Exception occurred while '
                      'introspecting {0}: {1}'.format(fun, exc))
            data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
                fun,
                exc.__class__.__name__,
                exc,
            )
            data['success'] = False
            self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
            return {'tag': tag,
                    'data': data}
    if 'eauth' not in load:
        msg = ('Authentication failure of type "eauth" occurred for '
               'user {0}.').format(load.get('username', 'UNKNOWN'))
        log.warning(msg)
        return dict(error=dict(name='EauthAuthenticationError',
                               message=msg))
    if load['eauth'] not in self.opts['external_auth']:
        # The eauth system is not enabled, fail
        msg = ('Authentication failure of type "eauth" occurred for '
               'user {0}.').format(load.get('username', 'UNKNOWN'))
        log.warning(msg)
        return dict(error=dict(name='EauthAuthenticationError',
                               message=msg))
    try:
        name = self.loadauth.load_name(load)
        if not ((name in self.opts['external_auth'][load['eauth']]) |
                ('*' in self.opts['external_auth'][load['eauth']])):
            msg = ('Authentication failure of type "eauth" occurred for '
                   'user {0}.').format(load.get('username', 'UNKNOWN'))
            log.warning(msg)
            return dict(error=dict(name='EauthAuthenticationError',
                                   message=msg))
        if not self.loadauth.time_auth(load):
            msg = ('Authentication failure of type "eauth" occurred for '
                   'user {0}.').format(load.get('username', 'UNKNOWN'))
            log.warning(msg)
            return dict(error=dict(name='EauthAuthenticationError',
                                   message=msg))
        # Bug fix: the '*' fallback previously read
        # self.opts['external_auth'][token['eauth']]['*'], but 'token' is
        # undefined on this token-less path and raised NameError.
        good = self.ckminions.wheel_check(
            self.opts['external_auth'][load['eauth']][name]
            if name in self.opts['external_auth'][load['eauth']]
            else self.opts['external_auth'][load['eauth']]['*'],
            load['fun'])
        if not good:
            msg = ('Authentication failure of type "eauth" occurred for '
                   'user {0}.').format(load.get('username', 'UNKNOWN'))
            log.warning(msg)
            return dict(error=dict(name='EauthAuthenticationError',
                                   message=msg))
        jid = salt.utils.jid.gen_jid()
        fun = load.pop('fun')
        tag = tagify(jid, prefix='wheel')
        data = {'fun': "wheel.{0}".format(fun),
                'jid': jid,
                'tag': tag,
                'user': load.get('username', 'UNKNOWN')}
        try:
            self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
            ret = self.wheel_.call_func(fun, **load)
            data['return'] = ret
            data['success'] = True
            self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
            return {'tag': tag,
                    'data': data}
        except Exception as exc:
            log.error('Exception occurred while '
                      'introspecting {0}: {1}'.format(fun, exc))
            data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
                fun,
                exc.__class__.__name__,
                exc,
            )
            # Bug fix: mirror the token branch and mark the failure before
            # firing the return event (success was previously left unset).
            data['success'] = False
            self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
            return {'tag': tag,
                    'data': data}
    except Exception as exc:
        log.error(
            'Exception occurred in the wheel system: {0}'.format(exc)
        )
        return dict(error=dict(name=exc.__class__.__name__,
                               args=exc.args,
                               message=str(exc)))
def mk_token(self, load):
'''
Create and return | |
adjsObj[obj2.id];
}
return this.edges[obj.id][obj2.id];
},
/**
* @param {Array} id
* @return {undefined}
*/
removeNode : function(id) {
if (this.hasNode(id)) {
delete this.nodes[id];
var adjs = this.edges[id];
var to;
for (to in adjs) {
delete this.edges[to][id];
}
delete this.edges[id];
}
},
/**
* @param {?} id2
* @param {?} id1
* @return {undefined}
*/
removeAdjacence : function(id2, id1) {
delete this.edges[id2][id1];
delete this.edges[id1][id2];
},
/**
* @param {?} id
* @return {?}
*/
hasNode : function(id) {
return id in this.nodes;
},
/**
* @return {undefined}
*/
empty : function() {
this.nodes = {};
this.edges = {};
}
});
var Graph = $jit.Graph;
var methods;
(function() {
  /**
   * Read an animation-typed data value.
   * @param {string} prefix - data namespace: "" (plain), "canvas" or "label".
   * @param {string} prop - property name; stored under "$<prefix>-<prop>".
   * @param {string} type - "current", "start" or "end" (defaults to "current").
   * @param {boolean} force - when true, return the raw stored value.
   * @param {Object} prefixConfig - configuration object used as fallback.
   * @return {?} stored value, falling back to this.data, then config, then 0.
   */
  var getDataInternal = function(prefix, prop, type, force, prefixConfig) {
    type = type || "current";
    prefix = "$" + (prefix ? prefix + "-" : "");
    var store = type == "current" ? this.data :
                type == "start" ? this.startData :
                type == "end" ? this.endData : undefined;
    var key = prefix + prop;
    if (force) {
      return store[key];
    }
    if (!this.Config.overridable) {
      return prefixConfig[prop] || 0;
    }
    if (key in store) {
      return store[key];
    }
    if (key in this.data) {
      return this.data[key];
    }
    return prefixConfig[prop] || 0;
  };
  /**
   * Write an animation-typed data value.
   * @param {string} prefix - data namespace ("" | "canvas" | "label").
   * @param {string} prop - property name.
   * @param {?} value - value to store.
   * @param {string} type - "current", "start" or "end" (defaults to "current").
   * @return {undefined}
   */
  var setDataInternal = function(prefix, prop, value, type) {
    type = type || "current";
    prefix = "$" + (prefix ? prefix + "-" : "");
    var store = type == "current" ? this.data :
                type == "start" ? this.startData :
                type == "end" ? this.endData : undefined;
    store[prefix + prop] = value;
  };
  /**
   * Remove the given attributes from all three animation stores.
   * @param {string} prefix - data namespace ("" | "canvas" | "label").
   * @param {Array} attributes - property names to delete.
   * @return {undefined}
   */
  var removeDataInternal = function(prefix, attributes) {
    prefix = "$" + (prefix ? prefix + "-" : "");
    var that = this;
    $.each(attributes, function(attr) {
      var key = prefix + attr;
      delete that.data[key];
      delete that.endData[key];
      delete that.startData[key];
    });
  };
  methods = {
    /**
     * Read a plain data property (falls back to this.Config).
     * @param {string} prop
     * @param {string} type - animation type, default "current".
     * @param {boolean} force
     * @return {?}
     */
    getData : function(prop, type, force) {
      return getDataInternal.call(this, "", prop, type, force, this.Config);
    },
    /**
     * Write a plain data property.
     * @param {string} prop
     * @param {?} value
     * @param {string} type - animation type, default "current".
     * @return {undefined}
     */
    setData : function(prop, value, type) {
      setDataInternal.call(this, "", prop, value, type);
    },
    /**
     * Write several properties across several animation types at once.
     * @param {(Array|string)} types - animation type(s).
     * @param {Object} obj - map of property name to value(s), one per type.
     * @return {undefined}
     */
    setDataset : function(types, obj) {
      types = $.splat(types);
      for (var attr in obj) {
        var values = $.splat(obj[attr]);
        for (var i = 0; i < types.length; i++) {
          this.setData(attr, values[i], types[i]);
        }
      }
    },
    /**
     * Remove the named plain data properties from every animation store.
     * @return {undefined}
     */
    removeData : function() {
      removeDataInternal.call(this, "", Array.prototype.slice.call(arguments));
    },
    /**
     * Read a canvas style (falls back to this.Config.CanvasStyles).
     * @param {string} prop
     * @param {string} type
     * @param {boolean} force
     * @return {?}
     */
    getCanvasStyle : function(prop, type, force) {
      return getDataInternal.call(this, "canvas", prop, type, force, this.Config.CanvasStyles);
    },
    /**
     * Write a canvas style.
     * @param {string} prop
     * @param {?} value
     * @param {string} type
     * @return {undefined}
     */
    setCanvasStyle : function(prop, value, type) {
      setDataInternal.call(this, "canvas", prop, value, type);
    },
    /**
     * Write several canvas styles across several animation types.
     * @param {(Array|string)} types
     * @param {Object} obj
     * @return {undefined}
     */
    setCanvasStyles : function(types, obj) {
      types = $.splat(types);
      for (var attr in obj) {
        var values = $.splat(obj[attr]);
        for (var i = 0; i < types.length; i++) {
          this.setCanvasStyle(attr, values[i], types[i]);
        }
      }
    },
    /**
     * Remove the named canvas styles from every animation store.
     * @return {undefined}
     */
    removeCanvasStyle : function() {
      removeDataInternal.call(this, "canvas", Array.prototype.slice.call(arguments));
    },
    /**
     * Read a label property (falls back to this.Label).
     * @param {string} prop
     * @param {string} type
     * @param {boolean} force
     * @return {?}
     */
    getLabelData : function(prop, type, force) {
      return getDataInternal.call(this, "label", prop, type, force, this.Label);
    },
    /**
     * Write a label property.
     * @param {string} prop
     * @param {?} value
     * @param {string} type
     * @return {undefined}
     */
    setLabelData : function(prop, value, type) {
      setDataInternal.call(this, "label", prop, value, type);
    },
    /**
     * Write several label properties across several animation types.
     * @param {(Array|string)} types
     * @param {Object} obj
     * @return {undefined}
     */
    setLabelDataset : function(types, obj) {
      types = $.splat(types);
      for (var attr in obj) {
        var values = $.splat(obj[attr]);
        for (var i = 0; i < types.length; i++) {
          this.setLabelData(attr, values[i], types[i]);
        }
      }
    },
    /**
     * Remove the named label properties from every animation store.
     * @return {undefined}
     */
    removeLabelData : function() {
      removeDataInternal.call(this, "label", Array.prototype.slice.call(arguments));
    }
  };
})();
Graph.Node = new Class({
  /**
   * Build a graph node, merging the caller's attributes over defaults.
   * @param {Object} attributes - per-node values overriding the defaults.
   * @param {Function} klass - position class; instantiated for pos/startPos/endPos.
   * @param {Object} Node - node configuration (also stored as this.Config).
   * @param {Object} Edge - edge configuration.
   * @param {Object} Label - label configuration.
   * @return {undefined}
   */
  initialize : function(attributes, klass, Node, Edge, Label) {
    var defaults = {
      id : "",
      name : "",
      data : {},
      startData : {},
      endData : {},
      adjacencies : {},
      selected : false,
      drawn : false,
      exist : false,
      angleSpan : {
        begin : 0,
        end : 0
      },
      pos : new klass(),
      startPos : new klass(),
      endPos : new klass()
    };
    $.extend(this, $.extend(defaults, attributes));
    this.Config = this.Node = Node;
    this.Edge = Edge;
    this.Label = Label;
  },
  /**
   * Whether this node has an edge to the given node.
   * @param {Object} node
   * @return {boolean}
   */
  adjacentTo : function(node) {
    return node.id in this.adjacencies;
  },
  /**
   * The adjacency record for the given node id, if any.
   * @param {string} id
   * @return {?}
   */
  getAdjacency : function(id) {
    return this.adjacencies[id];
  },
  /**
   * Position for the given animation type.
   * @param {string} type - "current" (default), "start" or "end".
   * @return {?} the matching position object (undefined for unknown types).
   */
  getPos : function(type) {
    type = type || "current";
    if (type == "current") {
      return this.pos;
    }
    if (type == "end") {
      return this.endPos;
    }
    if (type == "start") {
      return this.startPos;
    }
  },
  /**
   * Copy *value* into the position slot for the given animation type.
   * @param {?} value - object accepted by the position's set() method.
   * @param {string} type - "current" (default), "start" or "end".
   * @return {undefined}
   */
  setPos : function(value, type) {
    type = type || "current";
    var target;
    if (type == "current") {
      target = this.pos;
    } else if (type == "end") {
      target = this.endPos;
    } else if (type == "start") {
      target = this.startPos;
    }
    target.set(value);
  }
});
Graph.Node.implement(methods);
Graph.Adjacence = new Class({
  /**
   * A graph edge (adjacency) connecting two nodes.
   * (Previous jsdoc typed nodeFrom/Label as {number}; they are a node object
   * and a label configuration respectively.)
   * @param {?} nodeFrom source node of the edge
   * @param {?} nodeTo target node of the edge
   * @param {Object} data edge payload; defaults to {}
   * @param {?} Edge edge rendering configuration (also stored as Config)
   * @param {?} Label label rendering configuration
   * @return {undefined}
   */
  initialize : function(nodeFrom, nodeTo, data, Edge, Label) {
    this.nodeFrom = nodeFrom;
    this.nodeTo = nodeTo;
    this.data = data || {};
    this.startData = {};
    this.endData = {};
    this.Config = this.Edge = Edge;
    this.Label = Label;
  }
});
Graph.Adjacence.implement(methods);
Graph.Util = {
/**
* @param {string} param
* @return {?}
*/
filter : function(param) {
if (!param || !($.type(param) == "string")) {
return function() {
return true;
};
}
var codeSegments = param.split(" ");
return function(searchParams) {
/** @type {number} */
var i = 0;
for (;i < codeSegments.length;i++) {
if (searchParams[codeSegments[i]]) {
return false;
}
}
return true;
};
},
  /**
   * Look up a node by id.
   * NOTE(review): the parameter names are misleading — the first argument is
   * the graph object (its `nodes` map is read) and the second is the node id.
   * @param {?} id the graph object
   * @param {?} i the node id
   * @return {?} the node, or undefined when absent
   */
  getNode : function(id, i) {
    return id.nodes[i];
  },
/**
* @param {Function} graph
* @param {Function} action
* @param {string} flags
* @return {undefined}
*/
eachNode : function(graph, action, flags) {
var filter = this.filter(flags);
var i;
for (i in graph.nodes) {
if (filter(graph.nodes[i])) {
action(graph.nodes[i]);
}
}
},
  /**
   * Alias for eachNode: iterate the graph's nodes, skipping those rejected
   * by the `flags` filter.
   * @param {?} opt_attributes the graph
   * @param {Function} action callback invoked with each accepted node
   * @param {string} flags space-separated property names to exclude
   * @return {undefined}
   */
  each : function(opt_attributes, action, flags) {
    this.eachNode(opt_attributes, action, flags);
  },
/**
* @param {Function} node
* @param {Function} action
* @param {string} flags
* @return {undefined}
*/
eachAdjacency : function(node, action, flags) {
var adj = node.adjacencies;
var filter = this.filter(flags);
var id;
for | |
import os
import logging
import copy
import numpy as np
import pandas as pd
import time
import uuid
import json
import operator
from supervised.algorithms.algorithm import BaseAlgorithm
from supervised.algorithms.registry import BINARY_CLASSIFICATION
from supervised.algorithms.registry import MULTICLASS_CLASSIFICATION
from supervised.model_framework import ModelFramework
from supervised.utils.metric import Metric
from supervised.utils.config import LOG_LEVEL
from supervised.utils.additional_metrics import AdditionalMetrics
from supervised.exceptions import NotTrainedException
logger = logging.getLogger(__name__)
logger.setLevel(LOG_LEVEL)
import matplotlib.pyplot as plt
from tabulate import tabulate
from supervised.utils.learning_curves import LearningCurves
class Ensemble:
algorithm_name = "Greedy Ensemble"
algorithm_short_name = "Ensemble"
    def __init__(
        self,
        optimize_metric="logloss",
        ml_task=BINARY_CLASSIFICATION,
        is_stacked=False,
        max_single_prediction_time=None,
    ):
        """Initialize an empty (not yet fitted) greedy ensemble.

        Args:
            optimize_metric (str): Name of the metric to optimize.
            ml_task: Task constant (binary / multiclass / regression).
            is_stacked (bool): Whether the ensemble is built over stacked models.
            max_single_prediction_time (float): Optional upper bound (seconds)
                for single-sample prediction time; ``None`` disables the check.
        """
        self.library_version = "0.1"
        self.uid = str(uuid.uuid4())
        self.metric = Metric({"name": optimize_metric})
        self.best_loss = self.metric.get_maximum()  # the best loss obtained by ensemble
        self.models_map = None  # name -> model, filled by get_oof_matrix()
        self.selected_models = []
        self.train_time = None
        self.total_best_sum = None  # total sum of predictions, the oof of ensemble
        self.target = None
        self.target_columns = None
        self.sample_weight = None
        self._ml_task = ml_task
        self._optimize_metric = optimize_metric
        self._is_stacked = is_stacked
        self._additional_metrics = None
        self._threshold = None  # decision threshold (binary classification only)
        self._name = "Ensemble_Stacked" if is_stacked else "Ensemble"
        self._scores = []  # per-iteration scores, used for the learning curve
        self.oof_predictions = None
        self._oof_predictions_fname = None
        self._single_prediction_time = None  # prediction time on single sample
        self._max_single_prediction_time = max_single_prediction_time
        self.model_prediction_time = {}  # name -> single-sample prediction time
    def get_train_time(self):
        """Return total seconds spent in ``fit``, or ``None`` if not fitted."""
        return self.train_time
    def get_final_loss(self):
        """Return the best loss achieved by the ensemble during fitting."""
        return self.best_loss
    def is_valid(self):
        """Return True when the fitted ensemble combines more than one model."""
        return len(self.selected_models) > 1
    def is_fast_enough(self, max_single_prediction_time):
        """Return True if the single-sample prediction time fits the limit.

        Considered fast enough when no limit is given or when the ensemble's
        own prediction time has not been measured yet.
        """
        # don't need to check
        if max_single_prediction_time is None:
            return True
        # no information about prediction time
        if self._single_prediction_time is None:
            return True
        return self._single_prediction_time < max_single_prediction_time
    def get_type(self):
        """Return the algorithm type name (currently always "Ensemble")."""
        prefix = ""  # "Stacked" if self._is_stacked else ""
        return prefix + self.algorithm_short_name
    def get_name(self):
        """Return this ensemble's name ("Ensemble" or "Ensemble_Stacked")."""
        return self._name
    def get_metric_name(self):
        """Return the name of the optimized metric."""
        return self.metric.name
    def get_metric(self):
        """Return the :class:`Metric` object being optimized."""
        return self.metric
    def get_out_of_folds(self):
        """Return the ensemble's out-of-fold predictions with target columns.

        Needed when the ensemble is treated as a model and additional metrics
        must be computed for it. Lookup order: in-memory cache, saved CSV
        file, then a frame assembled from ``total_best_sum``.
        """
        # single prediction (in case of binary classification and regression)
        if self.oof_predictions is not None:
            return self.oof_predictions.copy(deep=True)
        if self._oof_predictions_fname is not None:
            self.oof_predictions = pd.read_csv(self._oof_predictions_fname)
            return self.oof_predictions.copy(deep=True)
        if self.total_best_sum.shape[1] == 1:
            # NOTE(review): this single-column branch is not cached and does
            # not attach sample_weight — confirm that is intended.
            tmp_df = pd.DataFrame({"prediction": self.total_best_sum["prediction"]})
            tmp_df["target"] = self.target[self.target_columns]
            return tmp_df
        ensemble_oof = pd.DataFrame(
            data=self.total_best_sum,
            columns=self.total_best_sum.columns
        )
        ensemble_oof["target"] = self.target
        if self.sample_weight is not None:
            ensemble_oof["sample_weight"] = self.sample_weight
        self.oof_predictions = ensemble_oof
        return ensemble_oof
def _get_mean(self, oof_selected, best_sum, best_count):
resp = copy.deepcopy(oof_selected)
if best_count > 1:
resp += best_sum
resp /= float(best_count)
return resp
def get_oof_matrix(self, models):
# remember models, will be needed in predictions
self.models_map = {m.get_name(): m for m in models}
if self._max_single_prediction_time is not None:
self.model_prediction_time = {
m.get_name(): m._single_prediction_time for m in models
}
if not [
m for m in models if m.is_fast_enough(self._max_single_prediction_time)
]:
raise NotTrainedException(
"Can't contruct ensemble with prediction time smaller than limit."
)
oofs = {}
for m in models:
# do not use model with RandomFeature
if "RandomFeature" in m.get_name():
continue
# ensemble only the same level of stack
# if m._is_stacked != self._is_stacked:
# continue
oof = m.get_out_of_folds()
prediction_cols = [c for c in oof.columns if "prediction" in c]
oofs[m.get_name()] = oof[prediction_cols] # oof["prediction"]
if self.target is None:
self.target_columns = [c for c in oof.columns if "target" in c]
self.target = oof[
self.target_columns
] # it will be needed for computing advance model statistics
if self.sample_weight is None and "sample_weight" in oof.columns:
self.sample_weight = oof["sample_weight"]
return oofs, self.target, self.sample_weight
    def get_additional_metrics(self):
        """Compute once and cache extended metrics for the ensemble's OOF data.

        For multiclass tasks an arg-max ``label`` column is added before the
        computation; for binary classification the optimized decision
        threshold is stored in ``self._threshold``.
        """
        if self._additional_metrics is None:
            logger.debug("Get additional metrics for Ensemble")
            # 'target' - the target after processing used for model training
            # 'prediction' - out of folds predictions of the model
            oof_predictions = self.get_out_of_folds()
            prediction_cols = [c for c in oof_predictions.columns if "prediction" in c]
            target_cols = [c for c in oof_predictions.columns if "target" in c]
            oof_preds = oof_predictions[prediction_cols]
            if self._ml_task == MULTICLASS_CLASSIFICATION:
                cols = oof_preds.columns.tolist()
                # column names look like "prediction_<label>"; v[11:] strips
                # the 11-character "prediction_" prefix to recover the label
                labels = {i: v[11:] for i, v in enumerate(cols)}
                oof_preds["label"] = np.argmax(
                    np.array(oof_preds[prediction_cols]), axis=1
                )
                oof_preds["label"] = oof_preds["label"].map(labels)
            sample_weight = None
            if "sample_weight" in oof_predictions.columns:
                sample_weight = oof_predictions["sample_weight"]
            self._additional_metrics = AdditionalMetrics.compute(
                oof_predictions[target_cols], oof_preds, sample_weight, self._ml_task
            )
            if self._ml_task == BINARY_CLASSIFICATION:
                self._threshold = float(self._additional_metrics["threshold"])
        return self._additional_metrics
    def fit(self, oofs, y, sample_weight=None):
        """Greedily build the ensemble from out-of-fold predictions.

        In each round the model whose addition to the current blend most
        improves the metric is appended; a model may be selected repeatedly,
        which acts as an integer weight. The blend with the best overall
        score seen during the loop is the one that is kept.

        Args:
            oofs (dict): Model name -> DataFrame of out-of-fold predictions.
            y: Target values aligned with the OOF rows.
            sample_weight: Optional per-row weights passed to the metric.

        Raises:
            NotTrainedException: If no model could be added at all.
        """
        logger.debug("Ensemble.fit")
        start_time = time.time()
        selected_algs_cnt = 0  # index of the best-scoring round so far
        self.best_algs = []  # selected algorithms, one name appended per round
        total_prediction_time = 0
        best_sum = None  # running sum of the selected models' predictions
        for j in range(len(oofs)):  # iterate over all solutions
            min_score = self.metric.get_maximum()
            best_model = None
            # try to add some algorithm to the best_sum to minimize metric
            for model_name in oofs.keys():
                if (
                    self._max_single_prediction_time
                    and model_name in self.model_prediction_time
                ):
                    # skip models that would push total prediction time over the limit
                    if (
                        total_prediction_time + self.model_prediction_time[model_name]
                        > self._max_single_prediction_time
                    ):
                        continue
                y_ens = self._get_mean(oofs[model_name], best_sum, j + 1)
                score = self.metric(y, y_ens, sample_weight)
                if self.metric.improvement(previous=min_score, current=score):
                    min_score = score
                    best_model = model_name
            if best_model is None:
                continue
            # there is improvement, save it
            # save scores for plotting learning curve
            # if we optimize negative, then we need to multiply by -1.0
            # to save correct values in the learning curve
            sign = -1.0 if Metric.optimize_negative(self.metric.name) else 1.0
            self._scores += [sign * min_score]
            if self.metric.improvement(previous=self.best_loss, current=min_score):
                self.best_loss = min_score
                selected_algs_cnt = j
            self.best_algs.append(best_model)  # save the best algorithm
            # update best_sum value
            best_sum = (
                oofs[best_model] if best_sum is None else best_sum + oofs[best_model]
            )
            # snapshot the blend whenever this round is the best one so far
            if j == selected_algs_cnt:
                self.total_best_sum = copy.deepcopy(best_sum)
            # update prediction time estimate
            if self._max_single_prediction_time is not None:
                total_prediction_time = np.sum(
                    [
                        self.model_prediction_time[name]
                        for name in np.unique(self.best_algs)
                    ]
                )
        # end of main loop #
        if not self.best_algs:
            raise NotTrainedException("Ensemble wasn't fitted.")
        # keep oof predictions of ensemble; turn the snapshot sum into a mean
        self.total_best_sum /= float(selected_algs_cnt + 1)
        self.best_algs = self.best_algs[: (selected_algs_cnt + 1)]
        logger.debug("Selected models for ensemble:")
        for model_name in np.unique(self.best_algs):
            self.selected_models += [
                {
                    "model": self.models_map[model_name],
                    "repeat": float(self.best_algs.count(model_name)),
                }
            ]
            logger.debug(f"{model_name} {self.best_algs.count(model_name)}")
        self._additional_metrics = self.get_additional_metrics()
        self.train_time = time.time() - start_time
    def predict(self, X, X_stacked=None):
        """Predict as the repeat-weighted average of the selected models.

        Args:
            X (pandas.DataFrame): Input data for plain models.
            X_stacked (pandas.DataFrame): Input data for stacked models
                (used when a selected model was trained on stacked features).

        Returns:
            pandas.DataFrame: Averaged prediction columns; for multiclass
            tasks an arg-max ``label`` column is appended.
        """
        logger.debug(
            "Ensemble.predict with {} models".format(len(self.selected_models))
        )
        y_predicted_ensemble = None
        total_repeat = 0.0
        for selected in self.selected_models:
            model = selected["model"]
            repeat = selected["repeat"]
            total_repeat += repeat
            if model._is_stacked:
                y_predicted_from_model = model.predict(X_stacked)
            else:
                y_predicted_from_model = model.predict(X)
            prediction_cols = []
            if self._ml_task in [BINARY_CLASSIFICATION, MULTICLASS_CLASSIFICATION]:
                prediction_cols = [
                    c for c in y_predicted_from_model.columns if "prediction_" in c
                ]
            else:  # REGRESSION
                prediction_cols = ["prediction"]
            y_predicted_from_model = y_predicted_from_model[prediction_cols]
            # running repeat-weighted sum of the model predictions
            y_predicted_ensemble = (
                y_predicted_from_model * repeat
                if y_predicted_ensemble is None
                else y_predicted_ensemble + y_predicted_from_model * repeat
            )
        y_predicted_ensemble /= total_repeat
        if self._ml_task == MULTICLASS_CLASSIFICATION:
            cols = y_predicted_ensemble.columns.tolist()
            # column names look like "prediction_<label>"; v[11:] strips the prefix
            labels = {i: v[11:] for i, v in enumerate(cols)}
            y_predicted_ensemble["label"] = np.argmax(
                np.array(y_predicted_ensemble[prediction_cols]), axis=1
            )
            y_predicted_ensemble["label"] = y_predicted_ensemble["label"].map(labels)
        return y_predicted_ensemble
def to_json(self):
models_json = []
for selected in self.selected_models:
model = selected["model"]
repeat = selected["repeat"]
models_json += [{"model": model.to_json(), "repeat": repeat}]
json_desc = {
"library_version": self.library_version,
"algorithm_name": self.algorithm_name,
"algorithm_short_name": self.algorithm_short_name,
"uid": self.uid,
"models": models_json,
}
return json_desc
    def from_json(self, json_desc):
        """Restore the ensemble from a dict produced by :meth:`to_json`.

        Missing keys keep their current values; member models are rebuilt as
        :class:`ModelFramework` instances.
        """
        self.library_version = json_desc.get("library_version", self.library_version)
        self.algorithm_name = json_desc.get("algorithm_name", self.algorithm_name)
        self.algorithm_short_name = json_desc.get(
            "algorithm_short_name", self.algorithm_short_name
        )
        self.uid = json_desc.get("uid", self.uid)
        self.selected_models = []
        models_json = json_desc.get("models")
        for selected in models_json:
            model = selected["model"]
            repeat = selected["repeat"]
            il = ModelFramework(model.get("params"))
            il.from_json(model)
            self.selected_models += [
                {"model": il, "repeat": repeat}
            ]
def save(self, results_path, model_subpath):
model_path = os.path.join(results_path, model_subpath)
logger.info(f"Save the ensemble to {model_path}")
predictions = self.get_out_of_folds()
predictions_fname = os.path.join(model_subpath, f"predictions_ensemble.csv")
self._oof_predictions_fname = os.path.join(results_path, predictions_fname)
predictions.to_csv(self._oof_predictions_fname, index=False)
with open(os.path.join(model_path, "ensemble.json"), "w") as fout:
ms = []
for selected in self.selected_models:
ms += [{"model": selected["model"]._name, "repeat": selected["repeat"]}]
desc = {
"name": self._name,
"ml_task": self._ml_task,
"optimize_metric": self._optimize_metric,
"selected_models": ms,
"predictions_fname": predictions_fname,
"metric_name": self.get_metric_name(),
"final_loss": self.get_final_loss(),
"train_time": self.get_train_time(),
"is_stacked": self._is_stacked,
}
if self._threshold is not None:
desc["threshold"] = self._threshold
fout.write(json.dumps(desc, indent=4))
LearningCurves.plot_for_ensemble(self._scores, self.metric.name, model_path)
# call additional metics just to be sure they are computed
self._additional_metrics = self.get_additional_metrics()
AdditionalMetrics.save(
self._additional_metrics, self._ml_task, self.model_markdown(), model_path
)
with open(os.path.join(model_path, "status.txt"), "w") as fout:
fout.write("ALL OK!")
def model_markdown(self):
select_models_desc = []
for selected in self.selected_models:
select_models_desc += [
{"model": selected["model"]._name, "repeat": selected["repeat"]}
]
desc = f"# Summary of {self.get_name()}\n\n"
desc += "[<< Go back](../README.md)\n\n"
desc += "\n## Ensemble structure\n"
selected = pd.DataFrame(select_models_desc)
desc += tabulate(selected.values, ["Model", "Weight"], tablefmt="pipe")
desc += "\n"
return desc
@staticmethod
def load(results_path, model_subpath, models_map):
model_path = os.path.join(results_path, model_subpath)
logger.info(f"Loading ensemble from {model_path}")
json_desc = json.load(open(os.path.join(model_path, "ensemble.json")))
ensemble = Ensemble(json_desc.get("optimize_metric"), json_desc.get("ml_task"))
ensemble._name = json_desc.get("name", ensemble._name)
ensemble._threshold = json_desc.get("threshold", | |
provided.
NotImplementedError: If ``auto_current_user_add`` is provided.
"""
_auto_current_user = False
_auto_current_user_add = False
    def __init__(
        self,
        name=None,
        *,
        auto_current_user=None,
        auto_current_user_add=None,
        indexed=None,
        repeated=None,
        required=None,
        default=None,
        choices=None,
        validator=None,
        verbose_name=None,
        write_empty_list=None
    ):
        """Constructor.

        The legacy ``auto_current_user`` and ``auto_current_user_add``
        keywords are accepted only so that callers get a clear error:
        providing either raises :exc:`NotImplementedError`.
        """
        super(UserProperty, self).__init__(
            name=name,
            indexed=indexed,
            repeated=repeated,
            required=required,
            default=default,
            choices=choices,
            validator=validator,
            verbose_name=verbose_name,
            write_empty_list=write_empty_list,
        )
        if auto_current_user is not None:
            raise NotImplementedError(
                "The auto_current_user argument is no longer supported."
            )
        if auto_current_user_add is not None:
            raise NotImplementedError(
                "The auto_current_user_add argument is no longer supported."
            )
    def _validate(self, value):
        """Validate a ``value`` before setting it.

        Called by the property machinery on assignment.

        Args:
            value (User): The value to check.

        Raises:
            .BadValueError: If ``value`` is not a :class:`User`.
        """
        if not isinstance(value, User):
            raise exceptions.BadValueError(
                "Expected User, got {!r}".format(value)
            )
    def _prepare_for_put(self, entity):
        """Pre-put hook.

        This is a no-op. In previous versions of ``ndb``, this method
        populated the value based on ``auto_current_user`` or
        ``auto_current_user_add``, but these flags have been disabled.

        Args:
            entity (Model): An entity with values.
        """
    def _db_set_value(self, v, p, value):
        """Helper for :meth:`_serialize`.

        Raises:
            NotImplementedError: Always. This method is virtual.
        """
        raise NotImplementedError
    def _db_get_value(self, v, unused_p):
        """Helper for :meth:`_deserialize`.

        Raises:
            NotImplementedError: Always. This method is deprecated.
        """
        raise exceptions.NoLongerImplementedError()
class KeyProperty(Property):
    """A property that contains :class:`.Key` values.

    The constructor for :class:`KeyProperty` allows at most two positional
    arguments. Any usage of :data:`None` as a positional argument will
    be ignored. Any of the following signatures are allowed:

    .. testsetup:: key-property-constructor

        from google.cloud import ndb

        class SimpleModel(ndb.Model):
            pass

    .. doctest:: key-property-constructor

        >>> name = "my_value"
        >>> ndb.KeyProperty(name)
        KeyProperty('my_value')
        >>> ndb.KeyProperty(SimpleModel)
        KeyProperty(kind='SimpleModel')
        >>> ndb.KeyProperty(name, SimpleModel)
        KeyProperty('my_value', kind='SimpleModel')
        >>> ndb.KeyProperty(SimpleModel, name)
        KeyProperty('my_value', kind='SimpleModel')

    The type of the positional arguments will be used to determine their
    purpose: a string argument is assumed to be the ``name`` and a
    :class:`type` argument is assumed to be the ``kind`` (and checked that
    the type is a subclass of :class:`Model`).

    .. automethod:: _validate

    Args:
        name (str): The name of the property.
        kind (Union[type, str]): The (optional) kind to be stored. If provided
            as a positional argument, this must be a subclass of :class:`Model`
            otherwise the kind name is sufficient.
        indexed (bool): Indicates if the value should be indexed.
        repeated (bool): Indicates if this property is repeated, i.e. contains
            multiple values.
        required (bool): Indicates if this property is required on the given
            model type.
        default (.Key): The default value for this property.
        choices (Iterable[.Key]): A container of allowed values for this
            property.
        validator (Callable[[~google.cloud.ndb.model.Property, .Key], bool]): A
            validator to be used to check values.
        verbose_name (str): A longer, user-friendly name for this property.
        write_empty_list (bool): Indicates if an empty list should be written
            to the datastore.
    """

    # Required kind (entity type name) of stored keys; ``None`` disables the check.
    _kind = None

    def __init__(
        self,
        *args,
        name=None,
        kind=None,
        indexed=None,
        repeated=None,
        required=None,
        default=None,
        choices=None,
        validator=None,
        verbose_name=None,
        write_empty_list=None
    ):
        # Positional arguments may carry the name and/or the kind; reconcile
        # them with the keyword arguments before calling the base constructor.
        name, kind = self._handle_positional(args, name, kind)
        super(KeyProperty, self).__init__(
            name=name,
            indexed=indexed,
            repeated=repeated,
            required=required,
            default=default,
            choices=choices,
            validator=validator,
            verbose_name=verbose_name,
            write_empty_list=write_empty_list,
        )
        if kind is not None:
            self._kind = kind

    @staticmethod
    def _handle_positional(args, name, kind):
        """Handle positional arguments.

        In particular, assign them to the "correct" values and make sure
        they don't collide with the relevant keyword arguments.

        Args:
            args (tuple): The positional arguments provided to the
                constructor.
            name (Optional[str]): The name that was provided as a keyword
                argument to the constructor.
            kind (Optional[Union[type, str]]): The kind that was provided as a
                keyword argument to the constructor.

        Returns:
            Tuple[Optional[str], Optional[str]]: The ``name`` and ``kind``
            inferred from the arguments. Either may be :data:`None`.

        Raises:
            TypeError: If ``args`` has more than 2 elements.
            TypeError: If a valid ``name`` type (i.e. a string) is specified
                twice in ``args``.
            TypeError: If a valid ``kind`` type (i.e. a subclass of
                :class:`Model`) is specified twice in ``args``.
            TypeError: If an element in ``args`` is not a :class:`str` or a
                subclass of :class:`Model`.
            TypeError: If a ``name`` is specified both in ``args`` and via
                the ``name`` keyword.
            TypeError: If a ``kind`` is specified both in ``args`` and via
                the ``kind`` keyword.
            TypeError: If a ``kind`` was provided via ``keyword`` and is
                not a :class:`str` or a subclass of :class:`Model`.
        """
        # Limit positional arguments.
        if len(args) > 2:
            raise TypeError(
                "The KeyProperty constructor accepts at most two "
                "positional arguments."
            )
        # Filter out None
        args = [value for value in args if value is not None]
        # Determine the name / kind inferred from the positional arguments:
        # a str positional is a name, a Model subclass positional is a kind.
        name_via_positional = None
        kind_via_positional = None
        for value in args:
            if isinstance(value, str):
                if name_via_positional is None:
                    name_via_positional = value
                else:
                    raise TypeError("You can only specify one name")
            elif isinstance(value, type) and issubclass(value, Model):
                if kind_via_positional is None:
                    kind_via_positional = value
                else:
                    raise TypeError("You can only specify one kind")
            else:
                raise TypeError(
                    "Unexpected positional argument: {!r}".format(value)
                )
        # Reconcile the two possible ``name`` values.
        if name_via_positional is not None:
            if name is None:
                name = name_via_positional
            else:
                raise TypeError("You can only specify name once")
        # Reconcile the two possible ``kind`` values; a Model subclass is
        # normalized to its kind string.
        if kind_via_positional is None:
            if isinstance(kind, type) and issubclass(kind, Model):
                kind = kind._get_kind()
        else:
            if kind is None:
                kind = kind_via_positional._get_kind()
            else:
                raise TypeError("You can only specify kind once")
        # Make sure the ``kind`` is a ``str``.
        if kind is not None and not isinstance(kind, str):
            raise TypeError("kind must be a Model class or a string")
        return name, kind

    def _constructor_info(self):
        """Helper for :meth:`__repr__`.

        Yields:
            Tuple[str, bool]: Pairs of argument name and a boolean indicating
            if that argument is a keyword.
        """
        # ``name`` and ``kind`` are reported explicitly (they arrive through
        # *args); the remaining arguments come from the base implementation.
        yield "name", False
        yield "kind", True
        from_inspect = super(KeyProperty, self)._constructor_info()
        for name, is_keyword in from_inspect:
            if name in ("args", "name", "kind"):
                continue
            yield name, is_keyword

    def _validate(self, value):
        """Validate a ``value`` before setting it.

        Args:
            value (.Key): The value to check.

        Raises:
            .BadValueError: If ``value`` is not a :class:`.Key`.
            .BadValueError: If ``value`` is a partial :class:`.Key` (i.e. it
                has no name or ID set).
            .BadValueError: If the current property has an associated ``kind``
                and ``value`` does not match that kind.
        """
        if not isinstance(value, Key):
            raise exceptions.BadValueError(
                "Expected Key, got {!r}".format(value)
            )
        # Reject incomplete keys.
        if not value.id():
            raise exceptions.BadValueError(
                "Expected complete Key, got {!r}".format(value)
            )
        # Verify kind if provided.
        if self._kind is not None:
            if value.kind() != self._kind:
                raise exceptions.BadValueError(
                    "Expected Key with kind={!r}, got "
                    "{!r}".format(self._kind, value)
                )

    def _db_set_value(self, v, unused_p, value):
        """Helper for :meth:`_serialize`.

        Raises:
            NotImplementedError: Always. This method is virtual.
        """
        raise NotImplementedError

    def _db_get_value(self, v, unused_p):
        """Helper for :meth:`_deserialize`.

        Raises:
            NotImplementedError: Always. This method is deprecated.
        """
        raise exceptions.NoLongerImplementedError()
class BlobKeyProperty(Property):
    """A property containing :class:`~google.cloud.ndb.model.BlobKey` values.

    .. automethod:: _validate
    """

    __slots__ = ()

    def _validate(self, value):
        """Validate a ``value`` before setting it.

        Args:
            value (~google.cloud.ndb.model.BlobKey): The value to check.

        Raises:
            .BadValueError: If ``value`` is not a
                :class:`~google.cloud.ndb.model.BlobKey`.
        """
        if not isinstance(value, BlobKey):
            raise exceptions.BadValueError(
                "Expected BlobKey, got {!r}".format(value)
            )

    def _db_set_value(self, v, p, value):
        """Helper for :meth:`_serialize`.

        Raises:
            NotImplementedError: Always. This method is virtual.
        """
        raise NotImplementedError

    def _db_get_value(self, v, unused_p):
        """Helper for :meth:`_deserialize`.

        Raises:
            NotImplementedError: Always. This method is deprecated.
        """
        raise exceptions.NoLongerImplementedError()
class DateTimeProperty(Property):
"""A property that contains :class:`~datetime.datetime` values.
This property expects "naive" datetime stamps, i.e. no timezone can
be set. Furthermore, the assumption is that naive datetime stamps
represent UTC.
.. note::
Unlike Django, ``auto_now_add`` can be overridden by setting the
value before writing the entity. And unlike the legacy
``google.appengine.ext.db``, ``auto_now`` does not supply a default
value. Also unlike legacy ``db``, when the entity is written, the
property values are updated to match what was written. Finally, beware
that this also updates the value in the in-process cache, **and** that
``auto_now_add`` may interact weirdly with transaction retries (a retry
of a property with ``auto_now_add`` set will reuse the value that was
set on the first try).
.. automethod:: _validate
.. automethod:: _prepare_for_put
Args:
name (str): The name of the property.
auto_now (bool): Indicates that the property should be set to the
current datetime when an entity is created and whenever it is
updated.
auto_now_add (bool): Indicates that the property should be set to the
current datetime | |
return self * ~other
div = _div_
    def __call__(self, y):
        """
        Return the composition of this power series and the power series y.

        EXAMPLES::

            sage: L = LazyPowerSeriesRing(QQ)
            sage: s = L([1])
            sage: t = L([0,0,1])
            sage: u = s(t)
            sage: u.coefficients(11)
            [1, 0, 1, 1, 2, 3, 5, 8, 13, 21, 34]

        Test Compose 2

        ::

            sage: s = L([1])
            sage: t = L([0,0,1,0])
            sage: u = s(t)
            sage: u.aorder
            0
            sage: u.order
            Unknown series order
            sage: u.coefficients(10)
            [1, 0, 1, 0, 1, 0, 1, 0, 1, 0]
            sage: u.aorder
            0
            sage: u.order
            0

        Test Compose 3 s = 1/(1-x), t = x/(1-x) s(t) = (1-x)/(1-2x)

        ::

            sage: s = L([1])
            sage: t = L([0,1])
            sage: u = s(t)
            sage: u.coefficients(14)
            [1, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096]
        """
        # Coefficients are produced lazily by _compose_gen; the approximate
        # orders of self and y are combined by multiplication (a*b).
        return self._new(partial(self._compose_gen, y), lambda a,b:a*b, self, y)
    composition = __call__
    def _compose_gen(self, y, ao):
        """
        Return a iterator for the coefficients of the composition of this
        power series with the power series y.

        EXAMPLES::

            sage: L = LazyPowerSeriesRing(QQ)
            sage: s = L([1])
            sage: t = L([0,1])
            sage: g = s._compose_gen(t, 0)
            sage: [next(g) for i in range(10)]
            [1, 1, 2, 4, 8, 16, 32, 64, 128, 256]
        """
        # Composition needs y to have zero constant term, otherwise every
        # coefficient of the result would be an infinite sum.
        assert y.coefficient(0) == 0
        yield self._stream[0]
        # f(y) = f_0 + y * (tail of f)(y); z is the second summand, built lazily.
        z = self.tail().compose(y) * y
        # NOTE(review): appears to force computation of z's first coefficient /
        # order data before streaming — confirm.
        z.coefficient(1)
        n = 1
        while True:
            yield z._stream[n]
            n += 1
    def tail(self):
        """
        Return the power series whose coefficients obtained by subtracting
        the constant term from this series and then dividing by x.

        EXAMPLES::

            sage: from sage.combinat.species.stream import Stream
            sage: L = LazyPowerSeriesRing(QQ)
            sage: f = L(range(20))
            sage: g = f.tail()
            sage: g.coefficients(10)
            [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        """
        # Stream the coefficients starting at degree 1; the approximate order
        # of the result drops by one (bounded_decrement).
        return self._new(lambda a0: self.iterator(1), bounded_decrement, self)
def iterator(self, n=0, initial=None):
"""
Return an iterator for the coefficients of self starting at n.
EXAMPLES::
sage: from sage.combinat.species.stream import Stream
sage: L = LazyPowerSeriesRing(QQ)
sage: f = L(range(10))
sage: g = f.iterator(2)
sage: [next(g) for i in range(5)]
[2, 3, 4, 5, 6]
sage: g = f.iterator(2, initial=[0,0])
sage: [next(g) for i in range(5)]
[0, 0, 2, 3, 4]
"""
if initial is not None:
for x in initial:
yield x
while True:
yield self._stream[n]
n += 1
compose = __call__
    def _power_gen(self):
        """
        Return a generator for all the powers self^k starting with k = 1.

        EXAMPLES::

            sage: L = LazyPowerSeriesRing(QQ)
            sage: f = L([1,1,0])
            sage: g = f._power_gen()
            sage: next(g).coefficients(5)
            [1, 1, 0, 0, 0]
            sage: next(g).coefficients(5)
            [1, 2, 1, 0, 0]
            sage: next(g).coefficients(5)
            [1, 3, 3, 1, 0]
        """
        # Repeatedly multiply by self; each yielded series is lazy, so
        # coefficients are only computed on demand.
        z = self
        while True:
            yield z
            z = z*self
    def derivative(self):
        """
        Return the formal derivative of this lazy power series.

        EXAMPLES::

            sage: from sage.combinat.species.stream import Stream
            sage: L = LazyPowerSeriesRing(QQ)
            sage: one = L(1)
            sage: monom = L.gen()
            sage: s = L([1])
            sage: u = s.derivative()
            sage: u.coefficients(10)
            [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

        ::

            sage: s = L()
            sage: s._name = 's'
            sage: s.define(one+monom*s*s)
            sage: u = s.derivative()
            sage: u.coefficients(5) #[1*1, 2*2, 3*5, 4*14, 5*42]
            [1, 4, 15, 56, 210]

        ::

            sage: s = L([1])
            sage: t = L([0,1])
            sage: u = s(t).derivative()
            sage: v = (s.derivative().compose(t))*t.derivative()
            sage: u.coefficients(11)
            [1, 4, 12, 32, 80, 192, 448, 1024, 2304, 5120, 11264]
            sage: v.coefficients(11)
            [1, 4, 12, 32, 80, 192, 448, 1024, 2304, 5120, 11264]

        ::

            sage: s = L(); s._name='s'
            sage: t = L(); t._name='t'
            sage: s.define(monom+t*t*t)
            sage: t.define(monom+s*s)
            sage: u = (s*t).derivative()
            sage: v = s.derivative()*t + s*t.derivative()
            sage: u.coefficients(10)
            [0, 2, 3, 4, 30, 72, 133, 552, 1791, 4260]
            sage: v.coefficients(10)
            [0, 2, 3, 4, 30, 72, 133, 552, 1791, 4260]
            sage: u.coefficients(10) == v.coefficients(10)
            True

        ::

            sage: f = L._new_initial(2, Stream([0,0,4,5,6,0]))
            sage: d = f.derivative()
            sage: d.get_aorder()
            1
            sage: d.coefficients(5)
            [0, 8, 15, 24, 0]
        """
        # Coefficients come from _diff_gen (termwise n*a_n); the approximate
        # order of the derivative drops by one (bounded_decrement).
        return self._new(self._diff_gen, bounded_decrement, self)
    def _diff_gen(self, ao):
        """
        Return an iterator for the coefficients of the derivative of self.

        EXAMPLES::

            sage: L = LazyPowerSeriesRing(QQ)
            sage: f = L([1])
            sage: g = f._diff_gen(0)
            sage: [next(g) for i in range(10)]
            [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        """
        # d/dx sum a_n x^n = sum n*a_n x^(n-1); start from the degree-1 term.
        n = 1
        while True:
            yield n*self._stream[n]
            n += 1
    ###########
    #Integrals#
    ###########
    def integral(self, integration_constant = 0):
        """
        Return the formal integral of this series, with the given constant
        term.

        EXAMPLES::

            sage: L = LazyPowerSeriesRing(QQ)
            sage: zero = L(0)
            sage: s = zero
            sage: t = s.integral()
            sage: t.is_zero()
            True

        ::

            sage: s = zero
            sage: t = s.integral(1)
            sage: t.coefficients(6)
            [1, 0, 0, 0, 0, 0]
            sage: t._stream.is_constant()
            True

        ::

            sage: s = L.term(1, 0)
            sage: t = s.integral()
            sage: t.coefficients(6)
            [0, 1, 0, 0, 0, 0]
            sage: t._stream.is_constant()
            True

        ::

            sage: s = L.term(1,0)
            sage: t = s.integral(1)
            sage: t.coefficients(6)
            [1, 1, 0, 0, 0, 0]
            sage: t._stream.is_constant()
            True

        ::

            sage: s = L.term(1, 4)
            sage: t = s.integral()
            sage: t.coefficients(10)
            [0, 0, 0, 0, 0, 1/5, 0, 0, 0, 0]

        ::

            sage: s = L.term(1,4)
            sage: t = s.integral(1)
            sage: t.coefficients(10)
            [1, 0, 0, 0, 0, 1/5, 0, 0, 0, 0]

        TESTS::

            sage: from sage.combinat.species.stream import Stream
            sage: f = L._new_initial(2, Stream([0,0,4,5,6,0]))
            sage: i = f.derivative().integral()
            sage: i.get_aorder()
            2
            sage: i.coefficients(5)
            [0, 0, 4, 5, 6]
            sage: i = f.derivative().integral(1)
            sage: i.get_aorder()
            0
            sage: i.coefficients(5)
            [1, 0, 4, 5, 6]
        """
        if integration_constant == 0:
            # Zero constant term: coefficient of x^n is a_{n-1}/n, and the
            # approximate order goes up by one (increment).
            return self._new(self._integral_zero_gen, increment, self)
        else:
            # Nonzero constant term: the order of the result is exactly 0.
            L = self.parent()
            return L._new_initial(0, Stream(self._integral_nonzero_gen(integration_constant)))
    def _integral_zero_gen(self, ao):
        """
        Iterator of integral coefficients when the integration constant is 0.

        EXAMPLES::

            sage: L = LazyPowerSeriesRing(QQ)
            sage: s = L.gen()
            sage: g = s._integral_zero_gen(1)
            sage: [next(g) for i in range(5)]
            [0, 0, 1/2, 0, 0]
        """
        # The first ao coefficients of the integral are zero.
        for n in range(ao):
            yield self.parent().zero()
        n = ao
        while True:
            #Check to see if the stream is finite
            if self.is_finite(n-1):
                # Finite tail: a_{n-1} is already 0, so yield it and stop.
                yield self._stream[n-1]
                break
            else:
                # Coefficient of x^n in the integral is a_{n-1}/n.
                yield (Integer(1)/Integer(n))*self._stream[n-1]
            n += 1
    def _integral_nonzero_gen(self, integration_constant):
        """
        Iterator of integral coefficients for a nonzero integration constant.

        EXAMPLES::

            sage: from sage.combinat.species.stream import Stream
            sage: L = LazyPowerSeriesRing(QQ)
            sage: f = L._new_initial(2, Stream([0,0,4,5,6,0])).derivative()
            sage: g = f._integral_nonzero_gen(1)
            sage: [next(g) for i in range(5)]
            [1, 0, 4, 5, 6]
        """
        # The constant term of the integral is the integration constant.
        yield integration_constant
        ao = self.aorder
        assert ao != unk
        if ao == inf:
            # Integrating the zero series: only the constant term remains.
            yield self.parent()._zero_base_ring
        else:
            # Degrees 1 .. ao-1 of the integral are zero.
            for _ in range(ao-1):
                yield self.parent()._zero_base_ring
            n = max(1, ao)
            while True:
                # Force computation of the needed coefficient.
                self.coefficient(n - 1)
                #Check to see if the stream is finite
                if self.is_finite(n-1):
                    yield self.coefficient(n-1)
                    break
                else:
                    # Coefficient of x^n in the integral is a_{n-1}/n.
                    yield (Integer(1)/Integer(n))*self.coefficient(n-1)
                n += 1
    def is_finite(self, n=None):
        """
        Return whether this series is known to be a polynomial: either its
        order is infinite (the zero series), or the underlying stream has
        become constant and the inspected tail coefficients are all zero.

        EXAMPLES::

            sage: L = LazyPowerSeriesRing(QQ)
            sage: a = L([0,0,1,0,0]); a
            O(1)
            sage: a.is_finite()
            False
            sage: c = a[4]
            sage: a.is_finite()
            False
            sage: a.is_finite(4)
            False
            sage: c = a[5]
            sage: a.is_finite()
            True
            sage: a.is_finite(4)
            True
        """
        if self.order is inf:
            return True
        s = self._stream
        if n is None:
            n = len(s)
        # Constant stream whose computed tail entries (from n-1 on) are zero.
        if s.is_constant() and all(s[i] == 0 for i in range(n-1, max(n,len(s)))):
            return True
        return False
def exponential(self):
    """
    Return the exponential of this series, defined lazily as the unique
    solution ``g`` of ``g = C + \\int g f'`` with constant term 1.

    TESTS::

        sage: def inv_factorial():
        ....:     q = 1
        ....:     yield 0
        ....:     yield q
        ....:     n = 2
        ....:     while True:
        ....:         q = q / n
        ....:         yield q
        ....:         n += 1
        sage: L = LazyPowerSeriesRing(QQ)
        sage: f = L(inv_factorial()) #e^(x)-1
        sage: u = f.exponential()
        sage: g = inv_factorial()
        sage: z1 = [1,1,2,5,15,52,203,877,4140,21147,115975]
        sage: l1 = [z*next(g) for z in z1]
        sage: l1 = [1] + l1[1:]
        sage: u.coefficients(11)
        [1, 1, 1, 5/6, 5/8, 13/30, 203/720, 877/5040, 23/224, 1007/17280, 4639/145152]
        sage: l1 == u.coefficients(11)
        True
    """
    parent = self.parent()
    one = parent.base_ring()(1)
    result = parent()
    # Self-referential definition: result' = self' * result, result(0) = 1.
    result.define((self.derivative() * result).integral(one))
    return result
def __getitem__(self, i):
    """
    Return the ``i``-th coefficient of ``self``.

    This simply delegates to :meth:`coefficient`, forcing lazy
    computation up to position ``i`` if necessary.

    EXAMPLES::

        sage: L = LazyPowerSeriesRing(QQ)
        sage: f = L([1,2,3,0])
        sage: [f[i] for i in range(5)]
        [1, 2, 3, 0, 0]
    """
    return self.coefficient(i)
#########################
#Min and max restriction#
#########################
def restricted(self, min=None, max=None):
"""
Return the power series restricted to the coefficients starting at
``min`` and going up to, but not including ``max``.
If ``min`` is not specified, then it is assumed to be zero. If
``max`` is not specified, then it is assumed to be infinity.
EXAMPLES::
sage: L = LazyPowerSeriesRing(QQ)
sage: a = L([1])
sage: a.restricted().coefficients(10)
[1, 1, | |
)
idxs = np.nonzero(cond)[0]
assert len(idxs) == 1
idx = idxs[0]
orig_data2d_rownum = frame2d_idxs[idx]
if not old_sync_timestamp_source:
# Change the next line to 'timestamp' for old
# data (before May/June 2009 -- the switch to
# fview_ext_trig)
frame_timestamp = frame2d[idx]["cam_received_timestamp"]
else:
# previous version
frame_timestamp = frame2d[idx]["timestamp"]
found = None
for fmf, fmf_timestamp_qi in movie_tups_for_this_camn:
fmf_fnos = fmf_timestamp_qi.get_idxs(frame_timestamp)
if not len(fmf_fnos):
continue
assert len(fmf_fnos) == 1
# should only be one .ufmf with this frame and cam_id
assert found is None
fmf_fno = fmf_fnos[0]
found = (fmf, fmf_fno)
if found is None:
print(
"no image data for frame timestamp %s cam_id %s"
% (repr(frame_timestamp), cam_id)
)
continue
fmf, fmf_fno = found
image, fmf_timestamp = fmf.get_frame(fmf_fno)
mean_image = fmf.get_mean_for_timestamp(fmf_timestamp)
coding = fmf.get_format()
if imops.is_coding_color(coding):
image = imops.to_rgb8(coding, image)
mean_image = imops.to_rgb8(coding, mean_image)
else:
image = imops.to_mono8(coding, image)
mean_image = imops.to_mono8(coding, mean_image)
xy = (
int(round(frame2d[idx]["x"])),
int(round(frame2d[idx]["y"])),
)
maxsize = (fmf.get_width(), fmf.get_height())
# Accumulate cropped images. Note that the region
# of the full image that the cropped image
# occupies changes over time as the tracked object
# moves. Thus, averaging these cropped-and-shifted
# images is not the same as simply averaging the
# full frame.
roiradius = 25
warnings.warn(
"roiradius hard-coded to %d: could be set "
"from 3D tracking" % roiradius
)
tmp = clip_and_math(image, mean_image, xy, roiradius, maxsize)
im_coords, raw_im, mean_im, absdiff_im = tmp
max_absdiff_im = absdiff_im.max()
intermediate_thresh = intermediate_thresh_frac * max_absdiff_im
absdiff_im[absdiff_im <= intermediate_thresh] = 0
if erode > 0:
morphed_im = scipy.ndimage.grey_erosion(
absdiff_im, size=erode
)
## morphed_im = scipy.ndimage.binary_erosion(absdiff_im>1).astype(np.float32)*255.0
else:
morphed_im = absdiff_im
y0_roi, x0_roi = scipy.ndimage.center_of_mass(morphed_im)
x0 = im_coords[0] + x0_roi
y0 = im_coords[1] + y0_roi
if 1:
morphed_im_binary = morphed_im > 0
labels, n_labels = scipy.ndimage.label(morphed_im_binary)
morph_fail_because_multiple_blobs = False
if n_labels > 1:
x0, y0 = np.nan, np.nan
# More than one blob -- don't allow image.
if 1:
# for min flattening
morphed_im = np.empty(
morphed_im.shape, dtype=np.uint8
)
morphed_im.fill(255)
morph_fail_because_multiple_blobs = True
else:
# for mean flattening
morphed_im = np.zeros_like(morphed_im)
morph_fail_because_multiple_blobs = True
this_obj_framenumbers[camn].append(framenumber)
if save_images:
this_obj_raw_images[camn].append((raw_im, im_coords))
this_obj_mean_images[camn].append(mean_im)
this_obj_absdiff_images[camn].append(absdiff_im)
this_obj_morphed_images[camn].append(morphed_im)
this_obj_morph_failures[camn].append(
morph_fail_because_multiple_blobs
)
this_obj_im_coords[camn].append(im_coords)
this_obj_com_coords[camn].append((x0, y0))
this_obj_camn_pt_no[camn].append(orig_data2d_rownum)
if 0:
fname = "obj%05d_%s_frame%07d_pt%02d.png" % (
obj_id,
cam_id,
framenumber,
camn_pt_no,
)
plot_image_subregion(
raw_im,
mean_im,
absdiff_im,
roiradius,
fname,
im_coords,
view=filename2view[fmf.filename],
)
# Now, all the frames from all cameras for this obj_id
# have been gathered. Do a camera-by-camera analysis.
for camn in this_obj_absdiff_images:
cam_id = camn2cam_id[camn]
image_framenumbers = np.array(this_obj_framenumbers[camn])
if save_images:
raw_images = this_obj_raw_images[camn]
mean_images = this_obj_mean_images[camn]
absdiff_images = this_obj_absdiff_images[camn]
morphed_images = this_obj_morphed_images[camn]
morph_failures = np.array(this_obj_morph_failures[camn])
im_coords = this_obj_im_coords[camn]
com_coords = this_obj_com_coords[camn]
camn_pt_no_array = this_obj_camn_pt_no[camn]
all_framenumbers = np.arange(
image_framenumbers[0], image_framenumbers[-1] + 1
)
com_coords = np.array(com_coords)
if do_rts_smoothing:
# Perform RTS smoothing on center-of-mass coordinates.
# Find first good datum.
fgnz = np.nonzero(~np.isnan(com_coords[:, 0]))
com_coords_smooth = np.empty(com_coords.shape, dtype=np.float)
com_coords_smooth.fill(np.nan)
if len(fgnz[0]):
first_good = fgnz[0][0]
RTS_com_coords = com_coords[first_good:, :]
# Setup parameters for Kalman filter.
dt = 1.0 / fps
A = np.array(
[
[1, 0, dt, 0], # process update
[0, 1, 0, dt],
[0, 0, 1, 0],
[0, 0, 0, 1],
],
dtype=np.float,
)
C = np.array(
[[1, 0, 0, 0], [0, 1, 0, 0]], # observation matrix
dtype=np.float,
)
Q = 0.1 * np.eye(4) # process noise
R = 1.0 * np.eye(2) # observation noise
initx = np.array(
[RTS_com_coords[0, 0], RTS_com_coords[0, 1], 0, 0],
dtype=np.float,
)
initV = 2 * np.eye(4)
initV[0, 0] = 0.1
initV[1, 1] = 0.1
y = RTS_com_coords
xsmooth, Vsmooth = adskalman.adskalman.kalman_smoother(
y, A, C, Q, R, initx, initV
)
com_coords_smooth[first_good:] = xsmooth[:, :2]
# Now shift images
image_shift = com_coords_smooth - com_coords
bad_cond = np.isnan(image_shift[:, 0])
# broadcast zeros to places where no good tracking
image_shift[bad_cond, 0] = 0
image_shift[bad_cond, 1] = 0
shifted_morphed_images = [
shift_image(im, xy)
for im, xy in zip(morphed_images, image_shift)
]
results = flatten_image_stack(
image_framenumbers,
shifted_morphed_images,
im_coords,
camn_pt_no_array,
N=stack_N_images,
)
else:
results = flatten_image_stack(
image_framenumbers,
morphed_images,
im_coords,
camn_pt_no_array,
N=stack_N_images,
)
# The variable fno (the first element of the results
# tuple) is guaranteed to be contiguous and to span
# the range from the first to last frames available.
for (
fno,
av_im,
lowerleft,
orig_data2d_rownum,
orig_idx,
orig_idxs_in_average,
) in results:
# Clip image to reduce moment arms.
av_im[av_im <= final_thresh] = 0
fail_fit = False
fast_av_im = FastImage.asfastimage(av_im.astype(np.uint8))
try:
(x0_roi, y0_roi, area, slope, eccentricity) = fpc.fit(
fast_av_im
)
except realtime_image_analysis.FitParamsError as err:
fail_fit = True
this_morph_failures = morph_failures[orig_idxs_in_average]
n_failed_images = np.sum(this_morph_failures)
n_good_images = stack_N_images - n_failed_images
if n_good_images >= stack_N_images_min:
n_images_is_acceptable = True
else:
n_images_is_acceptable = False
if fail_fit:
x0_roi = np.nan
y0_roi = np.nan
area, slope, eccentricity = np.nan, np.nan, np.nan
if not n_images_is_acceptable:
x0_roi = np.nan
y0_roi = np.nan
area, slope, eccentricity = np.nan, np.nan, np.nan
x0 = x0_roi + lowerleft[0]
y0 = y0_roi + lowerleft[1]
if 1:
for row in dest_table.iterrows(
start=orig_data2d_rownum, stop=orig_data2d_rownum + 1
):
row["x"] = x0
row["y"] = y0
row["area"] = area
row["slope"] = slope
row["eccentricity"] = eccentricity
row.update() # save data
if save_images:
# Display debugging images
fname = "av_obj%05d_%s_frame%07d.png" % (
obj_id,
cam_id,
fno,
)
if save_image_dir is not None:
fname = os.path.join(save_image_dir, fname)
raw_im, raw_coords = raw_images[orig_idx]
mean_im = mean_images[orig_idx]
absdiff_im = absdiff_images[orig_idx]
morphed_im = morphed_images[orig_idx]
raw_l, raw_b = raw_coords[:2]
imh, imw = raw_im.shape[:2]
n_ims = 5
if 1:
# increase contrast
contrast_scale = 2.0
av_im_show = np.clip(av_im * contrast_scale, 0, 255)
margin = 10
scale = 3
# calculate the orientation line
yintercept = y0 - slope * x0
xplt = np.array(
[
lowerleft[0] - 5,
lowerleft[0] + av_im_show.shape[1] + 5,
]
)
yplt = slope * xplt + yintercept
if 1:
# only send non-nan values to plot
plt_good = ~np.isnan(xplt) & ~np.isnan(yplt)
xplt = xplt[plt_good]
yplt = yplt[plt_good]
top_row_width = scale * imw * n_ims + (1 + n_ims) * margin
SHOW_STACK = True
if SHOW_STACK:
n_stack_rows = 4
rw = scale * imw * stack_N_images + (1 + n_ims) * margin
row_width = max(top_row_width, rw)
col_height = (
n_stack_rows * scale * imh
+ (n_stack_rows + 1) * margin
)
stack_margin = 20
else:
row_width = top_row_width
col_height = scale * imh + 2 * margin
stack_margin = 0
canv = benu.Canvas(
fname,
row_width,
col_height + stack_margin,
color_rgba=(1, 1, 1, 1),
)
if SHOW_STACK:
for (stacki, s_orig_idx) in enumerate(
orig_idxs_in_average
):
(s_raw_im, s_raw_coords) = raw_images[s_orig_idx]
s_raw_l, s_raw_b = s_raw_coords[:2]
s_imh, s_imw = s_raw_im.shape[:2]
user_rect = (s_raw_l, s_raw_b, s_imw, s_imh)
x_display = (stacki + 1) * margin + (
scale * imw
) * stacki
for show in ["raw", "absdiff", "morphed"]:
if show == "raw":
y_display = scale * imh + 2 * margin
elif show == "absdiff":
y_display = 2 * scale * imh + 3 * margin
elif show == "morphed":
y_display = 3 * scale * imh + 4 * margin
display_rect = (
x_display,
y_display + stack_margin,
scale * raw_im.shape[1],
scale * raw_im.shape[0],
)
with canv.set_user_coords(
display_rect,
user_rect,
transform=cam_id2view[cam_id],
):
if show == "raw":
s_im = s_raw_im.astype(np.uint8)
elif show == "absdiff":
tmp = absdiff_images[s_orig_idx]
s_im = tmp.astype(np.uint8)
elif show == "morphed":
tmp = morphed_images[s_orig_idx]
s_im = tmp.astype(np.uint8)
canv.imshow(s_im, s_raw_l, s_raw_b)
sx0, sy0 = com_coords[s_orig_idx]
X = [sx0]
Y = [sy0]
# the raw coords in red
canv.scatter(
X, Y, color_rgba=(1, 0.5, 0.5, 1)
)
if do_rts_smoothing:
sx0, sy0 = com_coords_smooth[s_orig_idx]
X = [sx0]
Y = [sy0]
# the RTS smoothed coords in green
canv.scatter(
X, Y, color_rgba=(0.5, 1, 0.5, 1)
)
if s_orig_idx == orig_idx:
boxx = np.array(
[
s_raw_l,
s_raw_l,
s_raw_l + s_imw,
s_raw_l + s_imw,
s_raw_l,
]
)
boxy = np.array(
[
s_raw_b,
s_raw_b + s_imh,
s_raw_b + s_imh,
s_raw_b,
s_raw_b,
]
)
canv.plot(
boxx,
boxy,
color_rgba=(0.5, 1, 0.5, 1),
)
if show == "morphed":
canv.text(
"morphed %d" % (s_orig_idx - orig_idx,),
display_rect[0],
(
display_rect[1]
+ display_rect[3]
+ stack_margin
- 20
),
font_size=font_size,
color_rgba=(1, 0, 0, 1),
)
# Display raw_im
display_rect = (
| |
"""
American Academy of Actuaries stochastic log volatility interest rate process
References
----------
https://www.actuary.org/sites/default/files/pdf/life/lbrc_dec08.pdf, page 8
"""
from typing import Dict
import numpy as np
from pyesg.stochastic_process import StochasticProcess
class AcademyRateProcess(StochasticProcess):
"""
American Academy of Actuaries stochastic log volatility process. Models three linked
processes:
1. long-term-rate (internally modeled as a process on the log-rate)
2. nominal spread between long-term rate and short-term rate
3. monthly-volatility of the long-rate process (internally modeled as log-vol)
NOTE : most parameters provided as defaults are _monthly_ parameters, not _annual_
parameters; to keep consistent with the Academy Excel workbook, these values are
kept as the monthly defaults. Internally, the model converts them to annual
values. The Excel workbook is scaled to monthly timesteps, whereas this model is
scaled to annual timesteps. We can replicate the Excel results here by calling
`dt=1./12` to get monthly output steps.
Parameters
----------
beta1 : float, default 0.00509, reversion strength for long-rate process
beta2 : float, default 0.02685, reversion strength for spread process
beta3 : float, default 0.04001, reversion strength for volatility process
rho12 : float, default -0.19197, correlation between long-rate & spread
rho13 : float, default 0.0, correlation between long-rate & volatility
rho23 : float, default 0.0, correlation between spread & volatility
sigma2 : float, default 0.04148, volatility of the spread process
sigma3 : float, default 0.11489, volatility of the volatility process
tau1 : float, default 0.035, mean reversion value for long-rate process
tau2 : float, default 0.01, mean reversion value for spread process
tau3 : float, default 0.0287, mean reversion value for volatility process
theta : float, default 1.0, spread volatility factor exponent
phi : float, default 0.0002, spread tilting parameter
psi : float, default 0.25164, steepness adjustment
long_rate_max : float, default 0.18, soft cap of the long rate before perturbing
long_rate_min : float, default 0.0115, soft floor of the long rate before perturbing
Examples
--------
>>> arp = AcademyRateProcess.example()
>>> arp.correlation
array([[ 1. , -0.19197, 0. ],
[-0.19197, 1. , 0. ],
[ 0. , 0. , 1. ]])
>>> arp.drift(x0=[0.03, 0.0024, 0.03])
array([ 0.03236509, 0.00207876, -0.02126944])
>>> arp.diffusion(x0=[0.03, 0.0024, 0.03])
array([[ 0.10392305, 0. , 0. ],
[-0.00082753, 0.00423055, 0. ],
[ 0. , 0. , 0.39799063]])
>>> arp.expectation(x0=[0.03, 0.0024, 0.03], dt=1./12)
array([0.03008102, 0.00257323, 0.02994687])
>>> arp.standard_deviation(x0=[0.03, 0.0024, 0.03], dt=1./12)
array([[ 0.03 , 0. , 0. ],
[-0.00023889, 0.00122126, 0. ],
[ 0. , 0. , 0.11489 ]])
>>> arp.step(x0=[0.03, 0.0024, 0.03], dt=1./12, random_state=42)
array([0.03053263, 0.00228572, 0.03226032])
References
----------
https://www.actuary.org/sites/default/files/pdf/life/lbrc_dec08.pdf, page 8
"""
# pylint: disable=too-many-arguments,too-many-instance-attributes,too-many-locals
def __init__(
    self,
    beta1: float = 0.00509,  # reversion strength for long-rate process
    beta2: float = 0.02685,  # reversion strength for spread process
    beta3: float = 0.04001,  # reversion strength for volatility process
    rho12: float = -0.19197,  # correlation between long-rate & spread
    rho13: float = 0.0,  # correlation between long-rate & volatility
    rho23: float = 0.0,  # correlation between spread & volatility
    sigma2: float = 0.04148,  # volatility of the spread process
    sigma3: float = 0.11489,  # volatility of the volatility process
    tau1: float = 0.035,  # mean reversion value for long-rate process
    tau2: float = 0.01,  # mean reversion value for spread process
    tau3: float = 0.0287,  # mean reversion value for volatility process
    theta: float = 1.0,  # spread volatility factor exponent
    phi: float = 0.0002,  # spread tilting parameter
    psi: float = 0.25164,  # steepness adjustment
    long_rate_max: float = 0.18,  # soft cap of the long rate before perturbing
    long_rate_min: float = 0.0115,  # soft floor of the long rate before perturbing
) -> None:
    """Store the model parameters, annualizing the monthly-scaled ones."""
    super().__init__(dim=3)
    months = 12
    # Reversion speeds and the tilt/steepness factors scale linearly with
    # time; volatilities scale with the square root of time.
    self.beta1 = beta1 * months
    self.beta2 = beta2 * months
    self.beta3 = beta3 * months
    self.sigma2 = sigma2 * months ** 0.5
    self.sigma3 = sigma3 * months ** 0.5
    self.phi = phi * months
    self.psi = psi * months
    # Correlations, mean-reversion targets and bounds are time-scale free.
    self.rho12 = rho12
    self.rho13 = rho13
    self.rho23 = rho23
    self.tau1 = tau1
    self.tau2 = tau2
    self.tau3 = tau3
    self.theta = theta
    self.long_rate_max = long_rate_max
    self.long_rate_min = long_rate_min
@property
def correlation(self) -> np.ndarray:
    """Return the 3x3 correlation matrix of the three Brownian drivers."""
    rho12 = self.rho12
    rho13 = self.rho13
    rho23 = self.rho23
    return np.array(
        [
            [1.0, rho12, rho13],
            [rho12, 1.0, rho23],
            [rho13, rho23, 1.0],
        ]
    )
def coefs(self) -> Dict[str, float]:
    """Return the model parameters as a name -> value mapping."""
    return {
        "beta1": self.beta1,
        "beta2": self.beta2,
        "beta3": self.beta3,
        "rho12": self.rho12,
        "rho13": self.rho13,
        "rho23": self.rho23,
        "sigma2": self.sigma2,
        "sigma3": self.sigma3,
        "tau1": self.tau1,
        "tau2": self.tau2,
        "tau3": self.tau3,
        "theta": self.theta,
        "phi": self.phi,
        "psi": self.psi,
        "long_rate_max": self.long_rate_max,
        "long_rate_min": self.long_rate_min,
    }
def _apply(self, x0: np.ndarray, dx: np.ndarray) -> np.ndarray:
    """
    Apply the increment ``dx`` to the state ``x0``.

    The long rate (index 0) and volatility (index 2) are modeled as log
    processes, so their increments are applied multiplicatively via
    ``exp``; the spread (index 1) is arithmetic and updated additively.
    """
    # Select per-component slices for either a single state vector or a
    # batch of state vectors.
    if x0.ndim == 1:
        rate_, spread_, vol_ = np.s_[0], np.s_[1], np.s_[2]
    else:
        rate_, spread_, vol_ = np.s_[:, 0], np.s_[:, 1], np.s_[:, 2]
    out = np.empty_like(x0)
    out[rate_] = x0[rate_] * np.exp(dx[rate_])
    out[spread_] = x0[spread_] + dx[spread_]
    out[vol_] = x0[vol_] * np.exp(dx[vol_])
    return out
def _drift(self, x0: np.ndarray) -> np.ndarray:
    """
    Return the drift of the state ``x0 = [long-rate, spread, volatility]``.

    All three components are read from ``x0`` and written into a fresh
    array, so the terms update simultaneously.
    """
    # Select per-component slices for a single vector or a batch.
    if x0.ndim == 1:
        rate_, spread_, vol_ = np.s_[0], np.s_[1], np.s_[2]
    else:
        rate_, spread_, vol_ = np.s_[:, 0], np.s_[:, 1], np.s_[:, 2]
    drift = np.empty_like(x0)
    # Log-volatility of the long rate: Ornstein-Uhlenbeck pull toward
    # log(tau3) with speed beta3.  The model's `_apply` exponentiates the
    # increment, converting back to non-log volatility.
    drift[vol_] = self.beta3 * np.log(self.tau3 / x0[vol_])
    # Nominal spread: OU pull toward tau2 with speed beta2, plus a tilt
    # proportional (factor phi) to the log long-rate's distance from tau1.
    drift[spread_] = (
        self.beta2 * (self.tau2 - x0[spread_])
        + self.phi * np.log(x0[rate_] / self.tau1)
    )
    # Log long rate: OU pull toward log(tau1) with speed beta1, plus a
    # steepness effect (factor psi) from the spread's distance from tau2.
    raw_rate_drift = (
        self.beta1 * np.log(self.tau1 / x0[rate_])
        + self.psi * (self.tau2 - x0[spread_])
    )
    # Soft-cap and soft-floor the long rate before random perturbation is
    # added later; the random terms may still push it past the bounds.
    capped = np.minimum(np.log(self.long_rate_max / x0[rate_]), raw_rate_drift)
    drift[rate_] = np.maximum(np.log(self.long_rate_min / x0[rate_]), capped)
    return drift
def _diffusion(self, x0: np.ndarray) -> np.ndarray:
# diffusion is the covariance diagonal times the cholesky correlation matrix
# x0 is an array of [long-rate, spread, volatility]
# how do we need to slice the input array? store these before proceeding
if x0.ndim == 1:
longrate_ = np.s_[0]
spread_ = np.s_[1]
volatility_ = np.s_[2]
else:
longrate_ = np.s_[:, 0]
spread_ = np.s_[:, 1]
volatility_ = np.s_[:, 2]
# diffusion matrix starts as a copy of the initial array; we'll update below
diffusion = np.empty_like(x0)
| |
from __future__ import division, print_function, absolute_import
from multiprocessing import Process, Pipe
import yaml
import cv2
# Known issue: "No module named 'scipy.spatial.transform'" -- to resolve, try: pip3 install scipy==1.2
from scipy.spatial.transform import Rotation as R
from real_robots.constants import *
from real_robots.omnirobot_utils.marker_finder import MakerFinder
from real_robots.omnirobot_utils.marker_render import MarkerRender
from real_robots.omnirobot_utils.omnirobot_manager_base import OmnirobotManagerBase
from real_robots.omnirobot_utils.utils import PosTransformer
# Guard: this renderer is simulator-only and must not be imported when the
# real-robot configuration is active.
assert USING_OMNIROBOT_SIMULATOR, "Please set USING_OMNIROBOT_SIMULATOR to True in real_robots/constants.py"
# Noise magnitudes used when rendering the simulated camera view.
NOISE_VAR_ROBOT_POS = 0.01  # meter
NOISE_VAR_ROBOT_YAW = np.pi/180 * 2.5  # 2.5 deg (previous comment said 5 deg, which did not match the value)
NOISE_VAR_TARGET_PIXEL = 2  # pixel noise on target marker
NOISE_VAR_ROBOT_PIXEL = 2  # pixel noise on robot marker
NOISE_VAR_ENVIRONMENT = 0.03  # pixel noise of final image on LAB space
NOISE_VAR_ROBOT_SIZE_PROPOTION = 0.05  # noise of robot size proportion
NOISE_VAR_TARGET_SIZE_PROPOTION = 0.05  # noise of target size proportion
class OmniRobotEnvRender():
def __init__(self, init_x, init_y, init_yaw, origin_size, cropped_size,
             back_ground_path, camera_info_path,
             robot_marker_path, robot_marker_margin, target_marker_path, target_marker_margin,
             robot_marker_code, target_marker_code,
             robot_marker_length, target_marker_length, output_size, history_size=10, **_):
    """
    Class for rendering Omnirobot environment
    :param init_x: (float) initial x position of robot
    :param init_y: (float) initial y position of robot
    :param init_yaw: (float) initial yaw position of robot
    :param origin_size: (list of int) original camera's size (eg. [640,480]),
        the camera matrix should be corresponding to this size
    :param cropped_size: (list of int) cropped image's size (eg. [480,480])
        NOTE(review): this parameter appears to be unused -- self.cropped_size
        is recomputed below from the camera-info image size; confirm intent.
    :param back_ground_path: (str) back ground image's path, the image should be undistorted.
    :param camera_info_path: (str) camera info file's path (containing camera matrix)
    :param robot_marker_path: (str) robot maker's path, the marker should have a margin with several pixels
    :param robot_marker_margin: (list of int) marker's margin (eg. [3,3,3,3])
    :param target_marker_path: (str) target maker's path, the marker should have a margin with several pixels
    :param target_marker_margin: (list of int) marker's margin (eg. [3,3,3,3])
    :param robot_marker_code: (currently not supported, should be "None" by default) (numpy ndarray) optional,
        the code of robot marker, only used for detecting position directly from the image.
    :param target_marker_code: (currently not supported, should be "None" by default) (numpy ndarray) optional,
        the code of target marker, only used for detecting position directly from the image.
    :param robot_marker_length: (float) the physical length of the marker (in meter)
    :param target_marker_length: (float) the physical length of the marker (in meter)
    :param output_size: (list of int) the output image's size (eg. [224,224])
    :param history_size: (int) maximum number of past robot positions kept
    :param **_: other input params not used, so they are dropped
    """
    super(OmniRobotEnvRender, self).__init__()
    self.output_size = output_size
    # store the size of robot marker
    self.robot_marker_size_proprotion = 1.0
    # Initialize the direction
    self.init_pos = [init_x, init_y]
    self.init_yaw = init_yaw
    # OmniRobot's real position on the grid
    self.robot_pos = np.float32([0, 0])
    self.robot_yaw = 0  # in rad
    self.history_size = history_size
    self.robot_pos_past_k_steps = []
    # Last velocity command, used for simulating the controlling of velocity directly
    self.last_linear_velocity_cmd = np.float32(
        [0, 0])  # in m/s, in robot local frame
    self.last_rot_velocity_cmd = 0  # in rad/s
    # last wheel speeds command, used for simulating the controlling of wheel speed directly
    # [left_speed, front_speed, right_speed]
    self.last_wheel_speeds_cmd = np.float32([0, 0, 0])
    # OmniRobot's position command on the grid
    self.robot_pos_cmd = np.float32(self.init_pos[:])
    self.robot_yaw_cmd = self.init_yaw
    # Target's set position on the grid
    self.target_pos_cmd = np.float32([0, 0])
    self.target_yaw_cmd = 0.0
    # Target's real position on the grid
    self.target_pos = np.float32([0, 0])
    self.target_yaw = 0
    # status of moving
    self.move_finished = False
    self.target_pos_changed = False
    # Distance for each step
    self.step_distance = STEP_DISTANCE
    with open(camera_info_path, 'r') as stream:
        try:
            # NOTE(review): yaml.load without an explicit Loader is unsafe
            # on untrusted files (and deprecated); consider yaml.safe_load.
            contents = yaml.load(stream)
            camera_matrix = np.array(contents['camera_matrix']['data'])
            self.origin_size = np.array(
                [contents['image_height'], contents['image_width']])
            self.camera_matrix = np.reshape(camera_matrix, (3, 3))
            self.dist_coeffs = np.array(
                contents["distortion_coefficients"]["data"]).reshape((1, 5))
        except yaml.YAMLError as exc:
            print(exc)
    # The crop is the largest centred square that fits the camera image;
    # this overrides the ``cropped_size`` constructor argument.
    self.cropped_size = [np.min(self.origin_size), np.min(
        self.origin_size)]  # size after being cropped
    # restore the image before being cropped
    self.bg_img = np.zeros([*self.origin_size, 3], np.uint8)
    self.cropped_margin = (self.origin_size - self.cropped_size)/2.0
    # NOTE(review): np.int is deprecated in modern numpy; int would work.
    self.cropped_range = np.array([self.cropped_margin[0], self.cropped_margin[0]+self.cropped_size[0],
                                   self.cropped_margin[1],
                                   self.cropped_margin[1]+self.cropped_size[1]]).astype(np.int)
    back_ground_img = cv2.imread(back_ground_path)
    if(back_ground_img.shape[0:2] != self.cropped_size):
        print("input back ground image's size: ", back_ground_img.shape)
        print("resize to ", self.cropped_size)
        self.bg_img[self.cropped_range[0]:self.cropped_range[1], self.cropped_range[2]:self.cropped_range[3], :] \
            = cv2.resize(back_ground_img, tuple(self.cropped_size))  # background image
    else:
        self.bg_img[self.cropped_range[0]:self.cropped_range[1], self.cropped_range[2]:self.cropped_range[3], :] \
            = back_ground_img  # background image
    self.bg_img = cv2.undistort(
        self.bg_img, self.camera_matrix, self.dist_coeffs)
    # Currently cannot find a solution to re-distort a image...
    self.target_bg_img = self.bg_img  # background image with target.
    self.image = self.bg_img  # image with robot and target
    # camera installation info
    r = R.from_euler('xyz', CAMERA_ROT_EULER_COORD_GROUND, degrees=True)
    camera_rot_mat_coord_ground = r.as_dcm()
    self.pos_transformer = PosTransformer(self.camera_matrix, self.dist_coeffs,
                                          CAMERA_POS_COORD_GROUND, camera_rot_mat_coord_ground)
    self.target_render = MarkerRender(noise_var=NOISE_VAR_TARGET_PIXEL)
    self.robot_render = MarkerRender(noise_var=NOISE_VAR_ROBOT_PIXEL)
    self.robot_render.setMarkerImage(cv2.imread(
        robot_marker_path, cv2.IMREAD_COLOR), robot_marker_margin)
    self.target_render.setMarkerImage(cv2.imread(
        target_marker_path, cv2.IMREAD_COLOR), target_marker_margin)
    if robot_marker_code is not None and target_marker_code is not None:
        self.marker_finder = MakerFinder(camera_info_path)
        self.marker_finder.setMarkerCode(
            'robot', robot_marker_code, robot_marker_length)
        self.marker_finder.setMarkerCode(
            'target', target_marker_code, target_marker_length)
def renderEnvLuminosityNoise(self, origin_image, noise_var=0.1, in_RGB=False, out_RGB=False):
    """
    render the different environment luminosity
    :param origin_image: (np.ndarray) input image, uint8; BGR unless in_RGB
    :param noise_var: (float) relative standard deviation of the per-channel
        LAB scaling (each channel is multiplied by 1 + N(0, noise_var))
    :param in_RGB: (bool) whether the input is RGB rather than BGR
    :param out_RGB: (bool) whether the output should be RGB rather than BGR
    :return: (np.ndarray) image with randomly perturbed luminosity/color
    """
    # variate luminosity and color
    # NOTE(review): the third positional argument of cv2.cvtColor is ``dst``,
    # not a depth flag -- passing cv2.CV_32F here is suspect; confirm that
    # this call behaves as intended on the OpenCV version in use.
    origin_image_LAB = cv2.cvtColor(
        origin_image, cv2.COLOR_RGB2LAB if in_RGB else cv2.COLOR_BGR2LAB, cv2.CV_32F)
    # NOTE(review): if the LAB image is uint8, the float products below are
    # cast back on assignment -- values may wrap/truncate; verify.
    origin_image_LAB[:, :, 0] = origin_image_LAB[:,
                                                 :, 0] * (np.random.randn() * noise_var + 1.0)
    origin_image_LAB[:, :, 1] = origin_image_LAB[:,
                                                 :, 1] * (np.random.randn() * noise_var + 1.0)
    origin_image_LAB[:, :, 2] = origin_image_LAB[:,
                                                 :, 2] * (np.random.randn() * noise_var + 1.0)
    out_image = cv2.cvtColor(
        origin_image_LAB, cv2.COLOR_LAB2RGB if out_RGB else cv2.COLOR_LAB2BGR, cv2.CV_8UC3)
    return out_image
def renderTarget(self):
    """Draw the target marker onto the background image (with size noise)."""
    pixel_pos = self.pos_transformer.phyPosGround2PixelPos(
        self.target_pos.reshape(2, 1))
    size_proportion = np.random.randn() * NOISE_VAR_TARGET_SIZE_PROPOTION + 1.0
    self.target_bg_img = self.target_render.addMarker(
        self.bg_img, pixel_pos, self.target_yaw, size_proportion)
def renderRobot(self):
    """Draw the robot marker on top of the background-plus-target image."""
    pixel_pos = self.pos_transformer.phyPosGround2PixelPos(
        self.robot_pos.reshape(2, 1))
    self.image = self.robot_render.addMarker(
        self.target_bg_img, pixel_pos, self.robot_yaw,
        self.robot_marker_size_proprotion)
def getHistorySize(self):
    """Return the maximum number of past robot positions kept in history."""
    return self.history_size
def appendToHistory(self, pos):
    """Append a robot position to the position history list."""
    self.robot_pos_past_k_steps.append(pos)
def popOfHistory(self):
    """Drop the oldest entry from the position history list."""
    self.robot_pos_past_k_steps.pop(0)
def emptyHistory(self):
    """Reset the position history to an empty list."""
    self.robot_pos_past_k_steps = []
def getCroppedImage(self):
return self.image[self.cropped_range[0]:self.cropped_range[1], self.cropped_range[2]:self.cropped_range[3], :]
def findMarkers(self):
    """
    Detect the robot and target marker poses directly from the rendered
    image, update the stored positions/yaws, and print the error with
    respect to the commanded poses.

    NOTE(review): ``assert NotImplementedError`` below is a no-op (the
    exception class itself is truthy); it was presumably meant to flag that
    this path is untested. It is kept so existing callers see no behavior
    change from this review.
    """
    assert NotImplementedError
    # this is not tested
    tags_trans_coord_cam, tags_rot_coord_cam = self.marker_finder.getMarkerPose(
        self.image, ['robot', 'target'], True)
    if 'robot' in tags_trans_coord_cam:
        self.robot_pos = self.pos_transformer.phyPosCam2PhyPosGround(
            tags_trans_coord_cam['robot'])
        tag_rot_coord_ground = np.matmul(
            self.pos_transformer.camera_2_ground_trans[0:3, 0:3],
            tags_rot_coord_cam['robot'])[0:3, 0:3]
        # BUG FIX: scipy's Rotation.as_euler keyword is ``degrees``, not
        # ``degree`` -- the original raised TypeError at runtime.
        self.robot_yaw = R.from_dcm(
            tag_rot_coord_ground).as_euler('zyx', degrees=False)
        # BUG FIX: the original used '.' instead of ',' in these two prints,
        # raising AttributeError ('str' has no attribute ...) at runtime.
        print("robot_error: ", self.robot_pos - self.robot_pos_cmd)
        print("robot_yaw_error: ", self.robot_yaw - self.robot_yaw_cmd)
    if 'target' in tags_trans_coord_cam:
        self.target_pos = self.pos_transformer.phyPosCam2PhyPosGround(
            tags_trans_coord_cam['target'])
        tag_rot_coord_ground = np.matmul(
            self.pos_transformer.camera_2_ground_trans[0:3, 0:3],
            tags_rot_coord_cam['target'])[0:3, 0:3]
        # Same ``degrees`` keyword fix as above.
        self.target_yaw = R.from_dcm(
            tag_rot_coord_ground).as_euler('zyx', degrees=False)
        print("target_error: ", self.target_pos - self.target_pos_cmd)
        print("target_yaw_error: ", self.target_yaw - self.target_yaw_cmd)
def setRobotCmdConstrained(self, x, y, yaw):
    """
    Set the commanded robot pose, clamping the position into the reachable
    area [MIN_X, MAX_X] x [MIN_Y, MAX_Y] and normalizing the yaw.

    BUG FIX: the original assigned ``max(x, MIN_X)`` and then immediately
    overwrote it with ``min(x, MAX_X)`` (same for y), so the lower bound
    was never applied. Both bounds are now enforced.
    """
    self.robot_pos_cmd[0] = min(max(x, MIN_X), MAX_X)
    self.robot_pos_cmd[1] = min(max(y, MIN_Y), MAX_Y)
    self.robot_yaw_cmd = self.normalizeAngle(yaw)
def setRobotCmd(self, x, y, yaw):
    """
    Set the commanded robot pose and derive the simulated "real" pose by
    adding Gaussian position/yaw noise to the command.
    :param x: (float) commanded x position (meter)
    :param y: (float) commanded y position (meter)
    :param yaw: (float) commanded yaw (rad), normalized before storing
    """
    self.robot_pos_cmd[0] = x
    self.robot_pos_cmd[1] = y
    self.robot_yaw_cmd = self.normalizeAngle(yaw)
    self.robot_pos = self.robot_pos_cmd + \
        np.random.randn(2) * NOISE_VAR_ROBOT_POS  # add noise
    self.robot_yaw = self.normalizeAngle(
        self.robot_yaw_cmd + np.random.randn() * NOISE_VAR_ROBOT_YAW)  # add noise
def setTargetCmd(self, x, y, yaw):
    """
    Set the commanded target pose; unlike the robot, the target's "real"
    pose equals the command exactly (no noise is added).
    :param x: (float) target x position (meter)
    :param y: (float) target y position (meter)
    :param yaw: (float) target yaw (rad), normalized before storing
    """
    self.target_pos_cmd[0] = x
    self.target_pos_cmd[1] = y
    self.target_yaw_cmd = self.normalizeAngle(yaw)
    self.target_pos = self.target_pos_cmd
    self.target_yaw = self.normalizeAngle(self.target_yaw_cmd)
def forward(self, action=None):
    """Translate one step along +x (``action`` unused; kept for API parity)."""
    new_x = self.robot_pos_cmd[0] + self.step_distance
    self.setRobotCmd(new_x, self.robot_pos_cmd[1], self.robot_yaw_cmd)
def backward(self, action=None):
    """Translate one step along -x (``action`` unused; kept for API parity)."""
    new_x = self.robot_pos_cmd[0] - self.step_distance
    self.setRobotCmd(new_x, self.robot_pos_cmd[1], self.robot_yaw_cmd)
def left(self, action=None):
    """Translate one step along +y (``action`` unused; kept for API parity)."""
    new_y = self.robot_pos_cmd[1] + self.step_distance
    self.setRobotCmd(self.robot_pos_cmd[0], new_y, self.robot_yaw_cmd)
def right(self, action=None):
    """Translate one step along -y (``action`` unused; kept for API parity)."""
    new_y = self.robot_pos_cmd[1] - self.step_distance
    self.setRobotCmd(self.robot_pos_cmd[0], new_y, self.robot_yaw_cmd)
def moveContinous(self, action):
    """Displace the commanded position by (action[0], action[1])."""
    new_x = self.robot_pos_cmd[0] + action[0]
    new_y = self.robot_pos_cmd[1] + action[1]
    self.setRobotCmd(new_x, new_y, self.robot_yaw_cmd)
def moveByVelocityCmd(self, speed_x, speed_y, speed_yaw):
    """
    Simulate the robot being driven by a velocity command.

    Assumed to be called at frequency RL_CONTROL_FREQ in the simulation.
    The pose reached *now* is produced by the previous velocity command,
    applied for 1/RL_CONTROL_FREQ seconds with the heading held constant
    (an approximation that works when RL_CONTROL_FREQ is high enough);
    the new command is then stored for the next step.

    :param speed_x: (float) linear speed along x-axis (m/s), robot frame
    :param speed_y: (float) linear speed along y-axis (m/s), robot frame
    :param speed_yaw: (float) rotation speed around z-axis (rad/s), robot frame
    """
    cos_h = np.cos(self.robot_yaw)
    sin_h = np.sin(self.robot_yaw)
    vx = self.last_linear_velocity_cmd[0]
    vy = self.last_linear_velocity_cmd[1]
    # rotate the body-frame velocity into the ground frame and integrate
    ground_x = self.robot_pos[0] + (vx * cos_h - vy * sin_h) / RL_CONTROL_FREQ
    ground_y = self.robot_pos[1] + (vy * cos_h + vx * sin_h) / RL_CONTROL_FREQ
    ground_yaw = self.robot_yaw + self.last_rot_velocity_cmd / RL_CONTROL_FREQ
    self.setRobotCmd(ground_x, ground_y, ground_yaw)
    # remember this command; it takes effect on the next call
    self.last_linear_velocity_cmd[0] = speed_x
    self.last_linear_velocity_cmd[1] = speed_y
    self.last_rot_velocity_cmd = speed_yaw
def moveByWheelsCmd(self, left_speed, front_speed, right_speed):
"""
simuate the robot moved by wheel speed command
This function is assumed to be called at a frequency RL_CONTROL_FREQ in the simulation world
:param left_speed: (float) linear speed of left | |
<reponame>xiaozheshao/Policy-based-routing-verifier
from topo import *
import sys
import getopt
from z3 import *
import pickle
import gc
import threading
import ctypes
import random
class Usage(Exception):
    """Raised for command-line usage errors.

    Fix: forward the message to the ``Exception`` base class so that
    ``str(exc)`` and traceback output show the message instead of being
    empty.  The ``msg`` attribute is kept for existing callers.
    """

    def __init__(self, msg):
        super(Usage, self).__init__(msg)
        self.msg = msg
# This is the latest version: July 3rd, 2019
# We try to implement the paper version.
# We implement local preference for each record.
# Work for all possible policies
# local preference range: customer (1, 2); peer (2, 3, 4, 5); provider (5, 6, 7); ???
#
class Verifier:
    """Encodes an AS-level topology plus BGP import/export policies as Z3
    constraints and checks routing properties (e.g. traffic attraction).

    Wraps a :class:`Topo` and a Z3 ``Solver``; the ``add*`` methods translate
    per-AS policies into constraints, and the ``property*`` / ``check*``
    methods add the properties to be verified.
    """

    def __init__(self, opts, args, flag = True, fakenum = 0, violateexport = 0.0, violateimport = 0.0):
        """Build the topology from the CAIDA file given via ``-r``; ``flag``
        enables special-policy (PeerBoost) generation."""
        # t1 = Tactic("simplify")
        # t2 = Tactic("propagate-values")
        # t3 = Tactic("solve-eqs")
        # t4 = Tactic("bit-blast")
        # t5 = Tactic("smt")
        # self.solver = Then(t1, t2, t3, t4, t5).solver()
        self.solver = Solver()
        # z3.set_option(unsat_core=True)
        self.topo = None
        for opt, arg in opts:
            if opt == "-r":
                # read AS relationship from CAIDA file
                self.topo = Topo(arg, fakenum = fakenum, violateexport = violateexport, violateimport = violateimport)
                print "start generate special policy"
                # generate special policies, PeerBoost, in the AS topology:
                self.topo.gen_PB(flag)
                print "finish generate special policy"
                # based on topology and policy, generate announcement graph
                self.topo.buildgraph()

    # clear the state to original state
    def clear(self):
        """Discard the solver and reset the topology to its pristine state."""
        self.solver = Solver()
        self.topo.clear()

    def setupleaves(self):
        """Constrain the leaves of the announcement graph."""
        self.solver.add(And(self.topo.setupleaves()))

    def setwaypoint(self, attacker):
        """Mark AS ``attacker`` as the waypoint target."""
        # set the attacker AS as the waypoint target
        asnode = self.topo.dict[attacker]
        asnode.iswaypoint = True

    def isselected(self, node):
        # return whether the node is selected after trim
        return self.topo.selected(node)

    def setATTACKER(self, att):
        """Mark AS ``att`` as the attacker."""
        self.topo.dict[att].isattacker = True

    def addexport_without(self, withouts, link_break = False):
        """Add export constraints for every AS except those in ``withouts``."""
        for asid, node in self.topo.dict.iteritems():
            if asid not in withouts:
                self.solver.add(node.exportconstraints(self.topo.dict, self.topo.ASNUM, link_break))

    def setup_ITE_neighbor(self, dest, opt = False, selected_neighbor = 0):
        """Add AS-path-prepending export constraints for AS ``dest``."""
        node = self.topo.dict[dest]
        self.solver.add(node.exportconstraints_prepend(self.topo.dict, self.topo.ASNUM, opt = opt,
            selected_neighbor = selected_neighbor))

    # translate export policies and add into solver
    def addexport(self, link_break = False):
        """Add export constraints for every non-attacker AS."""
        for asid, node in self.topo.dict.iteritems():
            if not node.isattacker:
                self.solver.add(node.exportconstraints(self.topo.dict, self.topo.ASNUM, link_break))

    def addimport_without(self, withouts):
        """Add import constraints for every non-origin AS not in ``withouts``."""
        for asid, node in self.topo.dict.iteritems():
            if not node.origin and asid not in withouts:
                self.solver.add(node.importconstraints(self.topo.dict, self.topo.ASNUM))

    def addimport_bgp(self, ases):
        """Add plain-BGP import constraints for exactly the ASes in ``ases``."""
        for asid, node in self.topo.dict.iteritems():
            #if not node.isattacker and not node.origin:
            if asid in ases:
                self.solver.add(node.importconstraints_bgp(self.topo.dict, self.topo.ASNUM))

    # translate import policies and add into solver
    def addimport(self):
        """Add import constraints for every non-origin AS."""
        for asid, node in self.topo.dict.iteritems():
            #if not node.isattacker and not node.origin:
            if not node.origin:
                self.solver.add(node.importconstraints(self.topo.dict, self.topo.ASNUM))

    def devideproperty(self, aspath, asnum, attindex, power):
        # AS paths are encoded as base-``asnum`` numbers; this extracts the
        # digit at position ``power`` and compares it with ``attindex``.
        num = asnum ** power
        return (aspath / num) % asnum == attindex

    # the victim uses a best route going through the attacker
    def checkwaypoint(self, victim, attacker):
        """Require that the victim's best route carries the waypoint flag."""
        victimnet = self.topo.dict[victim]
        self.solver.add(victimnet.best.flag == True)

    #victim has best route
    def property_hasroute(self, victim):
        """Require that ``victim`` has a valid best route."""
        self.solver.add(self.topo.dict[victim].best.valid == True)

    def property_allhasroute(self):
        """Assert the NEGATION of "every AS has a valid best route";
        a sat result therefore means some AS can end up without a route."""
        p = []
        for id, net in self.topo.dict.iteritems():
            p.append(net.best.valid == True)
        self.solver.add(Not(And(p)))
        #self.property_hasroute(id)

    def addproperty(self, victim, attacker):
        # add property to check traffic attraction:
        # does the best route of the victim go through the attacker?
        attindex = self.topo.dict[attacker].index
        victimnet = self.topo.dict[victim]
        orlist = []
        for i in range(0,10):
            # test the attacker at any of the first 10 positions of the
            # base-ASNUM encoded AS path
            orlist.append(self.devideproperty(victimnet.best.aspath, self.topo.ASNUM, attindex, i))
        # property = Or(victimnet.best.aspath % self.topo.ASNUM == attindex, (victimnet.best.aspath / self.topo.ASNUM) % self.topo.ASNUM == attindex)
        self.solver.add(Or(orlist))
def printresult(s):
list = []
for d in s.model().decls():
list.append("%s = %s" % (d.name(), s.model()[d]))
list.sort()
count = 0
for d in list:
print d
count += 1
if count % 6 == 0:
print "\n"
# alltrim used for ibgp
def gen_rsys(opts, args, ORIGIN, VICTIM, ATTACKER, rsys = None, calprovider = False, stat = None, trim = True,
    alltrim = False, policyaware = False, violateexport = 0.0, violateimport = 0.0):
    """Build (or reset) a Verifier and prepare its topology for one query.

    Marks ATTACKER as the waypoint (unless ORIGIN == ATTACKER, i.e. no
    attacker), fixes the origin, trims the topology down to the parts
    relevant to ORIGIN/VICTIM/ATTACKER, and generates the announcement
    records.  Timing numbers are written into ``stat`` when provided.

    :param rsys: reuse this Verifier (after clearing it) instead of building
        a fresh one
    :param calprovider: also compute the provider cone (fresh build only)
    :param alltrim: trim against every AS instead of one victim (used for iBGP)
    :returns: the prepared Verifier
    """
    localtime = time.asctime(time.localtime(time.time()))
    print "start initialize verifier", localtime
    # sys.setrecursionlimit(65535)
    # initialize verifier. False means no special policy generation
    if rsys == None:
        rsys = Verifier(opts, args, False, violateexport = violateexport, violateimport = violateimport)
        if calprovider:
            localtime = time.asctime(time.localtime(time.time()))
            print "compute provider cone start", localtime
            rsys.topo.computeprovidercone()
            localtime = time.asctime(time.localtime(time.time()))
            print "compute provider cone end", localtime
    else:
        # reuse: wipe solver/topology state and time how long that takes
        t1 = time.time()
        rsys.clear()
        t2 = time.time()
        if stat:
            stat["cleartime"] = t2 - t1
    localtime = time.asctime(time.localtime(time.time()))
    print "finish initialize verifier", localtime
    # !!!!! set waypoint for ATTACKER
    if ORIGIN != ATTACKER:
        rsys.setwaypoint(ATTACKER)
    # set up origin (should happen before import constraints)
    # when ORIGIN == ATTACKER, there is no attacker
    # ORIGIN = 1
    # ATTACKER = 1
    # VICTIM = 25
    rsys.solver.add(rsys.topo.selectorigin(ORIGIN))
    # rsys.topo.selectattacker(ATTACKER)
    localtime = time.asctime(time.localtime(time.time()))
    print "start topotrim", localtime
    # origin, victim, attacker
    t1 = time.time()
    if not alltrim:
        # rsys.topo.topotrim(ORIGIN, VICTIM, ATTACKER, trim = trim)
        rsys.topo.topotrim_new([ORIGIN,ATTACKER], [VICTIM], trim = trim, policyaware = policyaware)
    else:
        # iBGP mode: trim against every AS, then clear the visited flag on
        # every node that did not survive any trim
        for id, net in rsys.topo.dict.iteritems():
            rsys.topo.topotrim(ORIGIN, id, ORIGIN, trim = trim)
        for id, net in rsys.topo.dict.iteritems():
            if not net.unode.selected:
                net.unode.visited = False
            if not net.dnode.selected:
                net.dnode.visited = False
    t2 = time.time()
    if stat:
        stat["trimtime"] = t2 - t1
    localtime = time.asctime(time.localtime(time.time()))
    print "finish topotrim", localtime
    rsys.topo.trimstat()
    print ("Attacker:", ATTACKER, " neighbors.")
    rsys.topo.neighborstat(ATTACKER)
    # generate necessary records after dag trim
    t1 = time.time()
    rsys.topo.gen_records()
    t2 = time.time()
    if stat:
        stat["gen_records_time"] = t2 -t1
    localtime = time.asctime(time.localtime(time.time()))
    print "finish record generation trim.", localtime
    return rsys
def gen_bestroute(opts, args, inORIGIN, inVICTIM, trim = True, num = 0):
    """Check whether a stable routing state exists for the given origin.

    Builds the verifier with no attacker (ATTACKER == ORIGIN), adds all
    export/import constraints plus the leaf setup, and asks Z3 for a model.
    Timing/size statistics are pickled to ``./stablestate_<num>.txt``.

    :returns: the Z3 check result (sat / unsat / unknown)
    """
    states={}
    states["origin"] = inORIGIN
    rsys = gen_rsys(opts, args, inORIGIN, inVICTIM, inORIGIN, trim = trim)
    t1 = time.time()
    localtime = time.asctime(time.localtime(t1))
    print ("start generate constraints", localtime)
    # add export constraints
    rsys.addexport()
    # add import constraints
    rsys.addimport()
    # when it is not trimmed, the leaves should be set up for valid
    rsys.setupleaves()
    t2 = time.time()
    localtime = time.asctime(time.localtime(t2))
    print ("finish generate constraints", localtime)
    states["gen_constraints"] = t2 - t1
    # add traffic attraction property
    # rsys.addproperty(VICTIM, ATTACKER)
    t1 = time.time()
    localtime = time.asctime(time.localtime(t1))
    print "start checking", localtime
    rst = rsys.solver.check()
    t2 = time.time()
    localtime = time.asctime(time.localtime(t2))
    print "result:", rst, localtime
    print rsys.solver.statistics()
    states["result"] = rst
    states["checking_time"] = t2 - t1
    states["dotrim"] = trim
    states["asnum"] = len(rsys.topo.dict)
    states["linknum"] = rsys.topo.relationnum
    print(states)
    # persist the run statistics for later analysis
    with open("./stablestate_" + str(num) + ".txt", "w") as f:
        pickle.dump(states, f)
    if rst == sat:
        # printresult(rsys.solver)
        # return
        print(rst)
    elif rst == unsat:
        print(rst)
    # for c in rsys.solver.assertions():
    #     print c
    return rst
def waypoint_link_break(opts, args, ORIGIN, SOURCE, POINT):
    """Check whether SOURCE might route through POINT to reach ORIGIN when
    export constraints allow link breaks.

    :returns: sat if SOURCE might use POINT, unsat if it provably will not,
        or None when POINT was trimmed away (SOURCE cannot use it at all)
    """
    rsys = gen_rsys(opts, args, ORIGIN, SOURCE, ORIGIN)
    # export constraints generated in link-break mode
    rsys.addexport(link_break=True)
    rsys.addimport()
    # property: best route of SOURCE goes through POINT
    rsys.addproperty(SOURCE, POINT)
    if not rsys.isselected(POINT):
        # POINT did not survive the topology trim, so no route can use it
        print ("[waypoint_link_break]", POINT, " is not selected after trim.", SOURCE, " will not use ", POINT)
        return
    localtime = time.asctime(time.localtime(time.time()))
    print "[waypoint_link_break] start checking", localtime
    rst = rsys.solver.check()
    localtime = time.asctime(time.localtime(time.time()))
    print "[waypoint_link_break] result:", rst, localtime
    print rsys.solver.statistics()
    if rst == sat:
        print ("[waypoint_link_break]", SOURCE, " might use ", POINT, " to reach ", ORIGIN)
        printresult(rsys.solver)
        # return
    elif rst == unsat:
        print ("[waypoint_link_break]", SOURCE, " will not use ", POINT, " to reach ", ORIGIN)
        for c in rsys.solver.assertions():
            print c
    return rst
def generatefaketopo(opts, args, num):
    """Build a Verifier over a topology augmented with ``num`` fake links.

    Fix: return the constructed Verifier instead of discarding it, so the
    generated topology is actually usable by callers (previously the result
    was dropped and the call had no observable effect beyond side effects
    of construction).  Returning a value is backward-compatible with
    callers that ignored the old ``None`` result.
    """
    rsys = Verifier(opts, args, False, fakenum = num)
    return rsys
def check_combination_opt(rsys, pset, qset, att):
    """Check satisfiability under the attacker's combined (optimized) strategy.

    Pushes a temporary solver scope, asserts the optimized attack-strategy
    constraint for AS ``att``, checks, and pops so solver state is untouched.
    """
    attacker_node = rsys.topo.dict[att]
    solver = rsys.solver
    solver.push()
    constraint = attacker_node.attackstrategy_combine_opt(
        rsys.topo.dict, rsys.topo.ASNUM, pset, qset)
    solver.add(constraint)
    verdict = solver.check()
    solver.pop()
    return verdict
def check_combination(rsys, p, q, att):
    """Check satisfiability under the attacker's combined strategy for the
    neighbor pair (p, q), leaving the solver state unchanged."""
    attacker_node = rsys.topo.dict[att]
    solver = rsys.solver
    solver.push()
    # TODO
    constraint = attacker_node.attackstrategy_combine(
        rsys.topo.dict, rsys.topo.ASNUM, p, q)
    solver.add(constraint)
    verdict = solver.check()
    solver.pop()
    return verdict
# TRY ATTACK announcement one by one. and record the total verification time.
def veri_trafficattraction_allinone(opts, args, ORIGIN, VICTIM, ATTACKER, stat, rsys = None):
rsys = gen_rsys(opts, args, ORIGIN, VICTIM, ATTACKER, rsys=rsys, calprovider= True)
rsys.setATTACKER(ATTACKER)
# add export constraints
rsys.addexport()
# add import constraints
rsys.addimport()
# done in rsys
rsys.checkwaypoint(VICTIM, ATTACKER)
# rsys.addproperty(VICTIM, ATTACKER)
attacker = rsys.topo.dict[ATTACKER]
rsys.solver.set("timeout", 600000)
localtime = time.asctime(time.localtime(time.time()))
print ("start to check", localtime)
'''
t1 = time.time()
vnet = rsys.topo.dict[VICTIM]
onet = rsys.topo.dict[ORIGIN]
constant = vnet.providercone & (~onet.providercone)
plist = []
for p in rsys.topo.att_neighbors:
# find potential t for all providers of the attacker
pnet = rsys.topo.dict[p]
rst = constant & pnet.providercone
tlist = []
for i in range(0, rsys.topo.ASNUMTotal):
if rst[i] == 1:
tlist.append(rsys.topo.providerindex2as[i].asid)
selecteflag = False
for t in tlist:
tnet = rsys.topo.dict[t]
if tnet.unode.selected or tnet.dnode.selected:
selecteflag = True
break
if selecteflag:
plist.append(p)
t2 = time.time()
'''
# pset = set(plist)
# qset = rsys.topo.att_allneighbors - pset
pset = rsys.topo.att_allneighbors
qset = set()
totalt1 = time.time()
rsys.solver.add(attacker.attackstrategy_allinone(rsys.topo.dict, rsys.topo.ASNUM, pset, qset))
rst = rsys.solver.check()
# rsys.solver.pop()
totalt2 = time.time()
stat["sumtimeallinone"] = totalt2 - totalt1
localtime = time.asctime(time.localtime(time.time()))
print ("finish the phase 1", localtime)
stat["plist"] = pset
stat["plist_num"] = len(pset)
stat["resultallinone"] = rst
# return rsys
# print ("+++++++++++++++++++++++++++++++++++ | |
sample_weight=sample_weight)
return cv_estimator.alpha_
def _get_theta_hat(self, X, sample_weight):
    """Estimate the debiasing matrix ``Theta_hat = diag(1/tau^2) @ C_hat``
    via nodewise lasso regressions of each column of X on the remaining
    columns (the standard debiased-lasso construction).

    Assumes that X has already been offset.
    """
    n_samples, n_features = X.shape
    # Special case: n_features=1 -- there are no other columns to regress
    # on, so C_hat is the 1x1 identity and tau^2 is the column's second moment.
    if n_features == 1:
        C_hat = np.ones((1, 1))
        tausq = (X.T @ X / n_samples).flatten()
        return np.diag(1 / tausq) @ C_hat
    # Compute Lasso coefficients for the columns of the design matrix
    # (one regression per feature, run in parallel).
    results = Parallel(n_jobs=self.n_jobs)(
        delayed(_get_theta_coefs_and_tau_sq)(i, X, sample_weight,
                                             self.alpha_cov, self.n_alphas_cov,
                                             self.max_iter, self.tol, self.random_state)
        for i in range(n_features))
    coefs, tausq = zip(*results)
    coefs = np.array(coefs)
    tausq = np.array(tausq)
    # Compute C_hat: ones on the diagonal, minus the nodewise lasso
    # coefficients off the diagonal.  Row i of ``coefs`` has n_features-1
    # entries (feature i itself was excluded from its own regression),
    # hence the index shift after the diagonal position.
    C_hat = np.diag(np.ones(n_features))
    C_hat[0][1:] = -coefs[0]
    for i in range(1, n_features):
        C_hat[i][:i] = -coefs[i][:i]
        C_hat[i][i + 1:] = -coefs[i][i:]
    # Compute theta_hat
    theta_hat = np.diag(1 / tausq) @ C_hat
    return theta_hat
def _get_unscaled_coef_var(self, X, theta_hat, sample_weight):
if sample_weight is not None:
norm_weights = sample_weight / np.sum(sample_weight)
sigma = X.T @ (norm_weights.reshape(-1, 1) * X)
else:
sigma = np.matmul(X.T, X) / X.shape[0]
_unscaled_coef_var = np.matmul(
np.matmul(theta_hat, sigma), theta_hat.T) / X.shape[0]
return _unscaled_coef_var
class MultiOutputDebiasedLasso(MultiOutputRegressor):
"""Debiased MultiOutputLasso model.
Implementation was derived from <https://arxiv.org/abs/1303.0518>.
Applies debiased lasso once per target. If only a flat target is passed in,
it reverts to the DebiasedLasso algorithm.
Parameters
----------
alpha : string | float, optional. Default='auto'.
Constant that multiplies the L1 term. Defaults to 'auto'.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
Given this, you should use the :class:`LinearRegression` object.
n_alphas : int, optional, default 100
How many alphas to try if alpha='auto'
alpha_cov : string | float, optional, default 'auto'
The regularization alpha that is used when constructing the pseudo inverse of
    the covariance matrix Theta used for correcting the lasso coefficient. Each
such regression corresponds to the regression of one feature on the remainder
of the features.
n_alphas_cov : int, optional, default 10
How many alpha_cov to try if alpha_cov='auto'.
fit_intercept : boolean, optional, default True
Whether to calculate the intercept for this model. If set
to False, no intercept will be used in calculations
(e.g. data is expected to be already centered).
precompute : True | False | array-like, default=False
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
random_state : int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional, default None
The seed of the pseudo random number generator that selects a random
feature to update. If int, random_state is the seed used by the random
number generator; If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random
number generator; If None, the random number generator is the
:class:`~numpy.random.mtrand.RandomState` instance used by :mod:`np.random<numpy.random>`. Used when
``selection='random'``.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
n_jobs : int or None, default None
How many jobs to use whenever parallelism is invoked
Attributes
----------
coef_ : array, shape (n_targets, n_features) or (n_features,)
Parameter vector (w in the cost function formula).
intercept_ : array, shape (n_targets, ) or float
Independent term in decision function.
selected_alpha_ : array, shape (n_targets, ) or float
Penalty chosen through cross-validation, if alpha='auto'.
coef_stderr_ : array, shape (n_targets, n_features) or (n_features, )
Estimated standard errors for coefficients (see ``coef_`` attribute).
intercept_stderr_ : array, shape (n_targets, ) or float
Estimated standard error intercept (see ``intercept_`` attribute).
"""
def __init__(self, alpha='auto', n_alphas=100, alpha_cov='auto', n_alphas_cov=10,
             fit_intercept=True,
             precompute=False, copy_X=True, max_iter=1000,
             tol=1e-4, warm_start=False,
             random_state=None, selection='cyclic', n_jobs=None):
    """Create the per-target DebiasedLasso template and register it with
    the MultiOutputRegressor machinery (one clone per target at fit time)."""
    lasso_params = dict(
        alpha=alpha, n_alphas=n_alphas, alpha_cov=alpha_cov,
        n_alphas_cov=n_alphas_cov, fit_intercept=fit_intercept,
        precompute=precompute, copy_X=copy_X, max_iter=max_iter,
        tol=tol, warm_start=warm_start, random_state=random_state,
        selection=selection, n_jobs=n_jobs)
    self.estimator = DebiasedLasso(**lasso_params)
    super().__init__(estimator=self.estimator, n_jobs=n_jobs)
def fit(self, X, y, sample_weight=None):
    """Fit the multi-output debiased lasso model.

    Parameters
    ----------
    X : ndarray or scipy.sparse matrix, (n_samples, n_features)
        Input data.
    y : array, shape (n_samples, n_targets) or (n_samples, )
        Target. Will be cast to X's dtype if necessary
    sample_weight : numpy array of shape [n_samples]
        Individual weights for each sample.
        The weights will be normalized internally.
    """
    # A 1-D target makes this estimator behave exactly like a single
    # DebiasedLasso: remember that and promote y to a column vector.
    self.flat_target = np.ndim(y) == 1
    if self.flat_target:
        y = np.asarray(y).reshape(-1, 1)
    super().fit(X, y, sample_weight)
    # Mirror the per-target fitted attributes onto this estimator.
    for attr_name in ("coef_", "coef_stderr_", "intercept_stderr_"):
        self._set_attribute(attr_name)
    self._set_attribute("intercept_",
                        condition=self.estimators_[0].fit_intercept,
                        default=0.0)
    self._set_attribute("selected_alpha_",
                        condition=(self.estimators_[0].alpha == 'auto'))
    return self
def predict(self, X):
    """Get the prediction using the debiased lasso.

    Parameters
    ----------
    X : ndarray or scipy.sparse matrix, (n_samples, n_features)
        Samples.

    Returns
    -------
    prediction : array like, shape (n_samples, ) or (n_samples, n_targets)
        The prediction at each point.
    """
    prediction = super().predict(X)
    # collapse the single-column result back to 1-D for a flat target
    return prediction.flatten() if self.flat_target else prediction
def prediction_stderr(self, X):
    """Get the standard error of the predictions using the debiased lasso.

    Parameters
    ----------
    X : ndarray or scipy.sparse matrix, (n_samples, n_features)
        Samples.

    Returns
    -------
    prediction_stderr : array like, shape (n_samples, ) or (n_samples, n_targets)
        The standard error of each coordinate of the output at each point we predict
    """
    X = check_array(X)
    n_outputs = len(self.estimators_)
    stderr = np.empty((X.shape[0], n_outputs))
    # one column of standard errors per fitted target estimator
    for idx, est in enumerate(self.estimators_):
        stderr[:, idx] = est.prediction_stderr(X)
    return stderr.flatten() if self.flat_target else stderr
def predict_interval(self, X, alpha=0.1):
    """Build prediction confidence intervals using the debiased lasso.

    Parameters
    ----------
    X : ndarray or scipy.sparse matrix, (n_samples, n_features)
        Samples.
    alpha: optional float in [0, 1] (Default=0.1)
        The overall level of confidence of the reported interval.
        The alpha/2, 1-alpha/2 confidence interval is reported.

    Returns
    -------
    (y_lower, y_upper) : tuple of arrays, shape (n_samples, n_targets) or (n_samples, )
        Returns lower and upper interval endpoints.
    """
    X = check_array(X)
    n_outputs = len(self.estimators_)
    lower = np.empty((X.shape[0], n_outputs))
    upper = np.empty((X.shape[0], n_outputs))
    # fill one column of bounds per fitted target estimator
    for idx, est in enumerate(self.estimators_):
        lower[:, idx], upper[:, idx] = est.predict_interval(X, alpha=alpha)
    if self.flat_target:
        return lower.flatten(), upper.flatten()
    return lower, upper
def coef__interval(self, alpha=0.1):
    """Get a confidence interval bounding the fitted coefficients.

    Parameters
    ----------
    alpha : float
        The confidence level. Will calculate the alpha/2-quantile and the (1-alpha/2)-quantile
        of the parameter distribution as confidence interval

    Returns
    -------
    (coef_lower, coef_upper) : tuple of arrays, shape (n_targets, n_coefs) or (n_coefs, )
        Returns lower and upper interval endpoints for the coefficients.
    """
    n_estimators = len(self.estimators_)
    n_coefs = self.estimators_[0].coef_.shape[0]
    coef_lower = np.empty((n_estimators, n_coefs))
    coef_upper = np.empty((n_estimators, n_coefs))
    for i, estimator in enumerate(self.estimators_):
        coef_lower[i], coef_upper[i] = estimator.coef__interval(alpha=alpha)
    # Consistency fix: test flat_target's truthiness like every other method
    # in this class instead of comparing the boolean to 1.
    if self.flat_target:
        coef_lower = coef_lower.flatten()
        coef_upper = coef_upper.flatten()
    return coef_lower, coef_upper
def intercept__interval(self, alpha=0.1):
    """Get a confidence interval bounding the fitted intercept.

    Parameters
    ----------
    alpha : float
        The confidence level. Will calculate the alpha/2-quantile and the (1-alpha/2)-quantile
        of the parameter distribution as confidence interval

    Returns
    -------
    (intercept_lower, intercept_upper) : tuple of arrays of size (n_targets, ) or tuple of floats
        Returns lower and upper interval endpoints for the intercept.
    """
    # single target: delegate directly and return a pair of floats
    if len(self.estimators_) == 1:
        return self.estimators_[0].intercept__interval(alpha=alpha)
    bounds = np.array([est.intercept__interval(alpha=alpha)
                       for est in self.estimators_])
    return bounds[:, 0], bounds[:, 1]
def get_params(self, deep=True):
    """Get parameters for this estimator (delegated to the wrapped DebiasedLasso)."""
    params = self.estimator.get_params(deep=deep)
    return params
def set_params(self, **params):
    """Set parameters for this estimator (delegated to the wrapped DebiasedLasso).

    Fix: return ``self``, matching the scikit-learn ``set_params`` contract
    so calls can be chained (the original returned ``None``). Returning a
    value is backward-compatible with callers that ignored the old result.
    """
    self.estimator.set_params(**params)
    return self
def _set_attribute(self, attribute_name, condition=True, default=None):
if condition:
if not self.flat_target:
attribute_value = np.array([getattr(estimator, attribute_name) for estimator in self.estimators_])
else:
attribute_value = getattr(self.estimators_[0], attribute_name)
else:
attribute_value = default
setattr(self, | |
(asyncio.CancelledError, SystemExit, KeyboardInterrupt):
raise
except BaseException as e:
self._results.append(
Diagnostic(
range=range_from_token_or_node(value, keyword_token),
message=str(e),
severity=DiagnosticSeverity.ERROR,
source=DIAGNOSTICS_SOURCE_NAME,
code=type(e).__qualname__,
)
)
if result is not None and analyse_run_keywords:
await self._analyse_run_keyword(result, value, argument_tokens)
return result
async def _analyse_run_keyword(
    self, keyword_doc: Optional[KeywordDoc], node: ast.AST, argument_tokens: List[Token]
) -> List[Token]:
    """Recursively analyze keywords invoked indirectly through one of the
    "Run Keyword*" variants, whose real keyword names live in the argument
    list instead of the keyword slot.

    :param keyword_doc: doc of the keyword just analyzed (may be None)
    :param node: AST node diagnostics should be attached to
    :param argument_tokens: remaining argument tokens to consume
    :returns: the argument tokens left after the run-keyword arguments
        (including any nested run-keyword chains) have been consumed
    """
    if keyword_doc is None or not keyword_doc.is_any_run_keyword():
        return argument_tokens
    # Run Keyword  <name> <args...>: first argument is the keyword name
    if keyword_doc.is_run_keyword() and len(argument_tokens) > 0 and is_non_variable_token(argument_tokens[0]):
        await self._analyze_keyword_call(argument_tokens[0].value, node, argument_tokens[0], argument_tokens[1:])
        return argument_tokens[1:]
    # Run Keyword If/Unless style: <condition> <name> <args...>
    elif (
        keyword_doc.is_run_keyword_with_condition()
        and len(argument_tokens) > 1
        and is_non_variable_token(argument_tokens[1])
    ):
        await self._analyze_keyword_call(argument_tokens[1].value, node, argument_tokens[1], argument_tokens[2:])
        return argument_tokens[2:]
    # Run Keywords: every argument is itself a keyword name
    elif keyword_doc.is_run_keywords():
        for t in argument_tokens:
            if is_non_variable_token(t):
                await self._analyze_keyword_call(t.value, node, t, [])
        return []
    # Run Keyword If with optional ELSE / ELSE IF branches
    elif keyword_doc.is_run_keyword_if() and len(argument_tokens) > 1 and is_non_variable_token(argument_tokens[1]):

        def skip_args() -> None:
            # drop tokens up to the next ELSE / ELSE IF marker (or the end)
            nonlocal argument_tokens
            while argument_tokens:
                if argument_tokens[0].value in ["ELSE", "ELSE IF"]:
                    break
                argument_tokens = argument_tokens[1:]

        # the IF branch: argument 0 is the condition, argument 1 the keyword
        result = await self._analyze_keyword_call(
            argument_tokens[1].value,
            node,
            argument_tokens[1],
            argument_tokens[2:],
            analyse_run_keywords=False,
        )
        argument_tokens = argument_tokens[2:]
        # the branch keyword may itself be a run-keyword: recurse into it
        if result is not None and result.is_any_run_keyword():
            argument_tokens = await self._analyse_run_keyword(result, node, argument_tokens)
        skip_args()
        while argument_tokens:
            if argument_tokens[0].value == "ELSE" and len(argument_tokens) > 1:
                # ELSE <keyword> <args...> -- terminal branch
                result = await self._analyze_keyword_call(
                    argument_tokens[1].value,
                    node,
                    argument_tokens[1],
                    argument_tokens[2:],
                    analyse_run_keywords=False,
                )
                argument_tokens = argument_tokens[2:]
                if result is not None and result.is_any_run_keyword():
                    argument_tokens = await self._analyse_run_keyword(result, node, argument_tokens)
                skip_args()
                break
            elif argument_tokens[0].value == "ELSE IF" and len(argument_tokens) > 2:
                # ELSE IF <condition> <keyword> <args...> -- may be followed
                # by further ELSE / ELSE IF branches, so keep looping
                result = await self._analyze_keyword_call(
                    argument_tokens[2].value,
                    node,
                    argument_tokens[2],
                    argument_tokens[3:],
                    analyse_run_keywords=False,
                )
                argument_tokens = argument_tokens[3:]
                if result is not None and result.is_any_run_keyword():
                    argument_tokens = await self._analyse_run_keyword(result, node, argument_tokens)
                skip_args()
            else:
                break
    return argument_tokens
async def visit_Fixture(self, node: ast.AST) -> None:  # noqa: N802
    """Analyze the keyword invoked by a [Setup]/[Teardown] fixture setting."""
    from robot.parsing.lexer.tokens import Token as RobotToken
    from robot.parsing.model.statements import Fixture

    fixture = cast(Fixture, node)
    name_token = cast(Token, fixture.get_token(RobotToken.NAME))
    # TODO: calculate possible variables in NAME
    if name_token is not None and is_non_variable_token(name_token):
        arguments = [cast(Token, t) for t in fixture.get_tokens(RobotToken.ARGUMENT)]
        await self._analyze_keyword_call(fixture.name, fixture, name_token, arguments)
    await self.generic_visit(node)
async def visit_TestTemplate(self, node: ast.AST) -> None:  # noqa: N802
    """Analyze the keyword referenced by a suite-level Test Template setting."""
    from robot.parsing.lexer.tokens import Token as RobotToken
    from robot.parsing.model.statements import TestTemplate

    template = cast(TestTemplate, node)
    name_token = cast(Token, template.get_token(RobotToken.NAME))
    # TODO: calculate possible variables in NAME
    if name_token is not None and is_non_variable_token(name_token):
        await self._analyze_keyword_call(template.value, template, name_token, [])
    await self.generic_visit(node)
async def visit_Template(self, node: ast.AST) -> None:  # noqa: N802
    """Analyze the keyword referenced by a test-level [Template] setting."""
    from robot.parsing.lexer.tokens import Token as RobotToken
    from robot.parsing.model.statements import Template

    template = cast(Template, node)
    name_token = cast(Token, template.get_token(RobotToken.NAME))
    # TODO: calculate possible variables in NAME
    if name_token is not None and is_non_variable_token(name_token):
        await self._analyze_keyword_call(template.value, template, name_token, [])
    await self.generic_visit(node)
async def visit_KeywordCall(self, node: ast.AST) -> None:  # noqa: N802
    """Analyze a keyword call statement inside a test case or keyword body."""
    from robot.parsing.lexer.tokens import Token as RobotToken
    from robot.parsing.model.statements import KeywordCall

    value = cast(KeywordCall, node)
    keyword_token = cast(RobotToken, value.get_token(RobotToken.KEYWORD))
    # an assignment (${var} =) with nothing on the right is an error
    if value.assign and not value.keyword:
        self._results.append(
            Diagnostic(
                range=range_from_token_or_node(value, value.get_token(RobotToken.ASSIGN)),
                message="Keyword name cannot be empty.",
                severity=DiagnosticSeverity.ERROR,
                source=DIAGNOSTICS_SOURCE_NAME,
                code="KeywordError",
            )
        )
    else:
        await self._analyze_keyword_call(
            value.keyword, value, keyword_token, [cast(Token, e) for e in value.get_tokens(RobotToken.ARGUMENT)]
        )
    # a keyword call outside any test case / keyword body can never run
    if not self.current_testcase_or_keyword_name:
        self._results.append(
            Diagnostic(
                range=range_from_token_or_node(value, value.get_token(RobotToken.ASSIGN)),
                message="Code is unreachable.",
                severity=DiagnosticSeverity.HINT,
                source=DIAGNOSTICS_SOURCE_NAME,
                tags=[DiagnosticTag.Unnecessary],
            )
        )
    await self.generic_visit(node)
async def visit_TestCase(self, node: ast.AST) -> None:  # noqa: N802
    """Analyze a test case body; flag empty test case names."""
    from robot.parsing.lexer.tokens import Token as RobotToken
    from robot.parsing.model.blocks import TestCase
    from robot.parsing.model.statements import TestCaseName

    testcase = cast(TestCase, node)
    if not testcase.name:
        header_token = cast(TestCaseName, testcase.header).get_token(RobotToken.TESTCASE_NAME)
        diagnostic = Diagnostic(
            range=range_from_token_or_node(testcase, header_token),
            message="Test case name cannot be empty.",
            severity=DiagnosticSeverity.ERROR,
            source=DIAGNOSTICS_SOURCE_NAME,
            code="KeywordError",
        )
        self._results.append(diagnostic)
    # track the enclosing name while visiting the body, then restore it
    self.current_testcase_or_keyword_name = testcase.name
    try:
        await self.generic_visit(node)
    finally:
        self.current_testcase_or_keyword_name = None
async def visit_Keyword(self, node: ast.AST) -> None:  # noqa: N802
    """Analyze a user keyword; flag empty names and mixed embedded/normal args."""
    from robot.parsing.lexer.tokens import Token as RobotToken
    from robot.parsing.model.blocks import Keyword
    from robot.parsing.model.statements import Arguments, KeywordName

    keyword = cast(Keyword, node)
    name_token = cast(KeywordName, keyword.header).get_token(RobotToken.KEYWORD_NAME)
    if not keyword.name:
        self._results.append(
            Diagnostic(
                range=range_from_token_or_node(keyword, name_token),
                message="Keyword name cannot be empty.",
                severity=DiagnosticSeverity.ERROR,
                source=DIAGNOSTICS_SOURCE_NAME,
                code="KeywordError",
            )
        )
    elif is_embedded_keyword(keyword.name) and any(
        isinstance(v, Arguments) and len(v.values) > 0 for v in keyword.body
    ):
        # embedded-argument keywords must not also declare [Arguments]
        self._results.append(
            Diagnostic(
                range=range_from_token_or_node(keyword, name_token),
                message="Keyword cannot have both normal and embedded arguments.",
                severity=DiagnosticSeverity.ERROR,
                source=DIAGNOSTICS_SOURCE_NAME,
                code="KeywordError",
            )
        )
    # track the enclosing name while visiting the body, then restore it
    self.current_testcase_or_keyword_name = keyword.name
    try:
        await self.generic_visit(node)
    finally:
        self.current_testcase_or_keyword_name = None
@dataclass
class LibraryEntry:
    """A resolved library import: its name, arguments, alias and origin."""

    name: str
    import_name: str
    library_doc: LibraryDoc
    args: Tuple[Any, ...] = ()
    alias: Optional[str] = None
    import_range: Range = field(default_factory=lambda: Range.zero())
    import_source: str = ""

    def __str__(self) -> str:
        # render as it would appear in a settings table
        parts = [self.import_name]
        if self.args:
            parts.append(f"{str(self.args)}")
        if self.alias:
            parts.append(f"WITH NAME {self.alias}")
        return " ".join(parts)
@dataclass
class ResourceEntry(LibraryEntry):
    """A resolved resource-file import, with its nested imports and variables."""

    imports: List[Import] = field(default_factory=list)
    variables: List[VariableDefinition] = field(default_factory=list)
@dataclass
class VariablesEntry(LibraryEntry):
    """A resolved variables-file import; no fields beyond LibraryEntry."""

    pass
class Namespace:
_logger = LoggingDescriptor()
@_logger.call
def __init__(
    self,
    imports_manager: ImportsManager,
    model: ast.AST,
    source: str,
    invalidated_callback: Callable[[Namespace], None],
    document: Optional[TextDocument] = None,
) -> None:
    """Create the namespace for one parsed robot file.

    :param imports_manager: shared manager used to resolve library/resource
        imports; this namespace subscribes to its change events
    :param model: parsed AST of the document
    :param source: source path of the document
    :param invalidated_callback: called with this namespace when an imported
        library/resource changed and cached state must be discarded
    :param document: the open text document, held via weakref
    """
    super().__init__()
    self.imports_manager = imports_manager
    # invalidate this namespace when imported libraries/resources change
    self.imports_manager.libraries_changed.add(self.libraries_changed)
    self.imports_manager.resources_changed.add(self.resources_changed)
    self.model = model
    self.source = source
    self.invalidated_callback = invalidated_callback
    # weakref so this namespace does not keep the document alive
    self._document = weakref.ref(document) if document is not None else None
    # caches of resolved imports, keyed by name
    self._libraries: OrderedDict[str, LibraryEntry] = OrderedDict()
    self._resources: OrderedDict[str, ResourceEntry] = OrderedDict()
    self._variables: OrderedDict[str, VariablesEntry] = OrderedDict()
    # guarded one-time initialization / analysis state
    self._initialized = False
    self._initialize_lock = asyncio.Lock()
    self._analyzed = False
    self._analyze_lock = asyncio.Lock()
    # lazily computed document state
    self._library_doc: Optional[LibraryDoc] = None
    self._imports: Optional[List[Import]] = None
    self._own_variables: Optional[List[VariableDefinition]] = None
    self._diagnostics: List[Diagnostic] = []
    self._keywords: Optional[List[KeywordDoc]] = None
    self._loop = asyncio.get_event_loop()
    # TODO: how to get the search order from model
    self.search_order: Tuple[str, ...] = ()
@property
def document(self) -> Optional[TextDocument]:
return self._document() if self._document is not None else None
async def libraries_changed(self, sender: Any, params: List[LibraryDoc]) -> None:
for p in params:
if any(e for e in self._libraries.values() if e.library_doc == p):
self.invalidated_callback(self)
break
async def resources_changed(self, sender: Any, params: List[LibraryDoc]) -> None:
for p in params:
if any(e for e in self._resources.values() if e.library_doc.source == p.source):
self.invalidated_callback(self)
break
    @_logger.call
    async def get_diagnostisc(self) -> List[Diagnostic]:
        """Return diagnostics, initializing and analyzing the namespace first.

        NOTE(review): the method name is misspelled ("diagnostisc"); it is kept
        as-is because callers may depend on it -- consider a deprecating rename.
        """
        await self.ensure_initialized()
        await self._analyze()
        return self._diagnostics
    @_logger.call
    async def get_libraries(self) -> OrderedDict[str, LibraryEntry]:
        """Return the resolved library imports (resolving them lazily on first call)."""
        await self.ensure_initialized()
        return self._libraries
    @_logger.call
    async def get_resources(self) -> OrderedDict[str, ResourceEntry]:
        """Return the resolved resource imports (resolving them lazily on first call)."""
        await self.ensure_initialized()
        return self._resources
    async def get_library_doc(self) -> LibraryDoc:
        """Build (once) and return the libdoc for this model.

        The model's optional ``model_type`` attribute selects how the model is
        documented: resource file, general suite file, or ``__init__`` file.
        An unknown/absent type falls through to an empty ``model_type`` string.
        """
        from ..parts.documents_cache import DocumentType
        if self._library_doc is None:
            model_type = ""
            if hasattr(self.model, "model_type"):
                t = getattr(self.model, "model_type")
                if t == DocumentType.RESOURCE:
                    model_type = "RESOURCE"
                elif t == DocumentType.GENERAL:
                    model_type = "TESTCASE"
                elif t == DocumentType.INIT:
                    model_type = "INIT"
            # Cached: subsequent calls reuse the generated libdoc.
            self._library_doc = await self.imports_manager.get_libdoc_from_model(
                self.model, self.source, model_type=model_type
            )
        return self._library_doc
    @_logger.call
    async def ensure_initialized(self) -> bool:
        """Resolve all imports exactly once (serialized by a lock); return True when done."""
        async with self._initialize_lock:
            if not self._initialized:
                imports = await self.get_imports()
                if self.document is not None:
                    # Merge with the import list remembered on the document so
                    # previously-seen imports keep their original relative order.
                    old_imports: List[Import] = self.document.get_data(Namespace)
                    if old_imports is None:
                        self.document.set_data(Namespace, imports)
                    elif old_imports != imports:
                        new_imports = []
                        # Keep still-valid old imports first ...
                        for e in old_imports:
                            if e in imports:
                                new_imports.append(e)
                        # ... then append any newly added imports.
                        for e in imports:
                            if e not in new_imports:
                                new_imports.append(e)
                        self.document.set_data(Namespace, new_imports)
                await self._import_default_libraries()
                # Relative imports are resolved against the source file's directory.
                await self._import_imports(imports, str(Path(self.source).parent), top_level=True)
                self._initialized = True
        return self._initialized
    @property
    def initialized(self) -> bool:
        # True once ensure_initialized() has completed the import resolution.
        return self._initialized
async def get_imports(self) -> List[Import]:
if self._imports is None:
self._imports = await ImportVisitor().get(self.source, self.model)
return self._imports
async def get_own_variables(self) -> List[VariableDefinition]:
if self._own_variables is None:
self._own_variables = await VariablesVisitor().get(self.source, self.model)
return self._own_variables
_builtin_variables: Optional[List[BuiltInVariableDefinition]] = None
@classmethod
def get_builtin_variables(cls) -> List[BuiltInVariableDefinition]:
if cls._builtin_variables is None:
cls._builtin_variables = [BuiltInVariableDefinition(0, 0, 0, 0, "", n, None) for n in BUILTIN_VARIABLES]
return cls._builtin_variables
def get_command_line_variables(self) -> List[VariableDefinition]:
if self.imports_manager.config is None:
return []
return [
CommandLineVariableDefinition(0, 0, 0, 0, "", f"${{{k}}}", None)
for k in self.imports_manager.config.variables.keys()
]
    async def get_variables(
        self, nodes: Optional[List[ast.AST]] = None, position: Optional[Position] = None
    ) -> Dict[VariableMatcher, VariableDefinition]:
        """Collect all variables visible at *position*.

        Precedence (the first definition of a name wins): block-local variables
        of the given keyword/testcase nodes, this file's own variables, imported
        resource variables, command-line variables, built-in variables.
        """
        from robot.parsing.model.blocks import Keyword, TestCase
        await self.ensure_initialized()
        result: Dict[VariableMatcher, VariableDefinition] = {}
        async for var in async_chain(
            *[
                await BlockVariableVisitor().get(self.source, n, position)
                for n in nodes or []
                if isinstance(n, (Keyword, TestCase))
            ],
            (e for e in await self.get_own_variables()),
            *(e.variables for e in self._resources.values()),
            (e for e in self.get_command_line_variables()),
            (e for e in self.get_builtin_variables()),
        ):
            # First occurrence wins -- earlier sources shadow later ones.
            if var.name is not None and VariableMatcher(var.name) not in result.keys():
                result[VariableMatcher(var.name)] = var
        return result
async def find_variable(
self, name: str, nodes: Optional[List[ast.AST]], position: Optional[Position] = None
) -> Optional[VariableDefinition]:
return (await self.get_variables(nodes, position)).get(VariableMatcher(name), None)
async def _import_imports(self, imports: Iterable[Import], base_dir: str, *, top_level: bool = False) -> None:
async def _import(value: Import) -> Optional[LibraryEntry]:
result: Optional[LibraryEntry] = None
try:
if isinstance(value, LibraryImport):
if value.name is None:
raise NameSpaceError("Library setting requires value.")
result = await self._get_library_entry(
value.name, value.args, value.alias, base_dir, sentinel=value
)
result.import_range = value.range()
result.import_source = value.source
if (
top_level
and result.library_doc.errors is None
and (len(result.library_doc.keywords) == 0 and not bool(result.library_doc.has_listener))
):
| |
get_stat_value(
self,
stat_name: str,
stat_category: str = "allTime",
character_id: Optional[int | str] = None,
) -> int | float:
"""Returns the value of the given stat. Int if no decimals, else float"""
possible_stat_categories = [
"allTime",
"allPvE",
"allPvP",
]
assert stat_category in possible_stat_categories, f"Stat must be one of {possible_stat_categories}"
topic = "merged" if stat_category == "allTime" else "results"
stats = await self.get_stats()
# character stats
if character_id:
found = False
for char in stats["characters"]:
if char["characterId"] == str(character_id):
stats = char
found = True
if not found:
raise CustomException("CharacterIdNotFound")
# total stats
else:
stats = stats["mergedAllCharacters"]
stats = stats[topic][stat_category]
if stat_category != "allTime":
stats = stats["allTime"]
stat: float = stats[stat_name]["basic"]["value"]
return int(stat) if stat.is_integer() else stat
async def get_artifact_level(self) -> ValueModel:
"""Returns the seasonal artifact data"""
result = await self.__get_profile()
return ValueModel(value=result["profileProgression"]["data"]["seasonalArtifact"]["powerBonus"])
    async def get_season_pass_level(self) -> ValueModel:
        """Returns the seasonal pass level"""
        # get the current season pass hash
        # NOTE(review): `asyncio.Lock()` is constructed fresh on every call, so it
        # never serializes concurrent callers -- a shared (module/instance level)
        # lock is needed for this cache fill to be race-free.
        async with asyncio.Lock():
            if not cache.season_pass_definition:
                cache.season_pass_definition = await destiny_manifest.get_current_season_pass(db=self.db)
        # get a character id since they are character specific
        character_id = (await self.get_character_ids())[0]
        result = await self.__get_profile()
        character_data = result["characterProgressions"]["data"][str(character_id)]["progressions"]
        # season level = base reward track level + prestige track level
        return ValueModel(
            value=character_data[str(cache.season_pass_definition.reward_progression_hash)]["level"]
            + character_data[str(cache.season_pass_definition.prestige_progression_hash)]["level"]
        )
    async def get_seasonal_challenges(self) -> SeasonalChallengesModel:
        """Returns the seasonal challenges completion info"""
        # do we have the info cached?
        # NOTE(review): this `asyncio.Lock()` is created per call, so it does not
        # actually guard the cache against concurrent fills -- needs a shared lock.
        async with asyncio.Lock():
            if not cache.seasonal_challenges_definition:
                definition = SeasonalChallengesModel()
                # get the info from the db
                # presentation-node hash of the "Seasonal Challenges" category
                sc_category_hash = 3443694067
                sc_presentation_node = await destiny_manifest.get(
                    db=self.db, table=DestinyPresentationNodeDefinition, primary_key=sc_category_hash
                )
                # loop through those categories and get the "Weekly" one
                for category_hash in sc_presentation_node.children_presentation_node_hash:
                    category = await destiny_manifest.get(
                        db=self.db, table=DestinyPresentationNodeDefinition, primary_key=category_hash
                    )
                    if category.name == "Weekly":
                        # loop through the seasonal challenges topics (Week1, Week2, etc...)
                        for sc_topic_hash in category.children_presentation_node_hash:
                            sc_topic = await destiny_manifest.get(
                                db=self.db, table=DestinyPresentationNodeDefinition, primary_key=sc_topic_hash
                            )
                            topic = SeasonalChallengesTopicsModel(name=sc_topic.name)
                            # loop through the actual seasonal challenges
                            for sc_hash in sc_topic.children_record_hash:
                                sc = await destiny_manifest.get(
                                    db=self.db, table=DestinyRecordDefinition, primary_key=sc_hash
                                )
                                topic.seasonal_challenges.append(
                                    SeasonalChallengesRecordModel(
                                        record_id=sc.reference_id, name=sc.name, description=sc.description
                                    )
                                )
                            definition.topics.append(topic)
                        # only the "Weekly" category is relevant; stop searching
                        break
                cache.seasonal_challenges_definition = definition
        # work on a copy so the cached definition stays pristine
        user_sc = cache.seasonal_challenges_definition.copy()
        user_records = await self.get_triumphs()
        # now calculate the members completions status
        user_sc = await to_thread.run_sync(get_seasonal_challenges_subprocess, user_sc, user_records)
        return user_sc
async def get_character_id_by_class(self, character_class: str) -> Optional[int]:
"""Return the matching character id if exists"""
# make sure the class exists
class_names = list(self.class_map.values())
if character_class not in class_names:
return None
# loop through the chars and return the matching one
characters = await self.get_character_info()
if characters:
for character_data in characters.characters:
if character_data.character_class == character_class:
return character_data.character_id
return None
async def get_character_ids(self) -> list[int]:
"""Return the character ids only"""
characters = await self.get_character_info()
ids = []
if characters:
for character_data in characters.characters:
ids.append(character_data.character_id)
return ids
    async def get_character_info(self) -> DestinyCharactersModel:
        """Get character info (id, class, race, gender) for every character."""
        characters = DestinyCharactersModel()
        result = await self.__get_profile()
        # loop through each character
        for character_id, character_data in result["characters"]["data"].items():
            # API keys are strings; convert to int for the model
            character_id = int(character_id)
            # format the data correctly and convert the hashes to strings
            characters.characters.append(
                DestinyCharacterModel(
                    character_id=character_id,
                    character_class=self.class_map[character_data["classHash"]],
                    character_race=self.race_map[character_data["raceHash"]],
                    character_gender=self.gender_map[character_data["genderHash"]],
                )
            )
        return characters
    async def get_triumphs(self) -> dict:
        """Populate the triumphs and then return them"""
        result = await self.__get_profile()
        # combine profile and character ones; result is also cached on the instance
        self._triumphs = await to_thread.run_sync(get_triumphs_subprocess, result)
        return self._triumphs
async def get_collectibles(self) -> dict:
"""Populate the collectibles and then return them"""
result = await self.__get_profile()
# combine profile and character ones
return await to_thread.run_sync(get_collectibles_subprocess, result)
async def get_metrics(self) -> dict:
"""Populate the metrics and then return them"""
metrics = await self.__get_profile()
return metrics["metrics"]["data"]["metrics"]
    async def get_stats(self) -> dict:
        """Get destiny stats via the Bungie stats endpoint."""
        route = stat_route.format(system=self.system, destiny_id=self.destiny_id)
        result = await self.api.get(route=route)
        return result.content
async def get_items_in_inventory_bucket(self, bucket: int) -> list:
"""
Returns all items in bucket. Default is vault hash, for others search "bucket" at https://data.destinysets.com/
Some buckets that are important:
Vault: 138197802
"""
result = await self.__get_profile()
all_items = result["profileInventory"]["data"]["items"]
items = []
for item in all_items:
if item["bucketHash"] == bucket:
items.append(item)
return items
    async def get_time_played(
        self,
        start_time: datetime.datetime,
        end_time: datetime.datetime,
        mode: int = 0,
        activity_ids: Optional[list[int]] = None,
        character_class: Optional[str] = None,
    ) -> int:
        """Get the time played (in seconds).

        Delegates to the activities CRUD layer; `mode=0` means all modes, and
        `activity_ids` / `character_class` further narrow the aggregation.
        """
        return await crud_activities.calculate_time_played(
            db=self.db,
            destiny_id=self.destiny_id,
            mode=mode,
            activity_ids=activity_ids,
            start_time=start_time,
            end_time=end_time,
            character_class=character_class,
        )
    async def __get_inventory_bucket(
        self, *buckets: DestinyInventoryBucketEnum
    ) -> dict[DestinyInventoryBucketEnum, dict[int, dict]]:
        """
        Get all the items from an inventory bucket. Default: All buckets
        Returns:
            {
                DestinyInventoryBucketEnum: {
                    item_hash: dict_data,
                    ...
                },
                ...
            }
        """
        # default is vault
        # NOTE(review): comment says "vault" but the fallback is all buckets
        if not buckets:
            buckets = DestinyInventoryBucketEnum.all()
        result = await self.__get_profile()
        # only get the items in the correct buckets; filtering runs off the event loop
        items = await to_thread.run_sync(get_inventory_bucket_subprocess, result, buckets)
        return items
    async def __get_all_inventory_bucket(
        self, *buckets: DestinyInventoryBucketEnum, include_item_level: bool = False
    ) -> dict[int, dict[DestinyInventoryBucketEnum, dict[int, dict]]]:
        """
        Get all the items from an inventory bucket. Includes both profile and character. Default: All buckets
        Includes the power level is asked for under "power_level"
        Returns:
            {
                character_id: {
                    DestinyInventoryBucketEnum: {
                        itemInstanceId: itemComponents_data,
                        ...
                    },
                    ...
                },
                ...
            }
        """
        def add_info(result_dict: dict, item: dict, char_id: int):
            """Func to add the items"""
            # NOTE(review): this closure reads `result` (the profile response)
            # from the enclosing scope; it is bound at call time, after `result`
            # is assigned below.
            # only get the items in the correct buckets
            for bucket in buckets:
                if item["bucketHash"] == bucket.value:
                    if bucket not in result_dict[char_id]:
                        result_dict[char_id].update({bucket: {}})
                    result_dict[char_id][bucket].update({item["itemInstanceId"]: item})
                    if include_item_level:
                        try:
                            result_dict[char_id][bucket][item["itemInstanceId"]].update(
                                {
                                    "power_level": result["itemComponents"]["instances"]["data"][
                                        item["itemInstanceId"]
                                    ]["primaryStat"]["value"]
                                }
                            )
                        # items without a primary stat are silently skipped
                        except KeyError:
                            pass
                    break
        # default is vault
        # NOTE(review): comment says "vault" but the fallback is all buckets
        if not buckets:
            buckets = DestinyInventoryBucketEnum.all()
        result = await self.__get_profile()
        items = {}
        # first get the character ids and their class
        character_ids = {}
        for character_id, character_data in result["characters"]["data"].items():
            class_type = character_data["classType"]
            if class_type not in character_ids:
                character_ids.update({class_type: [int(character_id)]})
            else:
                character_ids[class_type].append(int(character_id))
        # get character inventory
        for character_id, character_data in result["characterInventories"]["data"].items():
            character_id = int(character_id)
            if character_id not in items:
                items.update({character_id: {}})
            for inv_item in character_data["items"]:
                await to_thread.run_sync(add_info, items, inv_item, character_id)
        # get character equipped
        for character_id, character_data in result["characterEquipment"]["data"].items():
            character_id = int(character_id)
            for inv_item in character_data["items"]:
                await to_thread.run_sync(add_info, items, inv_item, character_id)
        # get stuff in vault that is character specific
        for profile_data in result["profileInventory"]["data"]["items"]:
            # only check if it has a instance id and is in the correct bucket
            if (
                profile_data["bucketHash"] == DestinyInventoryBucketEnum.VAULT.value
                and "itemInstanceId" in profile_data
            ):
                # get the character class and actual bucket hash from the item id
                definition = await destiny_items.get_item(db=self.db, item_id=profile_data["itemHash"])
                # rewrite the bucket from "vault" to the item's real bucket
                profile_data["bucketHash"] = definition.bucket_type_hash
                # try to catch users which deleted their warlock but still have warlock items
                if definition.class_type in character_ids:
                    # add the data to each character
                    actual_character_ids = character_ids[definition.class_type]
                    for actual_character_id in actual_character_ids:
                        await to_thread.run_sync(add_info, items, profile_data, actual_character_id)
        return items
    async def __get_profile(self) -> dict:
        """
        Return info from the profile call
        https://bungie-net.github.io/multi/schema_Destiny-DestinyComponentType.html#schema_Destiny-DestinyComponentType
        """
        # just calling nearly all of them. Don't need all quite yet, but who knows what the future will bring
        components = (
            100,
            101,
            102,
            103,
            104,
            105,
            200,
            201,
            202,
            204,
            205,
            300,
            301,
            302,
            304,
            305,
            306,
            307,
            400,
            401,
            402,
            500,
            600,
            700,
            800,
            900,
            1100,
        )
        route = profile_route.format(system=self.system, destiny_id=self.destiny_id)
        params = {"components": ",".join(map(str, components))}
        # need to call this with a token, since this data is sensitive
        response = await self.api.get(route=route, params=params, with_token=True)
        # get bungie name
        bungie_name = f"""{response.content["profile"]["data"]["userInfo"]["bungieGlobalDisplayName"]}#{response.content["profile"]["data"]["userInfo"]["bungieGlobalDisplayNameCode"]}"""
        # update name if different
        if bungie_name != self.user.bungie_name:
            await discord_users.update(db=self.db, to_update=self.user, bungie_name=bungie_name)
        return response.content
async def __get_currency_amount(self, bucket: DestinyInventoryBucketEnum) -> int:
"""Returns the amount of the specified currency owned"""
profile = await self.__get_profile()
items = profile["profileCurrencies"]["data"]["items"]
# get the item with the correct bucket
value = 0
for item in items:
if item["bucketHash"] == bucket.value:
value = item["quantity"]
return value
def get_max_power_subprocess(char_data: dict) -> int:
"""Run in anyio subprocess on another thread since this might be slow"""
max_power = 0
for character in char_data:
helmet = 0
gauntlet = 0
chest = 0
leg = 0
class_item = 0
kinetic = 0
energy = 0
power = 0
for bucket, data in char_data[character].items():
# save the items light level
for item_id, item_data in data.items():
match bucket:
case DestinyInventoryBucketEnum.HELMET:
if item_data["power_level"] > helmet:
helmet = item_data["power_level"]
case DestinyInventoryBucketEnum.GAUNTLETS:
if item_data["power_level"] > gauntlet:
gauntlet = item_data["power_level"]
case DestinyInventoryBucketEnum.CHEST:
if item_data["power_level"] > chest:
chest = item_data["power_level"]
case DestinyInventoryBucketEnum.LEG:
if item_data["power_level"] > leg:
leg = | |
import json
from ibmcloud_python_sdk.config import params
from ibmcloud_python_sdk.utils.common import query_wrapper as qw
from ibmcloud_python_sdk.power import get_power_headers as headers
from ibmcloud_python_sdk.utils.common import resource_not_found
from ibmcloud_python_sdk.utils.common import resource_deleted
from ibmcloud_python_sdk.power import instance
from ibmcloud_python_sdk.utils.common import check_args
class Pvm():
    def __init__(self):
        # SDK configuration parameters and the cloud-instance helper client.
        self.cfg = params()
        self.instance = instance.Instance()
    def get_pvms(self, instance):
        """Retrieve Power Virtual Instance list for specific cloud instance

        :param instance: Cloud instance ID
        :type instance: str
        :return: PVM list
        :rtype: list

        NOTE(review): on exception this prints the error and implicitly
        returns None, which callers must handle.
        """
        try:
            # Check if cloud instance exists and retrieve information
            ci_info = self.instance.get_instance(instance)
            if "errors" in ci_info:
                return ci_info
            # Connect to api endpoint for cloud-instances
            path = ("/pcloud/v1/cloud-instances/{}/pvm-instances".format(
                ci_info["name"]))
            # Return data
            return qw("power", "GET", path, headers())["data"]
        except Exception as error:
            print("Error fetching Power Virtual Instance list for cloud"
                  " instance {}. {}".format(instance, error))
def get_pvm(self, instance, pvm):
"""Retrieve specific Power Virtual Instance by name or by ID
:param instance: Cloud instance ID
:type instance: str
:param pvm: Power Virtual Instance name or ID
:type pvm: str
:return: PVM information
:rtype: dict
"""
by_name = self.get_pvm_by_name(instance, pvm)
if "errors" in by_name:
for key_name in by_name["errors"]:
if key_name["code"] == "not_found":
by_id = self.get_pvm_by_id(instance, pvm)
if "errors" in by_id:
return by_id
return by_id
else:
return by_name
else:
return by_name
    def get_pvm_by_id(self, instance, id):
        """Retrieve specific Power Virtual Instance by ID

        :param instance: Cloud instance ID
        :type instance: str
        :param id: Power Virtual Instance ID
        :type id: str
        :return: PVM information
        :rtype: dict

        NOTE(review): on exception this prints the error and implicitly
        returns None.
        """
        try:
            # Check if cloud instance exists and retrieve information
            ci_info = self.instance.get_instance(instance)
            if "errors" in ci_info:
                return ci_info
            # Connect to api endpoint for cloud-instances
            path = ("/pcloud/v1/cloud-instances/{}/pvm-instances/{}".format(
                ci_info["name"], id))
            # Return data
            return qw("power", "GET", path, headers())["data"]
        except Exception as error:
            print("Error fetching Power Virtual Instance with ID {} for cloud"
                  " instance {}. {}".format(id, instance, error))
    def get_pvm_by_name(self, instance, name):
        """Retrieve specific Power Virtual Instance by name

        :param instance: Cloud instance ID
        :type instance: str
        :param name: Power Virtual Instance name
        :type name: str
        :return: PVM information
        :rtype: dict
        """
        try:
            # Check if cloud instance exists and retrieve information
            ci_info = self.instance.get_instance(instance)
            if "errors" in ci_info:
                return ci_info
            # Retrieve pvms
            # NOTE(review): get_pvms() resolves the cloud instance again --
            # one redundant lookup per call.
            data = self.get_pvms(ci_info["name"])
            if "errors" in data:
                return data
            # Loop over pvms until filter match
            for pvm in data['pvmInstances']:
                if pvm["serverName"] == name:
                    # Return data
                    return pvm
            # Return error if no pvm is found
            return resource_not_found()
        except Exception as error:
            print("Error fetching Power Virtual Instance with name {} for"
                  " cloud instance {}. {}".format(name, instance, error))
    def get_pvm_networks(self, instance, pvm):
        """Retrieve networks list for Power Virtual Instance

        :param instance: Cloud instance ID
        :type instance: str
        :param pvm: Power Virtual Instance name or ID
        :type pvm: str
        :return: PVM network list
        :rtype: list
        """
        try:
            # Check if cloud instance exists and retrieve information
            ci_info = self.instance.get_instance(instance)
            if "errors" in ci_info:
                return ci_info
            # Check if pvm exists and retrieve information
            pvm_info = self.get_pvm(instance, pvm)
            if "errors" in pvm_info:
                return pvm_info
            # Connect to api endpoint for cloud-instances
            path = ("/pcloud/v1/cloud-instances/{}/pvm-instances/{}"
                    "/networks".format(ci_info["name"],
                                       pvm_info["pvmInstanceID"]))
            # Return data
            return qw("power", "GET", path, headers())["data"]
        except Exception as error:
            print("Error fetching network list for Power Virtual Instance list"
                  " for cloud instance {}. {}".format(instance, error))
def get_pvm_network(self, instance, pvm, network):
"""Retrieve specific network from Power Virtual Instance by name or by ID
:param instance: Cloud instance ID
:type instance: str
:param pvm: Power Virtual Instance name or ID
:type pvm: str
:param network: Network name or ID
:type network: str
:return: PVM network information
:rtype: dict
"""
by_name = self.get_pvm_network_by_name(instance, pvm, network)
if "errors" in by_name:
for key_name in by_name["errors"]:
if key_name["code"] == "not_found":
by_id = self.get_pvm_network_by_id(instance, pvm, network)
if "errors" in by_id:
return by_id
return by_id
else:
return by_name
else:
return by_name
    def get_pvm_network_by_id(self, instance, pvm, id):
        """Retrieve specific network from Power Virtual Instance by ID

        :param instance: Cloud instance ID
        :type instance: str
        :param pvm: Power Virtual Instance name or ID
        :type pvm: str
        :param id: Network ID
        :type id: str
        :return: PVM network information
        :rtype: dict
        """
        try:
            # Check if cloud instance exists and retrieve information
            ci_info = self.instance.get_instance(instance)
            if "errors" in ci_info:
                return ci_info
            # Check if pvm exists and retrieve information
            pvm_info = self.get_pvm(instance, pvm)
            if "errors" in pvm_info:
                return pvm_info
            # Connect to api endpoint for cloud-instances
            path = ("/pcloud/v1/cloud-instances/{}/pvm-instances/{}"
                    "/networks/{}".format(ci_info["name"],
                                          pvm_info["pvmInstanceID"],
                                          id))
            # Return data
            return qw("power", "GET", path, headers())["data"]
        except Exception as error:
            print("Error fetching network with ID {} from Power Virtual"
                  " Instance {} for cloud instance {}. {}".format(
                      id, pvm, instance, error))
    def get_pvm_network_by_name(self, instance, pvm, name):
        """Retrieve specific network attached to a Power Virtual Instance by name

        :param instance: Cloud instance ID
        :type instance: str
        :param pvm: Power Virtual Instance name or ID
        :type pvm: str
        :param name: Network name
        :type name: str
        :return: PVM network information
        :rtype: dict
        """
        try:
            # Check if cloud instance exists and retrieve information
            ci_info = self.instance.get_instance(instance)
            if "errors" in ci_info:
                return ci_info
            # Check if pvm exists and retrieve information
            pvm_info = self.get_pvm(instance, pvm)
            if "errors" in pvm_info:
                return pvm_info
            # Retrieve networks
            data = self.get_pvm_networks(ci_info["name"],
                                         pvm_info["pvmInstanceID"])
            if "errors" in data:
                return data
            # Loop over network until filter match
            for network in data['networks']:
                if network["networkName"] == name:
                    # Return data
                    return network
            # Return error if no network is found
            return resource_not_found()
        except Exception as error:
            print("Error fetching network with name {} from Power Virtual"
                  " Instance {} for cloud instance {}. {}".format(
                      name, pvm, instance, error))
def perform_action(self, **kwargs):
"""Perform an action on Power Virtual Machine
:param instance: Cloud instance ID
:type instance: str
:param pvm: Power Virtual Instance name or ID
:type pvm: str
:param action: Name of the action to take
:type action: str
:return: Action information
:rtype: dict
"""
args = ["instance", "pvm", "action"]
check_args(args, **kwargs)
# Build dict of argument and assign default value when needed
args = {
'instance': kwargs.get('instance'),
'pvm': kwargs.get('pvm'),
'action': kwargs.get('action'),
}
# Construct payload
payload = {}
for key, value in args.items():
if key != "instance" and key != "pvm" and value is not None:
payload[key] = value
try:
# Check if cloud instance exists and retrieve information
ci_info = self.instance.get_instance(args['instance'])
if "errors" in ci_info:
return ci_info
# Check if pvm exists and retrieve information
pvm_info = self.get_pvm(ci_info["name"], args['pvm'])
if "errors" in pvm_info:
return pvm_info
# Connect to api endpoint for cloud-instances
path = ("/pcloud/v1/cloud-instances/{}/pvm-instances/{}"
"/action".format(ci_info["name"],
pvm_info["pvmInstanceID"]))
# Return data
return qw("power", "POST", path, headers(),
json.dumps(payload))["data"]
except Exception as error:
print("Error performing action {} on Power Virtual Machine {} for"
" cloud instance {}. {}".format(args['action'],
args['network'],
args['instance'], error))
    def delete_pvm(self, instance, pvm):
        """Delete Power Virtual Instance

        :param instance: Cloud instance ID
        :type instance: str
        :param pvm: Power Virtual Instance name or ID
        :type pvm: str
        :return: Deletion status
        :rtype: dict
        """
        try:
            # Check if cloud instance exists and retrieve information
            ci_info = self.instance.get_instance(instance)
            if "errors" in ci_info:
                return ci_info
            # Check if pvm exists and retrieve information
            pvm_info = self.get_pvm(instance, pvm)
            if "errors" in pvm_info:
                return pvm_info
            # Connect to api endpoint for cloud-instances
            path = ("/pcloud/v1/cloud-instances/{}/pvm-instances/{}".format(
                ci_info["name"], pvm_info["pvmInstanceID"]))
            data = qw("power", "DELETE", path, headers())
            # Return data
            # Non-200 means the delete failed; surface the API payload.
            if data["response"].status != 200:
                return data["data"]
            # Return status
            return resource_deleted()
        except Exception as error:
            print("Error deleting Power Virtual Instance {} from cloud"
                  " instance {}. {}".format(pvm, instance, error))
def delete_pvm_network(self, instance, pvm, network):
"""Delete Power Virtual Instance network
:param instance: Cloud instance ID
:type instance: str
:param pvm: Power Virtual Instance name or ID
:type pvm: str
:param network: Network name or ID
:type network: str
:return: Deletion status
:rtype: dict
"""
try:
ci_info = self.instance.get_instance(instance)
if "errors" in ci_info:
return ci_info
# Check if pvm exists and retrieve information
pvm_info = self.get_pvm(instance, pvm)
if "errors" in pvm_info:
return pvm_info
net_info = self.get_pvm_network(ci_info["name"],
pvm_info["pvmInstanceID"],
network)
if "errors" in net_info:
return net_info
path = ("/pcloud/v1/cloud-instances/{}/pvm-instances/{}"
"/networks/{}".format(ci_info["name"],
pvm_info["pvmInstanceID"],
net_info["networkID"]))
data = qw("power", "DELETE", path, headers())
# Return data
if data["response"].status != 200:
return data["data"]
# Return status
return | |
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
File: unimo_grounded_baseline.py
Author: liwei(<EMAIL>)
Date: 2021-08-31 20:46
Desc: RoBERTa + ViT + Grounded Transformer
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import six
import paddle
import paddle.distributed.fleet as fleet
from model.transformer_encoder import encoder as grounded_encoder
from model.transformer_encoder import encoder as text_encoder
from model.transformer_encoder import pre_process_layer as text_pre_process_layer
from model.vision_transformer_encoder import encoder as vit_encoder
from model.vision_transformer_encoder import pre_process_layer as vit_pre_process_layer
from utils.pos_emb_interpolate import interpolate_pos_embed
class VlConfig(object):
    """Vision-language model configuration backed by a JSON file."""

    def __init__(self, config_path):
        """Load the configuration dict from *config_path* (a JSON file)."""
        self._config_dict = self._parse(config_path)

    def _parse(self, config_path):
        """Parse the JSON config file.

        Raises:
            IOError: if the file cannot be read or parsed; the original
                exception is chained so the root cause stays visible
                (previously the cause was silently dropped).
        """
        try:
            with open(config_path) as json_file:
                config_dict = json.load(json_file)
        except Exception as error:
            raise IOError("Error in parsing VL model config file '%s'" %
                          config_path) from error
        else:
            return config_dict

    def __getitem__(self, key):
        # Missing keys yield None instead of raising KeyError.
        return self._config_dict.get(key, None)

    def __setitem__(self, key, value):
        self._config_dict[key] = value

    def print_config(self):
        """Print every config entry followed by a separator line."""
        for arg, value in self._config_dict.items():
            print('%s: %s' % (arg, value))
        print('------------------------------------------------')
class VlModel(object):
    def __init__(self,
                 image_input=None,
                 image_mask=None,
                 text_input=None,
                 text_mask=None,
                 config=None,
                 weight_sharing=True,
                 task_type="normal",
                 decoding=False,
                 gather_idx=None,
                 grounded_encoder_trainable=True,
                 vit_encoder_trainable=True,
                 text_encoder_trainable=True,
                 with_cmcl_projection=True,
                 text_enc_layers=None,
                 grounding_enc_layers=None):
        """Configure the model and build its static graph.

        Reads all hyper-parameters from ``config`` (a mapping such as
        :class:`VlConfig`), optionally allocates empty incremental-decoding
        caches, then calls :meth:`_build_model` on the given inputs.

        :param image_input: dict of image tensors ("pixel_embedding" is read
            when building decoding caches); None for text-only tasks.
        :param image_mask: attention mask over image patches.
        :param text_input: dict of text tensors ("text.word_embedding" is read
            when building decoding caches); None for image-only tasks.
        :param text_mask: attention mask over text tokens.
        :param config: hyper-parameter mapping; at least one of text_input /
            image_input must be provided (asserted below).
        :param weight_sharing: stored on ``self._weight_sharing``; not read in
            this method.
        :param task_type: task label; "img2txt" flags an image-to-text task.
        :param decoding: if True, allocate zero-length k/v caches for the
            grounded and text encoders (used for step-by-step generation).
        :param gather_idx: index tensor forwarded to :meth:`_build_model`.
        :param grounded_encoder_trainable: freeze flag for the grounded encoder.
        :param vit_encoder_trainable: freeze flag for the ViT encoder.
        :param text_encoder_trainable: freeze flag for the text encoder.
        :param with_cmcl_projection: stored flag; not read in this method.
        :param text_enc_layers: explicit layer indices for the text encoder;
            defaults to all ``text_num_hidden_layers`` layers.
        :param grounding_enc_layers: explicit layer indices for the grounded
            encoder; defaults to all ``num_hidden_layers`` layers.
        """
        # for grounded cross-modal encoder
        self._n_layer = config['num_hidden_layers']
        self._n_head = config['num_attention_heads']
        self._max_position_seq_len = config['max_position_embeddings']
        self._hidden_act = config['hidden_act']
        self._emb_size = config['hidden_size']
        self._prepostprocess_dropout = config['hidden_dropout_prob']
        self._attention_dropout = config['attention_probs_dropout_prob']
        self._emb_dtype = "float32"
        self._sent_types = config['type_vocab_size']
        # for text encoder
        self._text_n_layer = config['text_num_hidden_layers']
        self._text_n_head = config['text_num_attention_heads']
        self._text_voc_size = config['text_vocab_size']
        self._text_max_position_seq_len = config['text_max_position_embeddings']
        self._text_hidden_act = config['text_hidden_act']
        self._text_prepostprocess_dropout = config['text_hidden_dropout_prob']
        self._text_attention_dropout = config['text_attention_probs_dropout_prob']
        # Vocabulary sizes for the two text embedding tables, keyed by the
        # same names used in text_input dicts.
        self._text_emb_vocab_size = {"text.word_embedding": self._text_voc_size,
                                     "text.pos_embedding": self._text_max_position_seq_len}
        # for vit encoder
        self._vit_n_layer = config['vit_num_hidden_layers']
        self._vit_n_head = config['vit_num_attention_heads']
        self._vit_hidden_act = config['vit_hidden_act']
        self._vit_prepostprocess_dropout = config['vit_hidden_dropout_prob']
        self._vit_attention_dropout = config['vit_attention_probs_dropout_prob']
        self._vit_layer_norm_eps = config['vit_layer_norm_eps']
        self._weight_sharing = weight_sharing
        self._grounded_encoder_trainable = grounded_encoder_trainable
        self._vit_encoder_trainable = vit_encoder_trainable
        self._text_encoder_trainable = text_encoder_trainable
        self._with_cmcl_projection = with_cmcl_projection
        # Initialize all weights by truncated normal initializer, and all biases
        # will be initialized by constant zero by default.
        self._param_initializer = paddle.fluid.initializer.TruncatedNormalInitializer(scale=config['initializer_range'])
        self._bias_initializer = paddle.fluid.initializer.ConstantInitializer(value=0.0)
        assert text_input is not None or image_input is not None, "text_input and image_input cannot be both None"
        self._task_type = task_type
        self._is_img2txt_task = (task_type == "img2txt")
        self._is_multimodal_task = (image_input is not None)
        # Derived patch geometry: a square image of image_size is cut into
        # (image_size / resolution)^2 patches of resolution x resolution x 3.
        self.image_size = config['image_size']
        self.num_codebook = config['num_codebook']
        self.resolution = config['resolution']
        self.width = self.image_size // config['resolution']
        self.patch_seq_len = self.image_size * self.image_size // (config['resolution'] * config['resolution'])
        self.patch_emb_size = config['resolution'] * config['resolution'] * 3
        if text_enc_layers is None:
            text_enc_layers = list(range(self._text_n_layer))
        if grounding_enc_layers is None:
            grounding_enc_layers = list(range(self._n_layer))
        print("text_enc_layers", text_enc_layers)
        print("grounding_enc_layers", grounding_enc_layers)
        self.text_enc_layers = text_enc_layers
        self.grounding_enc_layers = grounding_enc_layers
        # Learnable temperature for the cross-modal contrastive loss,
        # initialized to 0.07.
        self.cmcl_temperature = paddle.static.create_parameter(
            shape=[1],
            dtype=self._emb_dtype,
            attr=paddle.ParamAttr(
                name="cmcl_temperature",
                initializer=paddle.fluid.initializer.ConstantInitializer(value=0.07)))
        if decoding:
            # Zero-length (seq dim = 0) key/value caches per layer; they are
            # grown incrementally during step-by-step decoding.
            self.grounded_caches = [{
                "k":
                    paddle.fluid.layers.fill_constant_batch_size_like(
                        input=text_input["text.word_embedding"] if text_input is not None else image_input[
                            "pixel_embedding"],
                        shape=[-1, 0, self._emb_size],
                        dtype=self._emb_dtype,  # float32,
                        value=0),
                "v":
                    paddle.fluid.layers.fill_constant_batch_size_like(
                        input=text_input["text.word_embedding"] if text_input is not None else image_input[
                            "pixel_embedding"],
                        shape=[-1, 0, self._emb_size],
                        dtype=self._emb_dtype,  # float32,
                        value=0),
            } for i in range(self._n_layer)]
            self.text_caches = [{
                "k":
                    paddle.fluid.layers.fill_constant_batch_size_like(
                        input=text_input["text.word_embedding"] if text_input is not None else image_input[
                            "pixel_embedding"],
                        shape=[-1, 0, self._emb_size],
                        dtype=self._emb_dtype,  # float32,
                        value=0),
                "v":
                    paddle.fluid.layers.fill_constant_batch_size_like(
                        input=text_input["text.word_embedding"] if text_input is not None else image_input[
                            "pixel_embedding"],
                        shape=[-1, 0, self._emb_size],
                        dtype=self._emb_dtype,  # float32,
                        value=0),
            } for i in range(self._text_n_layer)]
        else:
            self.grounded_caches = None
            self.text_caches = None
        self._build_model(text_input=text_input,
                          text_mask=text_mask,
                          gather_idx=gather_idx,
                          image_input=image_input,
                          image_mask=image_mask)
def _build_model(self, text_input=None, text_mask=None, gather_idx=None, image_input=None, image_mask=None):
if text_input is None and image_input is not None: # for img2txt when decoding or image tasks
self._enc_v_out, self.all_checkpoints = self.encode(image_input=image_input,
image_mask=image_mask,
gather_idx=gather_idx)
elif text_input is not None and image_input is None: # for textual tasks
self._enc_l_out, self.all_checkpoints = self.encode(text_input=text_input,
text_mask=text_mask,
gather_idx=gather_idx)
else: # for multi-modal tasks
self._enc_v_out, self._enc_l_out, self.all_checkpoints = \
self.encode(text_input=text_input,
text_mask=text_mask,
gather_idx=gather_idx,
image_input=image_input,
image_mask=image_mask)
def encode(self, text_input=None, text_mask=None, gather_idx=None,
image_input=None, image_mask=None, decoding_step=False, grounded_decoding_mask=None):
all_checkpoints = []
# padding id in vocabulary must be set to 0
if text_input is None and image_input is not None: # for img2txt task when decoding or image tasks
emb_v_out, v_seq_len, n_head_self_attn_mask, _checkpoints = \
self._gen_input(image_input=image_input, image_mask=image_mask, gather_idx=gather_idx)
all_checkpoints.extend(_checkpoints)
enc_v_out, grounding_checkpoints = grounded_encoder(
enc_input=emb_v_out,
attn_bias=n_head_self_attn_mask,
enc_layers=self.grounding_enc_layers,
n_head=self._n_head,
d_key=self._emb_size // self._n_head,
d_value=self._emb_size // self._n_head,
d_model=self._emb_size,
d_inner_hid=self._emb_size * 4,
prepostprocess_dropout=self._prepostprocess_dropout,
attention_dropout=self._attention_dropout,
relu_dropout=0,
hidden_act=self._hidden_act,
preprocess_cmd="",
postprocess_cmd="dan",
param_initializer=self._param_initializer,
name='grounded.encoder',
caches=self.grounded_caches,
gather_idx=gather_idx,
trainable=self._grounded_encoder_trainable)
all_checkpoints.extend(grounding_checkpoints)
return enc_v_out, all_checkpoints
elif image_input is None and text_input is not None: # for textual task
if decoding_step: # for step-by-step generation during decoding
emb_l_out, l_seq_len, n_head_self_attn_mask, _checkpoints = \
self._gen_input(text_input=text_input, text_mask=text_mask, gather_idx=gather_idx,
decoding_step=True, grounded_decoding_mask=grounded_decoding_mask)
all_checkpoints.extend(_checkpoints)
enc_l_out, grounding_checkpoints = grounded_encoder(
enc_input=emb_l_out,
attn_bias=n_head_self_attn_mask,
enc_layers=self.grounding_enc_layers,
n_head=self._n_head,
d_key=self._emb_size // self._n_head,
d_value=self._emb_size // self._n_head,
d_model=self._emb_size,
d_inner_hid=self._emb_size * 4,
prepostprocess_dropout=self._prepostprocess_dropout,
attention_dropout=self._attention_dropout,
relu_dropout=0,
hidden_act=self._hidden_act,
preprocess_cmd="",
postprocess_cmd="dan",
param_initializer=self._param_initializer,
name='grounded.encoder',
caches=self.grounded_caches,
gather_idx=gather_idx,
trainable=self._grounded_encoder_trainable)
all_checkpoints.extend(grounding_checkpoints)
return enc_l_out, all_checkpoints
else:
emb_l_out, l_seq_len, n_head_self_attn_mask, _checkpoints = \
self._gen_input(text_input=text_input, text_mask=text_mask, gather_idx=gather_idx)
all_checkpoints.extend(_checkpoints)
enc_l_out, grounding_checkpoints = grounded_encoder(
enc_input=emb_l_out,
attn_bias=n_head_self_attn_mask,
enc_layers=self.grounding_enc_layers,
n_head=self._n_head,
d_key=self._emb_size // self._n_head,
d_value=self._emb_size // self._n_head,
d_model=self._emb_size,
d_inner_hid=self._emb_size * 4,
prepostprocess_dropout=self._prepostprocess_dropout,
attention_dropout=self._attention_dropout,
relu_dropout=0,
hidden_act=self._hidden_act,
preprocess_cmd="",
postprocess_cmd="dan",
param_initializer=self._param_initializer,
name='grounded.encoder',
caches=self.grounded_caches,
gather_idx=gather_idx,
trainable=self._grounded_encoder_trainable)
all_checkpoints.extend(grounding_checkpoints)
return enc_l_out, all_checkpoints
elif image_input is not None and text_input is not None: # for multi-modal task
emb_v_out, emb_l_out, v_seq_len, l_seq_len, n_head_self_attn_mask, _checkpoints = \
self._gen_input(image_input=image_input, image_mask=image_mask,
text_input=text_input, text_mask=text_mask, gather_idx=gather_idx)
all_checkpoints.extend(_checkpoints)
emb_vl_out = paddle.concat([emb_v_out, emb_l_out], axis=1)
enc_vl_out, grounding_checkpoints = grounded_encoder(
enc_input=emb_vl_out,
attn_bias=n_head_self_attn_mask,
enc_layers=self.grounding_enc_layers,
n_head=self._n_head,
d_key=self._emb_size // self._n_head,
d_value=self._emb_size // self._n_head,
d_model=self._emb_size,
d_inner_hid=self._emb_size * 4,
prepostprocess_dropout=self._prepostprocess_dropout,
attention_dropout=self._attention_dropout,
relu_dropout=0,
hidden_act=self._hidden_act,
preprocess_cmd="",
postprocess_cmd="dan",
param_initializer=self._param_initializer,
name='grounded.encoder',
caches=self.grounded_caches,
gather_idx=gather_idx,
trainable=self._grounded_encoder_trainable)
all_checkpoints.extend(grounding_checkpoints)
enc_v_out = paddle.slice(
input=enc_vl_out, axes=[1], starts=[0], ends=[v_seq_len])
enc_l_out = paddle.slice(
input=enc_vl_out, axes=[1], starts=[v_seq_len], ends=[v_seq_len + l_seq_len])
return enc_v_out, enc_l_out, all_checkpoints
else:
raise ValueError("The input is invalid")
def vit_encode(self, image_input, image_mask):
"""encode image by pre-trained ViT"""
assert image_mask is not None, "text_mask should not be none"
image_self_attn_mask = paddle.matmul(x=paddle.transpose(image_mask, perm=[0, 2, 1]), y=image_mask)
self_attn_mask = paddle.scale(
x=image_self_attn_mask, scale=1e4, bias=-1.0, bias_after_scale=False)
n_head_self_attn_mask = paddle.stack(
x=[self_attn_mask] * self._vit_n_head, axis=1)
n_head_self_attn_mask.stop_gradient = True
pixel_embeddings = paddle.static.nn.conv2d(
input=image_input['pixel_embedding'],
num_filters=self._emb_size,
filter_size=self.resolution,
stride=self.resolution,
padding=(self.resolution - 1) // 2,
param_attr=paddle.ParamAttr(
name="vit.patch_embeddings_projection_weight",
trainable=self._vit_encoder_trainable),
bias_attr=paddle.ParamAttr(
name="vit.patch_embeddings_projection_bias",
trainable=self._vit_encoder_trainable),
data_format="NHWC")
# paddle.static.Print(paddle.shape(pixel_embeddings), message="pixel_embeddings", summarize=-1)
pixel_embeddings = paddle.reshape(pixel_embeddings, shape=[-1, self.patch_seq_len, self._emb_size])
cls_token_emb = paddle.static.create_parameter(
shape=[1, 1, self._emb_size],
dtype=self._emb_dtype,
attr=paddle.ParamAttr(
name="vit.cls_token_embeddings",
trainable=self._vit_encoder_trainable,
initializer=self._param_initializer))
cls_token_emb = paddle.expand(x=cls_token_emb,
shape=[paddle.shape(pixel_embeddings)[0], 1, self._emb_size])
# cpncate global [CLS] token with image patches
# (batch_size, patch_seq_len + 1, emb_dim)
all_pixel_embeddings = paddle.concat(x=[cls_token_emb, pixel_embeddings], axis=1)
# default image_size=224, resolution=16, patch_seq_len=196
pixel_pos_emb = paddle.static.create_parameter(
shape=[1, 197, self._emb_size],
dtype=self._emb_dtype,
attr=paddle.ParamAttr(name="vit.position_embeddings",
trainable=self._vit_encoder_trainable,
initializer=self._param_initializer))
# paddle.static.Print(paddle.shape(pixel_pos_emb), message="pixel_pos_emb", summarize=-1)
if self.patch_seq_len > 196: # when image_size > 224
pixel_pos_emb = interpolate_pos_embed(pixel_pos_emb, self.patch_seq_len)
emb_v_out = all_pixel_embeddings + pixel_pos_emb
emb_v_out = vit_pre_process_layer(
emb_v_out, 'd', self._vit_prepostprocess_dropout, name='vit.pre_encoder',
trainable=self._vit_encoder_trainable)
vit_enc_out, checkpoints = vit_encoder(
enc_input=emb_v_out,
attn_bias=n_head_self_attn_mask,
n_layer=self._vit_n_layer,
n_head=self._vit_n_head,
d_key=self._emb_size // self._vit_n_head,
d_value=self._emb_size // self._vit_n_head,
d_model=self._emb_size,
d_inner_hid=self._emb_size * 4,
prepostprocess_dropout=self._vit_prepostprocess_dropout,
attention_dropout=self._vit_attention_dropout,
relu_dropout=0,
hidden_act=self._vit_hidden_act,
preprocess_cmd="n",
postprocess_cmd="da",
param_initializer=self._param_initializer,
name='vit.encoder',
trainable=self._vit_encoder_trainable)
vit_seq_len = paddle.shape(vit_enc_out)[1] # patch_seq_len + 1
return vit_enc_out, vit_seq_len, n_head_self_attn_mask, checkpoints
    def text_encode(self, text_input, text_mask, gather_idx=None, decoding_step=False):
        """Encode token ids with the text (RoBERTa-style) transformer encoder.

        :param text_input: dict with "text.word_embedding" (token ids) and
            "text.pos_embedding" (position ids).
        :param text_mask: padding mask; during a decoding step it is used as
            the attention mask directly, otherwise a pairwise mask is built
            from it.
        :param gather_idx: forwarded to the encoder for cache gathering.
        :param decoding_step: True during step-by-step generation.
        :return: (text_enc_out, text_seq_len, n_head_self_attn_mask,
            checkpoints)
        """
        assert text_mask is not None, "text_mask should not be none"
        if decoding_step:
            # Caller supplies a ready-made per-step attention mask.
            text_self_attn_mask = text_mask
        else:
            # Outer product of the padding mask -> (batch, seq, seq) mask.
            text_self_attn_mask = paddle.matmul(x=paddle.transpose(text_mask, perm=[0, 2, 1]), y=text_mask)
        # Map {0,1} mask values to additive bias {-1e4, 0}: (m - 1) * 1e4.
        self_attn_mask = paddle.scale(
            x=text_self_attn_mask, scale=1e4, bias=-1.0, bias_after_scale=False)
        # NOTE(review): the mask is replicated self._n_head (grounded encoder
        # heads) times, but the encoder below runs with self._text_n_head
        # heads -- confirm the config guarantees these two are equal.
        n_head_self_attn_mask = paddle.stack(
            x=[self_attn_mask] * self._n_head, axis=1)
        n_head_self_attn_mask.stop_gradient = True
        # text part
        text_emb = paddle.static.nn.embedding(
            input=text_input["text.word_embedding"],
            size=[self._text_emb_vocab_size["text.word_embedding"], self._emb_size],
            dtype=self._emb_dtype,
            param_attr=paddle.ParamAttr(
                name='text.word_embedding',
                trainable=self._text_encoder_trainable,
                initializer=self._param_initializer))
        text_emb = paddle.squeeze(text_emb, axis=2)  # (batch_size, seq_len, emb_dim)
        pos_emb = paddle.static.nn.embedding(
            input=text_input["text.pos_embedding"],
            size=[self._text_emb_vocab_size["text.pos_embedding"], self._emb_size],
            dtype=self._emb_dtype,
            param_attr=paddle.ParamAttr(
                name='text.pos_embedding',
                trainable=self._text_encoder_trainable,
                initializer=self._param_initializer))
        pos_emb = paddle.squeeze(pos_emb, axis=2)  # (batch_size, seq_len, emb_dim)
        # Sum word + position embeddings, then layer-norm ('n') and dropout
        # ('d') before the encoder stack.
        emb_out = text_emb + pos_emb
        emb_out = text_pre_process_layer(
            emb_out, 'nd', self._text_prepostprocess_dropout,
            name="text.pre_encoder", trainable=self._text_encoder_trainable)
        text_enc_out, checkpoints = text_encoder(
            enc_input=emb_out,
            attn_bias=n_head_self_attn_mask,
            enc_layers=self.text_enc_layers,
            n_head=self._text_n_head,
            d_key=self._emb_size // self._text_n_head,
            d_value=self._emb_size // self._text_n_head,
            d_model=self._emb_size,
            d_inner_hid=self._emb_size * 4,
            prepostprocess_dropout=self._text_prepostprocess_dropout,
            attention_dropout=self._text_attention_dropout,
            relu_dropout=0,
            hidden_act=self._text_hidden_act,
            preprocess_cmd="",
            postprocess_cmd="dan",
            param_initializer=self._param_initializer,
            name='text.encoder',
            caches=self.text_caches,
            gather_idx=gather_idx,
            trainable=self._text_encoder_trainable)
        text_seq_len = paddle.shape(text_enc_out)[1]
        return text_enc_out, text_seq_len, n_head_self_attn_mask, checkpoints
def _gen_input(self, text_input=None, text_mask=None,
image_input=None, image_mask=None, gather_idx=None, decoding_step=False,
grounded_decoding_mask=None):
_checkpoints = []
"""encode images and texts independently by Vit and RoBERTa, get the optimal grounded tokens"""
if image_input is not None:
# visual part
self.vit_enc_out, vit_seq_len, image_self_attn_mask, vit_checkpoints = self.vit_encode(image_input,
image_mask)
_checkpoints.extend(vit_checkpoints)
if text_input is not None:
# textual part
self.text_enc_out, text_seq_len, text_self_attn_mask, text_checkpoints = self.text_encode(text_input,
text_mask,
gather_idx=gather_idx,
decoding_step=decoding_step)
_checkpoints.extend(text_checkpoints)
if | |
n in self.net["neuron_pools"]:
self.net_np_nps[n] = [n]
self.net_np_sps[n] = []
for s,S in self.net["synapse_pools"].items():
assert("target" in S), "ERROR: st_meta_network: " \
+ "no target parameter found for sp " + s
if S["target"] == n:
# further on with graph for np
self.net_np_sps[n].append(s)
sources = S["source"]
for source_np in [item for sub_list in sources for item in sub_list]:
if source_np not in self.net_np_nps[n]:
self.net_np_nps[n].append(source_np)
# Compute nets for plasts.
self.net_plast_nps = {}
self.net_plast_sps = {}
self.net_plast_depth = {}
for p,P in self.net["plasticities"].items():
if P["type"] in ["hebbian"]:
# For hebb we actually need no deep computation.
self.net_plast_depth[p] = 0
# Generate empty lists.
self.net_plast_nps[p] = [[] for i in range(self.net_plast_depth[p] + 1)]
self.net_plast_sps[p] = [[] for i in range(self.net_plast_depth[p] + 1)]
# Add target to np list at time 0.
self.net_plast_nps[p][-1].append(P["target"])
# Check number of parameter.
if len(P["parameter"]) != 1:
print("WARNING: For plasticity " + p + " of " + P["type"] \
+ " type more than one parameter was found.")
# Add all sources of sps.
src_sp = P["parameter"][0][1]
for sublist in range(len(self.net["synapse_pools"][src_sp]["source"])):
for src in range(len(self.net["synapse_pools"][src_sp]["source"][sublist])):
self.net_plast_nps[p][-1].append(self.net["synapse_pools"][src_sp]["source"][sublist][src])
# Add sp.
if P["parameter"][0][0] == "sp":
self.net_plast_sps[p][-1].append(P["parameter"][0][1])
else:
print("Warning: For " + P["type"] + " plasticity " + p \
+ " found parameter other then from sp: " \
+ str(P["parameter"][0]))
elif P["type"] == "loss":
# Set depth of plasticity graph to max of source and target.
if has_target(P.get("loss_function", None)):
self.net_plast_depth[p] = max(P["source_t"], P["target_t"])
else:
self.net_plast_depth[p] = P["source_t"]
# lists of needed nps / sps for each level of depth
# note: nps[0] is all inputs, and these are all inputs
# sps[d] are the input sps to nps[d], hence:
# sps[0] is always []
# the minimal distance from target to overall inputs must be at least depth
# In principle this builds a reverse pyramid from target to all sources of distance depth.
self.net_plast_nps[p] = [[] for i in range(self.net_plast_depth[p] + 1)]
self.net_plast_sps[p] = [[] for i in range(self.net_plast_depth[p] + 1)]
# Initialize with source and target layer.
if has_target(P.get("loss_function", None)):
self.net_plast_nps[p][P["target_t"]].append(P["target"])
self.net_plast_nps[p][P["source_t"]].append(P["source"])
# Consider masking neuron-pool.
if "mask" in P:
if has_target(P.get("loss_function", None)):
self.net_plast_nps[p][P["target_t"]].append(P["mask"])
else:
self.net_plast_nps[p][P["source_t"]].append(P["mask"])
# Consider uncertainty.
if P["loss_function"] == "reg_uncertainty":
self.net_plast_nps[p][P["source_t"]].append(P["uncertainty"])
network_rollback(self.net, self.net_plast_depth[p], self.net_plast_nps[p], self.net_plast_sps[p])
# Nice print of rolled-out network for specific loss.
# if p == "loss_scalar":
# print("\nNeuron-pools:")
# for np_d in range(len(self.net_plast_nps[p])):
# print("\nDepth: " + str(np_d) + " " + str(self.net_plast_nps[p][np_d]))
# print("\nSynapse-pools:")
# for sp_d in range(len(self.net_plast_sps[p])):
# print("\nDepth: " + str(sp_d) + " " + str(self.net_plast_sps[p][sp_d]))
elif P["type"] == "L_regularizer":
# set depth of plasticity graph to 0 (no rollout necessary)
self.net_plast_depth[p] = 0
self.net_plast_nps[p] = [[]]
self.net_plast_sps[p] = [[]]
# initialize with all np / sp of which parameters are regularized
for par in P["parameter"]:
if par[0] == "np":
if par[1] not in self.net_plast_nps[p][0]:
self.net_plast_nps[p][0].append(par[1])
elif par[0] == "sp":
# add sps
if par[1] not in self.net_plast_sps[p][0]:
self.net_plast_sps[p][0].append(par[1])
# and also add all sp"s source nps (needed to init sp) and targets
self.net_plast_nps[p][0].append(self.net["synapse_pools"][par[1]]["target"])
sources = [item for sub_list in self.net["synapse_pools"][par[1]]["source"] for item in sub_list]
for src in sources:
if src not in self.net_plast_nps[p][0]:
self.net_plast_nps[p][0].append(src)
def is_sane(self):
"""Sanity check for entire network structure.
"""
is_sane = True
if not isinstance(self.net, dict):
print(" Error: Got no dictionary from st_graph file.")
return False
# Check for name and agents.
if "name" not in self.net:
print(" Error: Please specify a network name (e.g. name: test).")
is_sane = False
if "agents" not in self.net:
print(" Error: Please specify number of agents (aka. batchsize) " \
+ "(e.g. agents: 16).")
is_sane = False
# Check that np, sp, plast, if is in net.
T = []
for t in ["np", "sp", "plast", "if"]:
if S2L(t) not in self.net:
print(" Error: Unable to find " + S2L(t) + ".")
is_sane = False
else:
if not isinstance(self.net[S2L(t)], dict):
print(" Error: Network definition of " + str(S2L(t)) \
+ " must be a dictionary.")
is_sane = False
else:
T.append(t)
# Check for valid item names.
for t in T:
for n in self.net[S2L(t)]:
if self.net[S2L(t)][n] is None:
print(" Error: Found empty item: '" + str(n) + "'")
is_sane = False
for c in n:
if not c.isalpha() and not c.isdigit() and not c == "_":
print(" Error: Item names may only contain " \
+ "[a-zA-Z0-9_]. Invalid name: '" + str(n) + "'")
is_sane = False
break
# Check for unique item names (across item types).
for t0 in T:
for t1 in T:
if t0 != t1:
for n0 in self.net[S2L(t0)]:
for n1 in self.net[S2L(t1)]:
if n0 == n1:
print(" Error: Item names must be unique. " \
+ "Found '" + n0 + "' as " + t0 + " and " + t1)
is_sane = False
# Do some general np checks.
if "np" in T:
for n, N in self.net["neuron_pools"].items():
# Check for valid 'bias_shape' np parameter values.
if "bias_shape" in N:
if N["bias_shape"] not in ["full", "feature", "spatial", "scalar", False]:
print(" Error: Invalid value of 'bias_shape' parameter found for np " \
+ n)
is_sane = False
# Check for valid 'gain_shape' np parameter values.
if "gain_shape" in N:
if N["gain_shape"] not in ["full", "feature", "spatial", "scalar", False]:
print(" Error: Invalid value of 'gain_shape' parameter found for np " \
+ n)
is_sane = False
# Check for valid batch normalization.
if "batchnorm_mean" in N:
if N["batchnorm_mean"] not in ["full", "feature", "spatial", "scalar", False]:
print(" Error: Invalid value of 'batchnorm_mean' parameter found for np " \
+ n)
is_sane = False
if "batchnorm_std" in N:
if N["batchnorm_std"] not in ["full", "feature", "spatial", "scalar", False]:
print(" Error: Invalid value of 'batchnorm_std' parameter found for np " \
+ n)
is_sane = False
# Check for valid layer normalization.
if "layernorm_mean" in N:
if N["layernorm_mean"] not in ["full", "feature", "spatial", False]:
print(" Error: Invalid value of 'layernorm_mean' parameter found for np " \
+ n)
is_sane = False
if "layernorm_std" in N:
if N["layernorm_std"] not in ["full", "feature", "spatial", False]:
print(" Error: Invalid value of 'layernorm_std' parameter found for np " \
+ n)
is_sane = False
# Some sp checks.
if "sp" in T:
for s, S in self.net["synapse_pools"].items():
if S is not None:
# Check that all synapse pools have source and target.
if "source" not in S:
print(" Error: No source(s) specified for synapse \
pool '" + s + "'.")
is_sane = False
else:
# Check for correct factor_shapes length and values.
if "factor_shapes" in S:
if not isinstance(S["factor_shapes"], list):
print(" Error: For synapse \
pool '" + s + "'. factor_shapes must be a list\
with length equal to number of factors.")
is_sane = False
else:
if len(S["factor_shapes"]) != len(S["source"]):
print(" Error: For synapse \
pool '" + s + "'. If factor_shapes defined, \
they must be defined for all factors.")
is_sane = False
else:
for f in S["factor_shapes"]:
if f not in ["full", "feature", "spatial", "scalar"]:
print(" Error: For synapse \
pool '" + s + "'. Unexpected factor shape: " \
+ str(f) + ". Expected: full, feature, spatial or scalar.")
is_sane = False
# Some checks for bias shapes.
if "bias_shapes" in S:
# Check if list.
if not isinstance(S["bias_shapes"], list):
print(" Error: For synapse \
pool '" + s + "'. bias_shapes must be a list\
with length equal to number of factors.")
is_sane = False
else:
# Check for correct length of list.
if len(S["bias_shapes"]) != len(S["source"]):
print(" Error: For synapse \
pool '" + s + "'. If bias_shapes defined, \
they must be defined for all factors.")
is_sane = False
else:
# Check consistency with factor_shapes.
if "factor_shapes" in S:
if isinstance(S["factor_shapes"], list):
if len(S["factor_shapes"]) == len(S["source"]):
for f in range(len(S["factor_shapes"])):
if S["factor_shapes"][f] == "feature":
if S["bias_shapes"][f] not in ["feature", "scalar"]:
print(" Error: For synapse \
pool '" + | |
# rbb_server/src/rbb_swagger_server/models/simulation_detailed.py
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from rbb_swagger_server.models.base_model_ import Model
from rbb_swagger_server.models.simulation_environment_detailed import SimulationEnvironmentDetailed # noqa: F401,E501
from rbb_swagger_server.models.simulation_run_detailed import SimulationRunDetailed # noqa: F401,E501
from rbb_swagger_server.models.simulation_summary import SimulationSummary # noqa: F401,E501
from rbb_swagger_server.models.task_detailed import TaskDetailed # noqa: F401,E501
from rbb_swagger_server import util
class SimulationDetailed(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, detail_type: str=None, identifier: int=None, description: str=None, created: datetime=None, result: int=None, environment_name: str=None, queued_task_identifier: str=None, queued_task_state: int=None, config: object=None, on_complete_action: object=None, environment: SimulationEnvironmentDetailed=None, runs: List[SimulationRunDetailed]=None, queued_task: TaskDetailed=None): # noqa: E501
"""SimulationDetailed - a model defined in Swagger
:param detail_type: The detail_type of this SimulationDetailed. # noqa: E501
:type detail_type: str
:param identifier: The identifier of this SimulationDetailed. # noqa: E501
:type identifier: int
:param description: The description of this SimulationDetailed. # noqa: E501
:type description: str
:param created: The created of this SimulationDetailed. # noqa: E501
:type created: datetime
:param result: The result of this SimulationDetailed. # noqa: E501
:type result: int
:param environment_name: The environment_name of this SimulationDetailed. # noqa: E501
:type environment_name: str
:param queued_task_identifier: The queued_task_identifier of this SimulationDetailed. # noqa: E501
:type queued_task_identifier: str
:param queued_task_state: The queued_task_state of this SimulationDetailed. # noqa: E501
:type queued_task_state: int
:param config: The config of this SimulationDetailed. # noqa: E501
:type config: object
:param on_complete_action: The on_complete_action of this SimulationDetailed. # noqa: E501
:type on_complete_action: object
:param environment: The environment of this SimulationDetailed. # noqa: E501
:type environment: SimulationEnvironmentDetailed
:param runs: The runs of this SimulationDetailed. # noqa: E501
:type runs: List[SimulationRunDetailed]
:param queued_task: The queued_task of this SimulationDetailed. # noqa: E501
:type queued_task: TaskDetailed
"""
self.swagger_types = {
'detail_type': str,
'identifier': int,
'description': str,
'created': datetime,
'result': int,
'environment_name': str,
'queued_task_identifier': str,
'queued_task_state': int,
'config': object,
'on_complete_action': object,
'environment': SimulationEnvironmentDetailed,
'runs': List[SimulationRunDetailed],
'queued_task': TaskDetailed
}
self.attribute_map = {
'detail_type': 'detail_type',
'identifier': 'identifier',
'description': 'description',
'created': 'created',
'result': 'result',
'environment_name': 'environment_name',
'queued_task_identifier': 'queued_task_identifier',
'queued_task_state': 'queued_task_state',
'config': 'config',
'on_complete_action': 'on_complete_action',
'environment': 'environment',
'runs': 'runs',
'queued_task': 'queued_task'
}
self._detail_type = detail_type
self._identifier = identifier
self._description = description
self._created = created
self._result = result
self._environment_name = environment_name
self._queued_task_identifier = queued_task_identifier
self._queued_task_state = queued_task_state
self._config = config
self._on_complete_action = on_complete_action
self._environment = environment
self._runs = runs
self._queued_task = queued_task
@classmethod
def from_dict(cls, dikt) -> 'SimulationDetailed':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The SimulationDetailed of this SimulationDetailed. # noqa: E501
:rtype: SimulationDetailed
"""
return util.deserialize_model(dikt, cls)
@property
def detail_type(self) -> str:
"""Gets the detail_type of this SimulationDetailed.
:return: The detail_type of this SimulationDetailed.
:rtype: str
"""
return self._detail_type
@detail_type.setter
def detail_type(self, detail_type: str):
"""Sets the detail_type of this SimulationDetailed.
:param detail_type: The detail_type of this SimulationDetailed.
:type detail_type: str
"""
if detail_type is None:
raise ValueError("Invalid value for `detail_type`, must not be `None`") # noqa: E501
self._detail_type = detail_type
@property
def identifier(self) -> int:
"""Gets the identifier of this SimulationDetailed.
:return: The identifier of this SimulationDetailed.
:rtype: int
"""
return self._identifier
@identifier.setter
def identifier(self, identifier: int):
"""Sets the identifier of this SimulationDetailed.
:param identifier: The identifier of this SimulationDetailed.
:type identifier: int
"""
if identifier is None:
raise ValueError("Invalid value for `identifier`, must not be `None`") # noqa: E501
self._identifier = identifier
@property
def description(self) -> str:
"""Gets the description of this SimulationDetailed.
:return: The description of this SimulationDetailed.
:rtype: str
"""
return self._description
@description.setter
def description(self, description: str):
"""Sets the description of this SimulationDetailed.
:param description: The description of this SimulationDetailed.
:type description: str
"""
self._description = description
@property
def created(self) -> datetime:
"""Gets the created of this SimulationDetailed.
:return: The created of this SimulationDetailed.
:rtype: datetime
"""
return self._created
@created.setter
def created(self, created: datetime):
"""Sets the created of this SimulationDetailed.
:param created: The created of this SimulationDetailed.
:type created: datetime
"""
if created is None:
raise ValueError("Invalid value for `created`, must not be `None`") # noqa: E501
self._created = created
@property
def result(self) -> int:
"""Gets the result of this SimulationDetailed.
0 is scheduled, -1 is prep failed, -100 is sim failed, 100 is sim succeeded # noqa: E501
:return: The result of this SimulationDetailed.
:rtype: int
"""
return self._result
@result.setter
def result(self, result: int):
"""Sets the result of this SimulationDetailed.
0 is scheduled, -1 is prep failed, -100 is sim failed, 100 is sim succeeded # noqa: E501
:param result: The result of this SimulationDetailed.
:type result: int
"""
if result is None:
raise ValueError("Invalid value for `result`, must not be `None`") # noqa: E501
self._result = result
@property
def environment_name(self) -> str:
"""Gets the environment_name of this SimulationDetailed.
Name of the simulation environment # noqa: E501
:return: The environment_name of this SimulationDetailed.
:rtype: str
"""
return self._environment_name
@environment_name.setter
def environment_name(self, environment_name: str):
"""Sets the environment_name of this SimulationDetailed.
Name of the simulation environment # noqa: E501
:param environment_name: The environment_name of this SimulationDetailed.
:type environment_name: str
"""
if environment_name is None:
raise ValueError("Invalid value for `environment_name`, must not be `None`") # noqa: E501
self._environment_name = environment_name
@property
def queued_task_identifier(self) -> str:
"""Gets the queued_task_identifier of this SimulationDetailed.
Identifier of the task associated to this simulation # noqa: E501
:return: The queued_task_identifier of this SimulationDetailed.
:rtype: str
"""
return self._queued_task_identifier
@queued_task_identifier.setter
def queued_task_identifier(self, queued_task_identifier: str):
"""Sets the queued_task_identifier of this SimulationDetailed.
Identifier of the task associated to this simulation # noqa: E501
:param queued_task_identifier: The queued_task_identifier of this SimulationDetailed.
:type queued_task_identifier: str
"""
self._queued_task_identifier = queued_task_identifier
@property
def queued_task_state(self) -> int:
"""Gets the queued_task_state of this SimulationDetailed.
Read only value, taken from associated task # noqa: E501
:return: The queued_task_state of this SimulationDetailed.
:rtype: int
"""
return self._queued_task_state
@queued_task_state.setter
def queued_task_state(self, queued_task_state: int):
"""Sets the queued_task_state of this SimulationDetailed.
Read only value, taken from associated task # noqa: E501
:param queued_task_state: The queued_task_state of this SimulationDetailed.
:type queued_task_state: int
"""
self._queued_task_state = queued_task_state
@property
def config(self) -> object:
"""Gets the config of this SimulationDetailed.
Configuration of the simulation. # noqa: E501
:return: The config of this SimulationDetailed.
:rtype: object
"""
return self._config
@config.setter
def config(self, config: object):
"""Sets the config of this SimulationDetailed.
Configuration of the simulation. # noqa: E501
:param config: The config of this SimulationDetailed.
:type config: object
"""
if config is None:
raise ValueError("Invalid value for `config`, must not be `None`") # noqa: E501
self._config = config
@property
def on_complete_action(self) -> object:
"""Gets the on_complete_action of this SimulationDetailed.
Action to take when simulation completes. # noqa: E501
:return: The on_complete_action of this SimulationDetailed.
:rtype: object
"""
return self._on_complete_action
@on_complete_action.setter
def on_complete_action(self, on_complete_action: object):
"""Sets the on_complete_action of this SimulationDetailed.
Action to take when simulation completes. # noqa: E501
:param on_complete_action: The on_complete_action of this SimulationDetailed.
:type on_complete_action: object
"""
self._on_complete_action = on_complete_action
@property
def environment(self) -> SimulationEnvironmentDetailed:
"""Gets the environment of this SimulationDetailed.
Read only value, expanded on request. # noqa: E501
:return: The environment of this SimulationDetailed.
:rtype: SimulationEnvironmentDetailed
"""
return self._environment
@environment.setter
def environment(self, environment: SimulationEnvironmentDetailed):
"""Sets the environment of this SimulationDetailed.
Read only value, expanded on request. # noqa: E501
:param environment: The environment of this SimulationDetailed.
:type environment: SimulationEnvironmentDetailed
"""
self._environment = environment
@property
def runs(self) -> List[SimulationRunDetailed]:
"""Gets the runs of this SimulationDetailed.
Read only value, expanded on request. # noqa: E501
:return: The runs of this SimulationDetailed.
:rtype: List[SimulationRunDetailed]
"""
return self._runs
@runs.setter
def runs(self, runs: List[SimulationRunDetailed]):
"""Sets the runs of this SimulationDetailed.
Read only value, expanded on request. # noqa: E501
:param runs: The runs of this SimulationDetailed.
:type runs: List[SimulationRunDetailed]
"""
self._runs = runs
@property
def queued_task(self) -> TaskDetailed:
"""Gets the queued_task of this SimulationDetailed.
Read only value, expanded on request. # noqa: E501
:return: The queued_task of this SimulationDetailed.
:rtype: TaskDetailed
"""
return self._queued_task
@queued_task.setter
def queued_task(self, queued_task: TaskDetailed):
"""Sets the queued_task of this SimulationDetailed.
Read only value, expanded on request. | |
from datetime import date, datetime
from decimal import Decimal
import pytest
from django.shortcuts import reverse
from faker import Faker
from freezegun import freeze_time
from pytz import timezone
from rest_framework.status import HTTP_200_OK, HTTP_400_BAD_REQUEST
from baserow.contrib.database.fields.handler import FieldHandler
from baserow.contrib.database.fields.models import (
CreatedOnField,
LastModifiedField,
LongTextField,
MultipleSelectField,
SelectOption,
URLField,
DateField,
EmailField,
FileField,
NumberField,
PhoneNumberField,
FormulaField,
LookupField,
)
@pytest.mark.django_db
def test_text_field_type(api_client, data_fixture):
    # Renaming a text field must leave its other properties (text_default) intact.
    user, token = data_fixture.create_user_and_token(
        email="<EMAIL>", password="password", first_name="Test1"
    )
    table = data_fixture.create_database_table(user=user)
    text_field = data_fixture.create_text_field(
        table=table, order=0, name="Old name", text_default="Default"
    )
    item_url = reverse(
        "api:database:fields:item", kwargs={"field_id": text_field.id}
    )
    response = api_client.patch(
        item_url,
        {"name": "New name"},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    assert response.status_code == HTTP_200_OK
    assert response.json()["text_default"] == "Default"
@pytest.mark.django_db
def test_long_text_field_type(api_client, data_fixture):
    # End-to-end exercise of the long_text field type through the REST API:
    # create, rename, write a text / "" / None / omitted value, then delete.
    user, token = data_fixture.create_user_and_token(
        email="<EMAIL>", password="password", first_name="Test1"
    )
    table = data_fixture.create_database_table(user=user)
    fake = Faker()
    text = fake.text()
    # create the field
    response = api_client.post(
        reverse("api:database:fields:list", kwargs={"table_id": table.id}),
        {"name": "Long text", "type": "long_text"},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json["type"] == "long_text"
    assert LongTextField.objects.all().count() == 1
    field_id = response_json["id"]
    # rename it; the generated model attribute follows the new name below
    response = api_client.patch(
        reverse("api:database:fields:item", kwargs={"field_id": field_id}),
        {"name": "Long text 2"},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    assert response.status_code == HTTP_200_OK
    # a plain text value round-trips through API and ORM
    response = api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {f"field_{field_id}": text},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json[f"field_{field_id}"] == text
    model = table.get_model(attribute_names=True)
    row = model.objects.all().last()
    assert row.long_text_2 == text
    # an empty string is stored as-is (not coerced to None)
    response = api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {f"field_{field_id}": ""},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json[f"field_{field_id}"] == ""
    row = model.objects.all().last()
    assert row.long_text_2 == ""
    # an explicit None is stored as None
    response = api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {f"field_{field_id}": None},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json[f"field_{field_id}"] is None
    row = model.objects.all().last()
    assert row.long_text_2 is None
    # omitting the value defaults to None
    response = api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json[f"field_{field_id}"] is None
    row = model.objects.all().last()
    assert row.long_text_2 is None
    # deleting the field removes the LongTextField instance
    url = reverse("api:database:fields:item", kwargs={"field_id": field_id})
    response = api_client.delete(url, HTTP_AUTHORIZATION=f"JWT {token}")
    assert response.status_code == HTTP_200_OK
    assert LongTextField.objects.all().count() == 0
@pytest.mark.django_db
def test_url_field_type(api_client, data_fixture):
    # End-to-end exercise of the url field type. Unlike long_text, None and an
    # omitted value are both coerced to the empty string.
    user, token = data_fixture.create_user_and_token(
        email="<EMAIL>", password="password", first_name="Test1"
    )
    table = data_fixture.create_database_table(user=user)
    # create the field
    response = api_client.post(
        reverse("api:database:fields:list", kwargs={"table_id": table.id}),
        {"name": "URL", "type": "url"},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json["type"] == "url"
    assert URLField.objects.all().count() == 1
    field_id = response_json["id"]
    # rename it; model attribute becomes `url2` below
    response = api_client.patch(
        reverse("api:database:fields:item", kwargs={"field_id": field_id}),
        {"name": "URL2"},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    assert response.status_code == HTTP_200_OK
    # a valid URL round-trips
    response = api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {f"field_{field_id}": "https://test.nl"},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json[f"field_{field_id}"] == "https://test.nl"
    model = table.get_model(attribute_names=True)
    row = model.objects.all().last()
    assert row.url2 == "https://test.nl"
    # empty string is accepted
    response = api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {f"field_{field_id}": ""},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json[f"field_{field_id}"] == ""
    row = model.objects.all().last()
    assert row.url2 == ""
    # None is coerced to the empty string
    response = api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {f"field_{field_id}": None},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json[f"field_{field_id}"] == ""
    row = model.objects.all().last()
    assert row.url2 == ""
    # an omitted value also defaults to the empty string
    response = api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json[f"field_{field_id}"] == ""
    row = model.objects.all().last()
    assert row.url2 == ""
    # deleting the field removes the URLField instance
    url = reverse("api:database:fields:item", kwargs={"field_id": field_id})
    response = api_client.delete(url, HTTP_AUTHORIZATION=f"JWT {token}")
    assert response.status_code == HTTP_200_OK
    assert URLField.objects.all().count() == 0
@pytest.mark.django_db
def test_date_field_type(api_client, data_fixture):
    # Creates one date field and one datetime field, checks that mismatched
    # value formats are rejected, that correct values round-trip (the datetime
    # is stored tz-aware in UTC), and that deleting one field leaves the other.
    user, token = data_fixture.create_user_and_token(
        email="<EMAIL>", password="password", first_name="Test1"
    )
    table = data_fixture.create_database_table(user=user)
    # plain date field
    response = api_client.post(
        reverse("api:database:fields:list", kwargs={"table_id": table.id}),
        {"name": "Date", "type": "date"},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json["type"] == "date"
    assert DateField.objects.all().count() == 1
    date_field_id = response_json["id"]
    # date field that also stores a time component
    response = api_client.post(
        reverse("api:database:fields:list", kwargs={"table_id": table.id}),
        {"name": "Datetime", "type": "date", "date_include_time": True},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json["type"] == "date"
    assert DateField.objects.all().count() == 2
    date_time_field_id = response_json["id"]
    # deliberately swapped formats: datetime value on the date field and
    # date-only value on the datetime field must both be rejected
    response = api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {
            f"field_{date_field_id}": "2020-04-01 12:00",
            f"field_{date_time_field_id}": "2020-04-01",
        },
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_400_BAD_REQUEST
    assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
    assert response_json["detail"][f"field_{date_field_id}"][0]["code"] == "invalid"
    assert response_json["detail"][f"field_{date_time_field_id}"][0]["code"] == (
        "invalid"
    )
    # correctly formatted values round-trip
    response = api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {
            f"field_{date_field_id}": "2020-04-01",
            f"field_{date_time_field_id}": "2020-04-01 14:30:20",
        },
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json[f"field_{date_field_id}"] == "2020-04-01"
    assert response_json[f"field_{date_time_field_id}"] == "2020-04-01T14:30:20Z"
    model = table.get_model(attribute_names=True)
    row = model.objects.all().last()
    assert row.date == date(2020, 4, 1)
    assert row.datetime == datetime(2020, 4, 1, 14, 30, 20, tzinfo=timezone("UTC"))
    # deleting the datetime field leaves the plain date field behind
    url = reverse("api:database:fields:item", kwargs={"field_id": date_time_field_id})
    response = api_client.delete(url, HTTP_AUTHORIZATION=f"JWT {token}")
    assert response.status_code == HTTP_200_OK
    assert DateField.objects.all().count() == 1
@pytest.mark.django_db
def test_email_field_type(api_client, data_fixture):
    # End-to-end exercise of the email field type; like url, both None and an
    # omitted value are coerced to the empty string.
    user, token = data_fixture.create_user_and_token(
        email="<EMAIL>", password="password", first_name="Test1"
    )
    table = data_fixture.create_database_table(user=user)
    # create the field
    response = api_client.post(
        reverse("api:database:fields:list", kwargs={"table_id": table.id}),
        {"name": "Email", "type": "email"},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json["type"] == "email"
    assert EmailField.objects.all().count() == 1
    field_id = response_json["id"]
    # rename it; model attribute becomes `email2` below
    response = api_client.patch(
        reverse("api:database:fields:item", kwargs={"field_id": field_id}),
        {"name": "Email2"},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    assert response.status_code == HTTP_200_OK
    # a valid address round-trips
    response = api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {f"field_{field_id}": "<EMAIL>"},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json[f"field_{field_id}"] == "<EMAIL>"
    model = table.get_model(attribute_names=True)
    row = model.objects.all().last()
    assert row.email2 == "<EMAIL>"
    # empty string is accepted
    response = api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {f"field_{field_id}": ""},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json[f"field_{field_id}"] == ""
    row = model.objects.all().last()
    assert row.email2 == ""
    # None is coerced to the empty string
    response = api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {f"field_{field_id}": None},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json[f"field_{field_id}"] == ""
    row = model.objects.all().last()
    assert row.email2 == ""
    # an omitted value also defaults to the empty string
    response = api_client.post(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        {},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    response_json = response.json()
    assert response.status_code == HTTP_200_OK
    assert response_json[f"field_{field_id}"] == ""
    row = model.objects.all().last()
    assert row.email2 == ""
    # deleting the field removes the EmailField instance
    email = reverse("api:database:fields:item", kwargs={"field_id": field_id})
    response = api_client.delete(email, HTTP_AUTHORIZATION=f"JWT {token}")
    assert response.status_code == HTTP_200_OK
    assert EmailField.objects.all().count() == 0
@pytest.mark.django_db
def test_file_field_type(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table = data_fixture.create_database_table(user=user)
grid = data_fixture.create_grid_view(table=table)
with freeze_time("2020-01-01 12:00"):
user_file_1 = data_fixture.create_user_file(
original_name="test.txt",
original_extension="txt",
unique="sdafi6WtHfnDrU6S1lQKh9PdC7PeafCA",
size=10,
mime_type="text/plain",
is_image=True,
image_width=1920,
image_height=1080,
sha256_hash=(
"a591a6d40bf420404a011733cfb7b190d62c65bf0bcda32b57b277d9ad9f146e"
),
)
user_file_2 = data_fixture.create_user_file()
user_file_3 = data_fixture.create_user_file()
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "File", "type": "file"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["type"] == "file"
assert FileField.objects.all().count() == 1
field_id = response_json["id"]
response = api_client.patch(
reverse("api:database:fields:item", kwargs={"field_id": field_id}),
{"name": "File2"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{field_id}"] == []
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_id}": []},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{field_id}"] == []
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_id}": [{"without_name": "test"}]},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_id}": [{"name": "an__invalid__name.jpg"}]},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert (
response_json["detail"][f"field_{field_id}"][0]["name"][0]["code"] == "invalid"
)
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_id}": [{"name": "not_existing.jpg"}]},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_USER_FILE_DOES_NOT_EXIST"
assert response_json["detail"] == "The user file not_existing.jpg does not exist."
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_id}": [{"name": user_file_1.name, "is_image": True}]},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert (
response_json[f"field_{field_id}"][0]["visible_name"]
== user_file_1.original_name
)
assert response_json[f"field_{field_id}"][0]["name"] == (
"sdafi6WtHfnDrU6S1lQKh9PdC7PeafCA_"
"a591a6d40bf420404a011733cfb7b190d62c65bf0bcda32b57b277d9ad9f146e.txt"
)
assert response_json[f"field_{field_id}"][0]["size"] == 10
assert response_json[f"field_{field_id}"][0]["mime_type"] == "text/plain"
assert response_json[f"field_{field_id}"][0]["is_image"] is True
assert response_json[f"field_{field_id}"][0]["image_width"] == 1920
assert response_json[f"field_{field_id}"][0]["image_height"] == 1080
assert response_json[f"field_{field_id}"][0]["uploaded_at"] == (
"2020-01-01T12:00:00+00:00"
)
assert "localhost:8000" in response_json[f"field_{field_id}"][0]["url"]
assert len(response_json[f"field_{field_id}"][0]["thumbnails"]) == 1
assert (
"localhost:8000"
in response_json[f"field_{field_id}"][0]["thumbnails"]["tiny"]["url"]
)
assert (
"sdafi6WtHfnDrU6S1lQKh9PdC7PeafCA_"
"a591a6d40bf420404a011733cfb7b190d62c65bf0bcda32b57b277d9ad9f146e.txt"
in response_json[f"field_{field_id}"][0]["thumbnails"]["tiny"]["url"]
)
assert "tiny" in response_json[f"field_{field_id}"][0]["thumbnails"]["tiny"]["url"]
assert response_json[f"field_{field_id}"][0]["thumbnails"]["tiny"]["width"] == 21
assert response_json[f"field_{field_id}"][0]["thumbnails"]["tiny"]["height"] == 21
assert "original_name" not in response_json
assert "original_extension" not in response_json
assert "sha256_hash" not in response_json
response = api_client.patch(
reverse(
"api:database:rows:item",
kwargs={"table_id": table.id, "row_id": response_json["id"]},
),
{
f"field_{field_id}": [
{"name": user_file_3.name},
{"name": user_file_2.name, "visible_name": "new_name_1.txt"},
]
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response_json = response.json()
assert response_json[f"field_{field_id}"][0]["name"] == user_file_3.name
assert (
response_json[f"field_{field_id}"][0]["visible_name"]
== user_file_3.original_name
)
assert "localhost:8000" in response_json[f"field_{field_id}"][0]["url"]
assert response_json[f"field_{field_id}"][0]["is_image"] is False
assert response_json[f"field_{field_id}"][0]["image_width"] is None
assert response_json[f"field_{field_id}"][0]["image_height"] is None
assert response_json[f"field_{field_id}"][0]["thumbnails"] | |
payment.
if remote_config.initial_msat < calc_fees_for_commitment_tx(
num_htlcs=0,
feerate=feerate,
is_local_initiator=False)[REMOTE]:
raise Exception(
"the funder's amount for the initial commitment transaction "
"is not sufficient for full fee payment")
# The receiving node MUST fail the channel if:
# both to_local and to_remote amounts for the initial commitment transaction are
# less than or equal to channel_reserve_satoshis (see BOLT 3).
if (local_config.initial_msat <= 1000 * payload['channel_reserve_satoshis']
and remote_config.initial_msat <= 1000 * payload['channel_reserve_satoshis']):
raise Exception(
"both to_local and to_remote amounts for the initial commitment "
"transaction are less than or equal to channel_reserve_satoshis")
# note: we ignore payload['channel_flags'], which e.g. contains 'announce_channel'.
# Notably if the remote sets 'announce_channel' to True, we will ignore that too,
# but we will not play along with actually announcing the channel (so we keep it private).
# -> accept channel
# for the first commitment transaction
per_commitment_secret_first = get_per_commitment_secret_from_seed(
local_config.per_commitment_secret_seed,
RevocationStore.START_INDEX
)
per_commitment_point_first = secret_to_pubkey(
int.from_bytes(per_commitment_secret_first, 'big'))
min_depth = 3
self.send_message(
'accept_channel',
temporary_channel_id=temp_chan_id,
dust_limit_satoshis=local_config.dust_limit_sat,
max_htlc_value_in_flight_msat=local_config.max_htlc_value_in_flight_msat,
channel_reserve_satoshis=local_config.reserve_sat,
htlc_minimum_msat=local_config.htlc_minimum_msat,
minimum_depth=min_depth,
to_self_delay=local_config.to_self_delay,
max_accepted_htlcs=local_config.max_accepted_htlcs,
funding_pubkey=local_config.multisig_key.pubkey,
revocation_basepoint=local_config.revocation_basepoint.pubkey,
payment_basepoint=local_config.payment_basepoint.pubkey,
delayed_payment_basepoint=local_config.delayed_basepoint.pubkey,
htlc_basepoint=local_config.htlc_basepoint.pubkey,
first_per_commitment_point=per_commitment_point_first,
accept_channel_tlvs={
'upfront_shutdown_script':
{'shutdown_scriptpubkey': local_config.upfront_shutdown_script}
}
)
# <- funding created
funding_created = await self.wait_for_message('funding_created', temp_chan_id)
# -> funding signed
funding_idx = funding_created['funding_output_index']
funding_txid = bh2u(funding_created['funding_txid'][::-1])
channel_id, funding_txid_bytes = channel_id_from_funding_tx(funding_txid, funding_idx)
constraints = ChannelConstraints(
capacity=funding_sat,
is_initiator=False,
funding_txn_minimum_depth=min_depth
)
outpoint = Outpoint(funding_txid, funding_idx)
chan_dict = self.create_channel_storage(
channel_id, outpoint, local_config, remote_config, constraints)
chan = Channel(
chan_dict,
sweep_address=self.lnworker.sweep_address,
lnworker=self.lnworker,
initial_feerate=feerate
)
chan.storage['init_timestamp'] = int(time.time())
if isinstance(self.transport, LNTransport):
chan.add_or_update_peer_addr(self.transport.peer_addr)
remote_sig = funding_created['signature']
chan.receive_new_commitment(remote_sig, [])
sig_64, _ = chan.sign_next_commitment()
self.send_message('funding_signed',
channel_id=channel_id,
signature=sig_64,
)
self.funding_signed_sent.add(chan.channel_id)
chan.open_with_first_pcp(payload['first_per_commitment_point'], remote_sig)
chan.set_state(ChannelState.OPENING)
self.lnworker.add_new_channel(chan)
async def trigger_force_close(self, channel_id: bytes):
await self.initialized
latest_point = secret_to_pubkey(42) # we need a valid point (BOLT2)
self.send_message(
"channel_reestablish",
channel_id=channel_id,
next_commitment_number=0,
next_revocation_number=0,
your_last_per_commitment_secret=0,
my_current_per_commitment_point=latest_point)
    async def reestablish_channel(self, chan: Channel):
        """Run the BOLT-02 channel_reestablish handshake for *chan*.

        Replays un-acked local updates byte-for-byte, re-sends a lost
        revoke_and_ack if needed, and uses the option_data_loss_protect fields
        to detect whether either side fell behind. Moves chan.peer_state from
        DISCONNECTED to GOOD on success, or BAD if the remote is ahead of us.
        """
        await self.initialized
        chan_id = chan.channel_id
        assert ChannelState.PREOPENING < chan.get_state() < ChannelState.FORCE_CLOSING
        if chan.peer_state != PeerState.DISCONNECTED:
            self.logger.info(f'reestablish_channel was called but channel {chan.get_id_for_log()} '
                             f'already in peer_state {chan.peer_state!r}')
            return
        chan.peer_state = PeerState.REESTABLISHING
        util.trigger_callback('channel', self.lnworker.wallet, chan)
        # BOLT-02: "A node [...] upon disconnection [...] MUST reverse any uncommitted updates sent by the other side"
        chan.hm.discard_unsigned_remote_updates()
        # ctns
        oldest_unrevoked_local_ctn = chan.get_oldest_unrevoked_ctn(LOCAL)
        latest_local_ctn = chan.get_latest_ctn(LOCAL)
        next_local_ctn = chan.get_next_ctn(LOCAL)
        oldest_unrevoked_remote_ctn = chan.get_oldest_unrevoked_ctn(REMOTE)
        latest_remote_ctn = chan.get_latest_ctn(REMOTE)
        next_remote_ctn = chan.get_next_ctn(REMOTE)
        assert self.features.supports(LnFeatures.OPTION_DATA_LOSS_PROTECT_OPT)
        # send message
        if chan.is_static_remotekey_enabled():
            latest_secret, latest_point = chan.get_secret_and_point(LOCAL, 0)
        else:
            latest_secret, latest_point = chan.get_secret_and_point(LOCAL, latest_local_ctn)
        if oldest_unrevoked_remote_ctn == 0:
            last_rev_secret = 0
        else:
            last_rev_index = oldest_unrevoked_remote_ctn - 1
            last_rev_secret = chan.revocation_store.retrieve_secret(RevocationStore.START_INDEX - last_rev_index)
        self.send_message(
            "channel_reestablish",
            channel_id=chan_id,
            next_commitment_number=next_local_ctn,
            next_revocation_number=oldest_unrevoked_remote_ctn,
            your_last_per_commitment_secret=last_rev_secret,
            my_current_per_commitment_point=latest_point)
        self.logger.info(f'channel_reestablish ({chan.get_id_for_log()}): sent channel_reestablish with '
                         f'(next_local_ctn={next_local_ctn}, '
                         f'oldest_unrevoked_remote_ctn={oldest_unrevoked_remote_ctn})')
        while True:
            try:
                msg = await self.wait_for_message('channel_reestablish', chan_id)
                break
            except asyncio.TimeoutError:
                self.logger.info('waiting to receive channel_reestablish...')
                continue
        their_next_local_ctn = msg["next_commitment_number"]
        their_oldest_unrevoked_remote_ctn = msg["next_revocation_number"]
        # the DLP fields are optional in the message, hence .get()
        their_local_pcp = msg.get("my_current_per_commitment_point")
        their_claim_of_our_last_per_commitment_secret = msg.get("your_last_per_commitment_secret")
        self.logger.info(f'channel_reestablish ({chan.get_id_for_log()}): received channel_reestablish with '
                         f'(their_next_local_ctn={their_next_local_ctn}, '
                         f'their_oldest_unrevoked_remote_ctn={their_oldest_unrevoked_remote_ctn})')
        # sanity checks of received values
        if their_next_local_ctn < 0:
            raise RemoteMisbehaving(f"channel reestablish: their_next_local_ctn < 0")
        if their_oldest_unrevoked_remote_ctn < 0:
            raise RemoteMisbehaving(f"channel reestablish: their_oldest_unrevoked_remote_ctn < 0")
        # Replay un-acked local updates (including commitment_signed) byte-for-byte.
        # If we have sent them a commitment signature that they "lost" (due to disconnect),
        # we need to make sure we replay the same local updates, as otherwise they could
        # end up with two (or more) signed valid commitment transactions at the same ctn.
        # Multiple valid ctxs at the same ctn is a major headache for pre-signing spending txns,
        # e.g. for watchtowers, hence we must ensure these ctxs coincide.
        # We replay the local updates even if they were not yet committed.
        unacked = chan.hm.get_unacked_local_updates()
        n_replayed_msgs = 0
        for ctn, messages in unacked.items():
            if ctn < their_next_local_ctn:
                # They claim to have received these messages and the corresponding
                # commitment_signed, hence we must not replay them.
                continue
            for raw_upd_msg in messages:
                self.transport.send_bytes(raw_upd_msg)
                n_replayed_msgs += 1
        self.logger.info(f'channel_reestablish ({chan.get_id_for_log()}): replayed {n_replayed_msgs} unacked messages')
        we_are_ahead = False
        they_are_ahead = False
        # compare remote ctns
        if next_remote_ctn != their_next_local_ctn:
            if their_next_local_ctn == latest_remote_ctn and chan.hm.is_revack_pending(REMOTE):
                # We replayed the local updates (see above), which should have contained a commitment_signed
                # (due to is_revack_pending being true), and this should have remedied this situation.
                pass
            else:
                self.logger.warning(f"channel_reestablish ({chan.get_id_for_log()}): "
                                    f"expected remote ctn {next_remote_ctn}, got {their_next_local_ctn}")
                if their_next_local_ctn < next_remote_ctn:
                    we_are_ahead = True
                else:
                    they_are_ahead = True
        # compare local ctns
        if oldest_unrevoked_local_ctn != their_oldest_unrevoked_remote_ctn:
            if oldest_unrevoked_local_ctn - 1 == their_oldest_unrevoked_remote_ctn:
                # A node:
                #    if next_revocation_number is equal to the commitment number of the last revoke_and_ack
                #    the receiving node sent, AND the receiving node hasn't already received a closing_signed:
                #        MUST re-send the revoke_and_ack.
                last_secret, last_point = chan.get_secret_and_point(LOCAL, oldest_unrevoked_local_ctn - 1)
                next_secret, next_point = chan.get_secret_and_point(LOCAL, oldest_unrevoked_local_ctn + 1)
                self.send_message(
                    "revoke_and_ack",
                    channel_id=chan.channel_id,
                    per_commitment_secret=last_secret,
                    next_per_commitment_point=next_point)
            else:
                self.logger.warning(f"channel_reestablish ({chan.get_id_for_log()}): "
                                    f"expected local ctn {oldest_unrevoked_local_ctn}, got {their_oldest_unrevoked_remote_ctn}")
                if their_oldest_unrevoked_remote_ctn < oldest_unrevoked_local_ctn:
                    we_are_ahead = True
                else:
                    they_are_ahead = True
        # option_data_loss_protect
        def are_datalossprotect_fields_valid() -> bool:
            """Validate the remote's DLP fields against our own secrets/points."""
            if their_local_pcp is None or their_claim_of_our_last_per_commitment_secret is None:
                return False
            if their_oldest_unrevoked_remote_ctn > 0:
                our_pcs, __ = chan.get_secret_and_point(LOCAL, their_oldest_unrevoked_remote_ctn - 1)
            else:
                assert their_oldest_unrevoked_remote_ctn == 0
                our_pcs = bytes(32)
            if our_pcs != their_claim_of_our_last_per_commitment_secret:
                self.logger.error(f"channel_reestablish ({chan.get_id_for_log()}): "
                                  f"(DLP) local PCS mismatch: {bh2u(our_pcs)} != {bh2u(their_claim_of_our_last_per_commitment_secret)}")
                return False
            if chan.is_static_remotekey_enabled():
                return True
            try:
                __, our_remote_pcp = chan.get_secret_and_point(REMOTE, their_next_local_ctn - 1)
            except RemoteCtnTooFarInFuture:
                pass
            else:
                if our_remote_pcp != their_local_pcp:
                    self.logger.error(f"channel_reestablish ({chan.get_id_for_log()}): "
                                      f"(DLP) remote PCP mismatch: {bh2u(our_remote_pcp)} != {bh2u(their_local_pcp)}")
                    return False
            return True
        if not are_datalossprotect_fields_valid():
            raise RemoteMisbehaving("channel_reestablish: data loss protect fields invalid")
        if they_are_ahead:
            self.logger.warning(f"channel_reestablish ({chan.get_id_for_log()}): "
                                f"remote is ahead of us! They should force-close. Remote PCP: {bh2u(their_local_pcp)}")
            # data_loss_protect_remote_pcp is used in lnsweep
            chan.set_data_loss_protect_remote_pcp(their_next_local_ctn - 1, their_local_pcp)
            self.lnworker.save_channel(chan)
            chan.peer_state = PeerState.BAD
            return
        elif we_are_ahead:
            self.logger.warning(f"channel_reestablish ({chan.get_id_for_log()}): we are ahead of remote! trying to force-close.")
            await self.lnworker.try_force_closing(chan_id)
            return
        chan.peer_state = PeerState.GOOD
        if chan.is_funded() and their_next_local_ctn == next_local_ctn == 1:
            self.send_funding_locked(chan)
        # checks done
        if chan.is_funded() and chan.config[LOCAL].funding_locked_received:
            self.mark_open(chan)
        util.trigger_callback('channel', self.lnworker.wallet, chan)
        # if we have sent a previous shutdown, it must be retransmitted (Bolt2)
        if chan.get_state() == ChannelState.SHUTDOWN:
            await self.send_shutdown(chan)
def send_funding_locked(self, chan: Channel):
    """Send our funding_locked for *chan*, revealing the next (second)
    per-commitment point, and mark the channel open if the peer's
    funding_locked was already received."""
    secret_index = RevocationStore.START_INDEX - 1
    secret = get_per_commitment_secret_from_seed(
        chan.config[LOCAL].per_commitment_secret_seed, secret_index)
    second_point = secret_to_pubkey(int.from_bytes(secret, 'big'))
    # note: if funding_locked was not yet received, we might send it multiple times
    self.send_message(
        "funding_locked",
        channel_id=chan.channel_id,
        next_per_commitment_point=second_point,
    )
    if chan.is_funded() and chan.config[LOCAL].funding_locked_received:
        self.mark_open(chan)
def on_funding_locked(self, chan: Channel, payload):
    """Handle the peer's funding_locked: record their next per-commitment
    point (once), persist the channel, and open it if funded."""
    self.logger.info(f"on_funding_locked. channel: {bh2u(chan.channel_id)}")
    local_cfg = chan.config[LOCAL]
    if not local_cfg.funding_locked_received:
        # first time we see their funding_locked: remember their point
        chan.config[REMOTE].next_per_commitment_point = payload["next_per_commitment_point"]
        local_cfg.funding_locked_received = True
        self.lnworker.save_channel(chan)
    if chan.is_funded():
        self.mark_open(chan)
def on_network_update(self, chan: Channel, funding_tx_depth: int):
    """
    Only called when the channel is OPEN.
    Runs on the Network thread.
    """
    if not chan.config[LOCAL].was_announced and funding_tx_depth >= 6:
        # don't announce our channels
        # FIXME should this be a field in chan.local_state maybe?
        return
        # NOTE(review): everything below the `return` is unreachable —
        # channel announcement appears to be deliberately disabled here
        # (matches upstream Electrum). Confirm this is intended before
        # re-enabling; the source this was pasted from had its indentation
        # stripped, so the original nesting is ambiguous.
        chan.config[LOCAL].was_announced = True
        self.lnworker.save_channel(chan)
        coro = self.handle_announcements(chan)
        asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
@log_exceptions
async def handle_announcements(self, chan: Channel):
    """Exchange announcement_signatures with the peer, verify their
    signatures over the announcement hash, and send channel_announcement.

    Raises:
        Exception: if the remote bitcoin or node signature fails to verify.
    """
    h, local_node_sig, local_bitcoin_sig = self.send_announcement_signatures(chan)
    announcement_signatures_msg = await self.announcement_signatures[chan.channel_id].get()
    remote_node_sig = announcement_signatures_msg["node_signature"]
    remote_bitcoin_sig = announcement_signatures_msg["bitcoin_signature"]
    if not ecc.verify_signature(chan.config[REMOTE].multisig_key.pubkey, remote_bitcoin_sig, h):
        raise Exception("bitcoin_sig invalid in announcement_signatures")
    if not ecc.verify_signature(self.pubkey, remote_node_sig, h):
        raise Exception("node_sig invalid in announcement_signatures")
    # The announcement lists both endpoints sorted by node id; keep the
    # sig/key lists aligned with that ordering by reversing all of them
    # together when our ordering is descending.
    node_sigs = [remote_node_sig, local_node_sig]
    bitcoin_sigs = [remote_bitcoin_sig, local_bitcoin_sig]
    bitcoin_keys = [chan.config[REMOTE].multisig_key.pubkey, chan.config[LOCAL].multisig_key.pubkey]
    if self.node_ids[0] > self.node_ids[1]:
        node_sigs.reverse()
        bitcoin_sigs.reverse()
        node_ids = list(reversed(self.node_ids))
        bitcoin_keys.reverse()
    else:
        node_ids = self.node_ids
    self.send_message("channel_announcement",
                      node_signatures_1=node_sigs[0],
                      node_signatures_2=node_sigs[1],
                      bitcoin_signature_1=bitcoin_sigs[0],
                      bitcoin_signature_2=bitcoin_sigs[1],
                      len=0,
                      #features not set (defaults to zeros)
                      chain_hash=constants.net.rev_genesis_bytes(),
                      short_channel_id=chan.short_channel_id,
                      node_id_1=node_ids[0],
                      node_id_2=node_ids[1],
                      bitcoin_key_1=bitcoin_keys[0],
                      bitcoin_key_2=bitcoin_keys[1]
                      )
def mark_open(self, chan: Channel):
    """Transition *chan* from FUNDED to OPEN, apply any queued remote
    channel_update, and optionally advertise our outgoing edge."""
    assert chan.is_funded()
    # only allow state transition from "FUNDED" to "OPEN"
    old_state = chan.get_state()
    if old_state == ChannelState.OPEN:
        return
    if old_state != ChannelState.FUNDED:
        self.logger.info(f"cannot mark open ({chan.get_id_for_log()}), current state: {repr(old_state)}")
        return
    assert chan.config[LOCAL].funding_locked_received
    chan.set_state(ChannelState.OPEN)
    util.trigger_callback('channel', self.lnworker.wallet, chan)
    # peer may have sent us a channel update for the incoming direction previously
    queued_update = self.orphan_channel_updates.get(chan.short_channel_id)
    if queued_update:
        chan.set_remote_update(queued_update['raw'])
    self.logger.info(f"CHANNEL OPENING COMPLETED ({chan.get_id_for_log()})")
    if self.network.config.get('lightning_forward_payments', False):
        # send channel_update of outgoing edge to peer,
        # so that channel can be used to to receive payments
        self.logger.info(f"sending channel update for outgoing edge ({chan.get_id_for_log()})")
        self.transport.send_bytes(chan.get_outgoing_gossip_channel_update())
def send_announcement_signatures(self, chan: Channel):
chan_ann = chan.construct_channel_announcement_without_sigs()
preimage = chan_ann[256+2:]
msg_hash = sha256d(preimage)
bitcoin_signature = ecc.ECPrivkey(chan.config[LOCAL].multisig_key.privkey).sign(msg_hash, sig_string_from_r_and_s)
node_signature = ecc.ECPrivkey(self.privkey).sign(msg_hash, sig_string_from_r_and_s)
self.send_message("announcement_signatures",
channel_id=chan.channel_id,
short_channel_id=chan.short_channel_id,
node_signature=node_signature,
bitcoin_signature=bitcoin_signature
| |
<reponame>vivlai/qanta
import textwrap
from collections import defaultdict, Counter
import argparse
from csv import DictReader
from time import sleep
import os
kSHOW_RIGHT = False
kPAUSE = .25
kSYSTEM = "QANTA"
kBIGNUMBERS = {-1:
"""
88888888
88888888
""",
0:
"""
.n~~%x.
x88X 888.
X888X 8888L
X8888X 88888
88888X 88888X
88888X 88888X
88888X 88888f
48888X 88888
?888X 8888"
"88X 88*`
^"==="`
""",
1:
"""
oe
.@88
==*88888
88888
88888
88888
88888
88888
88888
88888
'**%%%%%%**
""",
2:
"""
.--~*teu.
dF 988Nx
d888b `8888>
?8888> 98888F
"**" x88888~
d8888*`
z8**"` :
:?..... ..F
<""888888888~
8: "888888*
"" "**"`
""",
3:
"""
.x~~"*Weu.
d8Nu. 9888c
88888 98888
"***" 9888%
..@8*"
````"8Weu
.. ?8888L
:@88N '8888N
*8888~ '8888F
'*8"` 9888%
`~===*%"`
""",
4:
"""
xeee
d888R
d8888R
@ 8888R
.P 8888R
:F 8888R
x" 8888R
d8eeeee88888eer
8888R
8888R
"*%%%%%%**~
""",
5:
"""
cuuu....uK
888888888
8*888**"
> .....
Lz" ^888Nu
F '8888k
.. 88888>
@888L 88888
'8888F 8888F
%8F" d888"
^"===*%"`
""",
6:
"""
.ue~~%u.
.d88 z88i
x888E *8888
:8888E ^""
98888E.=tWc.
98888N '888N
98888E 8888E
'8888E 8888E
?888E 8888"
"88& 888"
""==*""
""",
7:
"""
dL ud8Nu :8c
8Fd888888L %8
4N88888888cuR
4F ^""%""d
d .z8
^ z888
d8888'
888888
:888888
888888
'%**%
""",
8:
"""
u+=~~~+u.
z8F `8N.
d88L 98E
98888bu.. .@*
"88888888NNu.
"*8888888888i
.zf""*8888888L
d8F ^%888E
88> `88~
'%N. d*"
^"====="`
""",
9:
"""
.xn!~%x.
x888 888.
X8888 8888:
88888 X8888
88888 88888>
`8888 :88888X
`"**~ 88888>
.xx. 88888
'8888> 8888~
888" :88%
^"===""
"""}
class kCOLORS:
    """ANSI escape codes for colored terminal output, plus a helper that
    prints text wrapped in a color code and the reset code."""
    PURPLE = '\033[95m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'

    @staticmethod
    def print(text, color="RED", end='\n'):
        # Look the escape code up by attribute name, e.g. "GREEN".
        prefix = getattr(kCOLORS, color)
        print("{}{}{}".format(prefix, text, kCOLORS.ENDC), end=end)
def write_readable(filename, ids, questions, power):
    """Write a human-readable transcript of the given questions.

    Fixes over the previous version: the function now actually honors its
    *filename* and *ids* parameters (it used to write to the global
    ``flags.readable`` and iterate an undefined ``question_ids``), closes
    the output file, and ignores empty power marks (an empty string is a
    substring of everything, which made the marker fire on sentence one
    and garbled the replacement).

    Args:
        filename: output path.
        ids: iterable of question ids, in display order.
        questions: mapping-like object; ``questions[qid]`` maps sentence
            index -> sentence text, ``questions.answer(qid)`` is the answer.
        power: callable qid -> power-mark word ("" when none); the first
            sentence containing it gets a "(*)" marker inserted before it.
    """
    with open(filename, 'w') as out:
        for question_num, qid in enumerate(ids, start=1):
            out.write("%i) " % question_num)
            power_found = False
            mark = power(qid)
            for sent in questions[qid]:
                text = questions[qid][sent]
                if mark and not power_found and mark.lower() in text.lower():
                    power_found = True
                    out.write("%s " % text.replace(mark, "(*) %s" % mark))
                else:
                    out.write("%s " % text)
            out.write("\nANSWER: %s\n\n" % questions.answer(qid))
def clear_screen():
    """Clear the terminal using the platform-appropriate shell command."""
    print("Clearing")
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
class PowerPositions:
    """Per-question power-mark words, loaded from a CSV with columns
    ``question`` and ``word``. Calling the instance with a question id
    returns its mark, or "" when the question has none (or the file was
    missing)."""

    def __init__(self, filename):
        self._power_marks = {}
        try:
            for row in DictReader(open(filename, 'r')):
                self._power_marks[int(row['question'])] = row['word']
            print("Read power marks from %s: %s ..." %
                  (filename, str(self._power_marks.keys())[1:69]))
        except FileNotFoundError:
            # A missing power file simply means no questions have marks.
            pass

    def __call__(self, question):
        return self._power_marks.get(question, "")
# Utilities for single character input
class _Getch:
    """Gets a single character from standard input without echoing it.

    Picks the Windows implementation when msvcrt is importable, otherwise
    falls back to the Unix (termios) implementation."""

    def __init__(self):
        try:
            self.impl = _GetchWindows()
        except ImportError:
            self.impl = _GetchUnix()

    def __call__(self):
        return self.impl()
class _GetchUnix:
    """POSIX single-character reader: switch the terminal to raw mode,
    read one byte from stdin, then restore the previous settings."""

    def __init__(self):
        # Probe that the required modules exist at construction time so a
        # failure surfaces as ImportError (mirrors _GetchWindows).
        import tty, sys

    def __call__(self):
        import sys, tty, termios
        fd = sys.stdin.fileno()
        # Remember the current terminal settings so we can restore them.
        old_settings = termios.tcgetattr(fd)
        try:
            # Raw mode: keystrokes are delivered immediately, without echo.
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(1)
            sys.stdin.flush()
        finally:
            # Always restore the previous mode, even if the read fails.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch
class _GetchWindows:
    """Windows single-character reader, backed by msvcrt.getch."""

    def __init__(self):
        # Importing here makes construction raise ImportError on
        # non-Windows platforms, which _Getch uses to select the fallback.
        import msvcrt

    def __call__(self):
        import msvcrt
        return msvcrt.getch()
getch = _Getch()
def show_score(left_score, right_score,
               left_header="HUMAN", right_header="COMPUTER",
               left_color="GREEN", right_color="BLUE",
               flush=True):
    """Draw both scores side by side as large colored ASCII digits.

    Args:
        left_score, right_score: integer scores; a negative score shows the
            minus-sign art block in place of the hundreds digit.
        left_header, right_header: column titles printed above the digits.
        left_color, right_color: kCOLORS attribute names for each column.
        flush: clear the screen before drawing.
    """
    assert isinstance(left_score, int)
    assert isinstance(right_score, int)
    if flush:
        clear_screen()
    # Print the header
    print("%-15s" % "", end='')
    kCOLORS.print("%-15s" % left_header, left_color, end='')
    print("%-30s" % "", end='')
    kCOLORS.print("%-15s\n" % right_header, right_color)
    for line in range(1, 15):
        for num, color in [(left_score, left_color),
                           (right_score, right_color)]:
            for place in [100, 10, 1]:
                if place == 100 and num < 0:
                    # hundreds digit slot shows the minus-sign block
                    val = -1
                else:
                    val = (abs(num) % (place * 10)) // place
                # BUGFIX: some art blocks (e.g. the minus sign) have fewer
                # than 15 rows; indexing past the end used to raise
                # IndexError. Pad short blocks with blank rows instead.
                rows = kBIGNUMBERS[val].split("\n")
                art_row = rows[line] if line < len(rows) else ""
                kCOLORS.print("%-15s" % art_row, color=color, end=' ')
            print("|", end=" ")
        print(" ")
class Guess:
    """One system guess: the candidate page, its evidence-feature string,
    whether the system committed to buzzing on it (final), and its score
    weight."""

    def __init__(self, page, evidence, final, weight):
        self.page, self.evidence = page, evidence
        self.final, self.weight = final, weight
class Buzzes:
    """Buzz decisions loaded from CSV.

    Internal shape: question -> (sentence, word) -> {page: Guess}; each CSV
    row (columns question, sentence, word, page, evidence, final, weight)
    contributes one Guess at one reveal position.
    """

    def __init__(self, buzz_file):
        buzzfile = DictReader(open(buzz_file, 'r'))
        self._buzzes = defaultdict(dict)
        for r in buzzfile:
            question, sent, word = int(r["question"]), int(r["sentence"]), int(r["word"])
            if not (sent, word) in self._buzzes[question]:
                self._buzzes[question][(sent, word)] = {}
            self._buzzes[question][(sent, word)][r["page"]] = \
                Guess(r["page"], r["evidence"], int(r["final"]), float(r["weight"]))

    def current_guesses(self, question, sent, word):
        """Return the guess dict at the latest recorded position that is at
        or before (sent, word); {} when nothing has been recorded yet.
        Negative *word* values are clamped to 0."""
        try:
            # max() over (sentence, word) tuples picks the most recent
            # qualifying position; ValueError means none qualify.
            ss, ww = max(x for x in self._buzzes[question] if
                         x[0] < sent or (x[0] == sent and x[1] <= max(0, word)))
        except ValueError:
            return {}
        assert (ss, ww) in self._buzzes[question]
        return self._buzzes[question][(ss, ww)]

    def __iter__(self):
        # iterate over question ids
        for ii in self._buzzes:
            yield ii

    def final_guess(self, question):
        """Return the page of a final-marked guess, scanning positions from
        the latest backwards; None when the system never buzzed."""
        for ss, ww in sorted(self._buzzes[question], reverse=True):
            for bb in self._buzzes[question][(ss, ww)]:
                if self._buzzes[question][(ss, ww)][bb].final:
                    return bb
        return None
class Questions:
    """Question sentences and answers read from a CSV with columns
    id, sent, text, answer. Indexing by question id yields a dict mapping
    sentence index -> sentence text; iteration yields question ids."""

    def __init__(self, question_file):
        self._questions = defaultdict(dict)
        self._answers = defaultdict(str)
        for row in DictReader(open(question_file, 'r')):
            qnum = int(row["id"])
            self._questions[qnum][int(row["sent"])] = row["text"]
            self._answers[qnum] = row["answer"].strip()

    def __iter__(self):
        return iter(self._questions)

    def __getitem__(self, val):
        return self._questions[val]

    def answer(self, val):
        return self._answers[val]
def select_features(evidence_str, allowed_features):
    """Return the space-joined tokens of *evidence_str* that are also in
    *allowed_features*, preserving their original order (and duplicates)."""
    kept = (tok for tok in evidence_str.split() if tok in allowed_features)
    return ' '.join(kept)
def format_display(display_num, question_text, sent, word, current_guesses,
                   answer=None, guess_limit=5, points=10):
    """Render the question as revealed so far plus the system's top guesses.

    Args:
        display_num: question number shown in the header.
        question_text: dict of sentence index -> sentence text.
        sent, word: reveal position; sentences before *sent* are shown in
            full, plus the first *word* words of sentence *sent*.
        current_guesses: dict page -> Guess at this position.
        answer: gold page; a matching guess is shown as ***CORRECT***.
        guess_limit: how many guesses (by descending weight) to show.
        points: point value shown in the header.
    """
    sep = "".join(["-"] * 80)
    current_text = ""
    for ss in range(sent):
        current_text += "%s " % question_text[ss]
    current_text += " ".join(question_text[sent].split()[:word])
    current_text = "\n".join(textwrap.wrap(current_text, 80))
    report = "Question %i: %i points\n%s\n%s\n%s\n\n" % \
        (display_num, points, sep, current_text, sep)
    top_guesses = sorted(current_guesses,
                         key=lambda x: current_guesses[x].weight, reverse=True)[:guess_limit]
    # Count how many of the displayed guesses mention each feature; only
    # features unique to a single guess are shown, so the evidence column
    # highlights what distinguishes each candidate.
    duplicated_feature_counter = Counter()
    for g in top_guesses:
        evidence = current_guesses[g].evidence.split()
        for f in evidence:
            duplicated_feature_counter[f] += 1
    allowed_features = set()
    for k, v in duplicated_feature_counter.items():
        if v == 1:
            allowed_features.add(k)
    # Disabled debug dump (intentionally dead: `if False`).
    if False and len(top_guesses) > 0:
        print(top_guesses)
        print(allowed_features)
        print(duplicated_feature_counter)
        raise Exception()
    for gg in top_guesses:
        guess = current_guesses[gg]
        # Evidence strings are truncated to 100 characters for display.
        if guess.page == answer:
            report += "%s\t%f\t%s\n" % (
                "***CORRECT***",
                guess.weight,
                select_features(guess.evidence, allowed_features)[:100]
            )
        else:
            report += "%s\t%f\t%s\n" % (
                guess.page,
                guess.weight,
                select_features(guess.evidence, allowed_features)[:100]
            )
    return report
def load_finals(final_file):
    """Map question number -> final answer, read from a CSV with columns
    question and answer."""
    reader = DictReader(open(final_file))
    return {int(row['question']): row['answer'] for row in reader}
def interpret_keypress():
    """Read one keypress and interpret it.

    Returns an int when a digit was pressed, the string "direction" for an
    arrow key, " " for space, and None for anything else. Pressing Q exits
    via an exception.
    """
    press = getch()
    if press == '\x1b':
        # Arrow keys arrive as a three-byte escape sequence; consume the
        # remaining two bytes and collapse the whole thing into one token.
        getch()
        getch()
        press = "direction"
    if press == 'Q':
        raise Exception('Exiting expo by user request from pressing Q')
    if press not in ("direction", " "):
        try:
            press = int(press)
        except ValueError:
            press = None
    return press
def answer(ans, print_string="%s says:" % kSYSTEM):
    """Announce *ans*: optionally print the speaker line, play a chime,
    speak the answer aloud (macOS `afplay`/`say`), then print it."""
    if print_string:
        print(print_string)
    os.system("afplay /System/Library/Sounds/Glass.aiff")
    # Strip quotes and any parenthetical before handing the text to `say`.
    spoken = ans.replace("'", "").split("(")[0]
    os.system("say %s" % spoken)
    sleep(kPAUSE)
    print(ans)
def present_question(display_num, question_id, question_text, buzzes, final,
                     correct, human=0, computer=0, power="10"):
    """Play one question interactively, word by word.

    Reveals the question a word at a time, letting a human buzz via
    keypress and the computer buzz via its recorded `final` guesses.

    Args:
        display_num: question number for display.
        question_id: id used to look up buzzes.
        question_text: dict sentence index -> text.
        buzzes: Buzzes instance with the system's guesses.
        final: the system's end-of-question answer.
        correct: the gold answer page.
        human, computer: running scores coming in.
        power: power-mark word; questions are worth 15 before it is
            revealed and 10 after.

    Returns:
        (new human score, new computer score, answer string credited).
    """
    human_delta = 0
    computer_delta = 0
    question_value = 15
    for ss in sorted(question_text):
        words = question_text[ss].split()
        for ii, ww in enumerate(words):
            # Past the power mark, the question drops to 10 points.
            if str.lower(ww).startswith(str.lower(power)):
                question_value = 10
            press = interpret_keypress()
            # Guesses are looked up slightly behind the display (ii - 2).
            current_guesses = buzzes.current_guesses(question_id, ss, ii - 2)
            buzz_now = [x for x in current_guesses.values() if x.final]
            assert len(buzz_now) < 2, "Cannot buzz on more than one thing"
            if isinstance(press, int):
                # A human player buzzed: judge their spoken answer via
                # moderator input ('+' correct, '-' wrong, else re-prompt).
                os.system("afplay /System/Library/Sounds/Glass.aiff")
                response = None
                while response is None:
                    response = input("Player %i, provide an answer:\t" % press)
                    if '+' in response:
                        return (human + question_value,
                                computer + computer_delta,
                                response[1:])
                    elif '-' in response:
                        # If the computer already missed, the question ends.
                        if computer_delta == -5:
                            return human, computer + computer_delta, response[1:]
                        else:
                            human_delta = -5
                    else:
                        response = None
            # Don't buzz if anyone else has gotten it wrong
            elif buzz_now and human_delta == 0 and computer_delta == 0:
                show_score(human + human_delta,
                           computer + computer_delta,
                           "HUMAN", "COMPUTER")
                print(format_display(display_num, question_text, ss, ii + 1,
                                     current_guesses, answer=correct,
                                     points=question_value))
                answer(buzz_now[0].page)
                if buzz_now[0].page == correct:
                    print("Computer guesses: %s (correct)" % buzz_now[0].page)
                    sleep(1)
                    print(format_display(display_num, question_text, max(question_text), 0,
                                         current_guesses, answer=correct, points=question_value))
                    return (human + human_delta, computer + question_value,
                            buzz_now[0].page)
                else:
                    print("Computer guesses: %s (wrong)" % buzz_now[0].page)
                    sleep(1)
                    computer_delta = -5
                    show_score(human + human_delta,
                               computer + computer_delta,
                               "HUMAN", "COMPUTER")
                    print(format_display(display_num, question_text, max(question_text), 0,
                                         current_guesses, answer=correct, points=question_value))
            else:
                # No buzz this word: just refresh the scoreboard/display.
                show_score(human + human_delta,
                           computer + computer_delta,
                           "HUMAN", "COMPUTER")
                print(format_display(display_num, question_text, ss, ii + 1,
                                     current_guesses, answer=correct,
                                     points=question_value))
    # End of question: the computer gives its final answer if it has not
    # already missed, then the human gets a chance if they have not.
    if computer_delta == 0:
        answer(final)
        if final == correct:
            return human + human_delta, computer + 10, final
        else:
            print("Incorrect answer: %s" % final)
    if human_delta == 0:
        response = None
        while response is None:
            os.system("afplay /System/Library/Sounds/Glass.aiff")
            response = input("Player, take a guess:\t")
            if '+' in response:
                return (human + 10,
                        computer + computer_delta,
                        response[1:])
            elif '-' in response:
                return (human, computer + computer_delta,
                        response[1:])
            else:
                response = None
    return human + human_delta, computer + computer_delta, ""
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument('--questions', type=str, default='questions.csv')
parser.add_argument('--buzzes', type=str, default="ir_buzz.csv")
parser.add_argument('--skip', type=int, default=0)
parser.add_argument('--output', type=str, default="competition.csv")
parser.add_argument('--finals', type=str, default="finals.csv")
parser.add_argument('--power', type=str, default="power.csv")
parser.add_argument('--max_questions', type=int, default=40)
parser.add_argument('--readable', type=str, default="readable.txt")
flags = parser.parse_args()
questions = Questions(flags.questions)
buzzes = Buzzes(flags.buzzes)
finals = load_finals(flags.finals)
power = PowerPositions(flags.power)
print("Done loading data")
clear_screen()
current_players = set()
if True:
print("Time for a buzzer check")
players_needed = [1, 2, 3, 4]
while len(current_players) < len(players_needed):
print("Player %i, please buzz in" % min(x for | |
not None:
# mask first to only process the stuff that goes into the loss function!
raw_node_in = raw_node_in[readout_mask]
raw_node_out = raw_node_out[readout_mask]
if graph_nodes_list is not None:
graph_nodes_list = graph_nodes_list[readout_mask]
gate_input = torch.cat((raw_node_in, raw_node_out), dim=-1)
gating = torch.sigmoid(self.regression_gate(gate_input))
if not self.use_tanh_readout:
nodewise_readout = gating * self.regression_transform(raw_node_out)
else:
nodewise_readout = gating * torch.tanh(self.regression_transform(raw_node_out))
graph_readout = None
if self.has_graph_labels:
assert graph_nodes_list is not None and num_graphs is not None, 'has_graph_labels requires graph_nodes_list and num_graphs tensors.'
# aggregate via sums over graphs
device = raw_node_out.device
graph_readout = torch.zeros(num_graphs, self.num_classes, device=device)
graph_readout.index_add_(
dim=0, index=graph_nodes_list, source=nodewise_readout
)
if self.use_tanh_readout:
graph_readout = torch.tanh(graph_readout)
return nodewise_readout, graph_readout
class LinearNet(nn.Module):
    """Single Linear layer with weight dropout and Xavier-uniform
    initialization; computes :math:`y = xA^T + b`.

    Args:
        in_features: size of each input sample.
        out_features: size of each output sample.
        bias: if ``False``, no additive bias is learned. Default ``True``.
        dropout: dropout probability applied to the weight matrix itself
            (only while training).
        gain: gain passed to the Xavier initializer.

    Shape:
        - Input: ``(N, *, in_features)``
        - Output: ``(N, *, out_features)``
    """

    def __init__(self, in_features, out_features, bias=True, dropout=0.0, gain=1.0):
        super().__init__()
        self.dropout = dropout
        self.in_features = in_features
        self.out_features = out_features
        self.gain = gain
        # NOTE: the weight parameter is (historically) named `test`; the
        # name is kept so existing checkpoints/state_dicts keep loading.
        self.test = nn.Parameter(torch.Tensor(out_features, in_features))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter("bias", None)
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-uniform weights, zero bias."""
        nn.init.xavier_uniform_(self.test, gain=self.gain)
        if self.bias is not None:
            nn.init.zeros_(self.bias)

    def forward(self, input):
        weight = (
            F.dropout(self.test, p=self.dropout, training=self.training)
            if self.dropout > 0.0
            else self.test
        )
        return F.linear(input, weight, self.bias)

    def extra_repr(self):
        return "in_features={}, out_features={}, bias={}, dropout={}".format(
            self.in_features, self.out_features, self.bias is not None, self.dropout,
        )
###########################################
# Mixing in graph-level features to readout
class AuxiliaryReadout(nn.Module):
    """Produces per-graph predictions by combining
    the per-graph predictions with auxiliary features.
    Note that this AuxiliaryReadout after Readout is probably a bad idea
    and BetterAuxiliaryReadout should be used instead."""

    def __init__(self, config):
        super().__init__()
        self.num_classes = config.num_classes
        # Optionally compress auxiliary feature magnitudes with log(1 + x).
        self.aux_in_log1p = getattr(config, "aux_in_log1p", False)
        assert (
            config.has_graph_labels
        ), "We expect aux readout in combination with graph labels, not node labels"
        # (removed a dead `self.feed_forward = None` that was immediately
        # overwritten below)
        self.batch_norm = nn.BatchNorm1d(config.num_classes + config.aux_in_size)
        self.feed_forward = nn.Sequential(
            nn.Linear(
                config.num_classes + config.aux_in_size, config.aux_in_layer_size,
            ),
            nn.ReLU(),
            nn.Dropout(config.output_dropout),
            nn.Linear(config.aux_in_layer_size, config.num_classes),
        )

    def forward(self, graph_features, auxiliary_features):
        """Return (combined logits, the untouched graph_features).

        Args:
            graph_features: <num_graphs, num_classes> per-graph predictions.
            auxiliary_features: <num_graphs, aux_in_size> extra features.
        """
        assert (
            graph_features.size()[0] == auxiliary_features.size()[0]
        ), "every graph needs aux_features. Dimension mismatch."
        if self.aux_in_log1p:
            # NOTE: in-place op mutates the caller's tensor; kept as-is for
            # backward compatibility.
            auxiliary_features.log1p_()
        aggregate_features = torch.cat((graph_features, auxiliary_features), dim=1)
        normed_features = self.batch_norm(aggregate_features)
        out = self.feed_forward(normed_features)
        return out, graph_features
class BetterAuxiliaryReadout(nn.Module):
    """Produces per-graph predictions by combining
    the raw GNN Encoder output with auxiliary features.
    The difference to AuxReadout(Readout()) is that the aux info
    is concat'ed before the nodewise readout and not after the
    reduction to graphwise predictions.
    """

    def __init__(self, config):
        super().__init__()
        # Optionally compress auxiliary feature magnitudes with log(1 + x).
        self.aux_in_log1p = getattr(config, "aux_in_log1p", False)
        assert config.has_graph_labels, \
            "We expect aux readout in combination with graph labels, not node labels"
        self.has_graph_labels = config.has_graph_labels
        self.num_classes = config.num_classes
        # now with aux_in concat'ed and batchnorm
        self.regression_gate = nn.Sequential(
            nn.BatchNorm1d(2 * config.hidden_size + config.aux_in_size),
            LinearNet(2 * config.hidden_size + config.aux_in_size,
                      self.num_classes, dropout=config.output_dropout,
                      )
        )
        # now with aux_in concat'ed and with intermediate layer
        self.regression_transform = nn.Sequential(
            nn.BatchNorm1d(config.hidden_size + config.aux_in_size),
            LinearNet(config.hidden_size + config.aux_in_size,
                      config.aux_in_layer_size, dropout=config.output_dropout,
                      ),
            nn.ReLU(),
            LinearNet(config.aux_in_layer_size, config.num_classes),
        )

    def forward(self, raw_node_in, raw_node_out, graph_nodes_list, num_graphs, auxiliary_features, readout_mask=None):
        """Gated nodewise readout with per-node aux features, summed per graph.

        Args:
            raw_node_in: initial node states (pre-GNN).
            raw_node_out: final node states (post-GNN).
            graph_nodes_list: graph index of each node, used both to
                broadcast aux features to nodes and to aggregate.
            num_graphs: number of graphs in the batch.
            auxiliary_features: per-graph auxiliary feature tensor.
            readout_mask: optional boolean node mask; when given, only
                masked nodes contribute.

        Returns:
            (nodewise_readout, graph_readout) — graph_readout is the
            per-graph sum of nodewise readouts.
        """
        assert graph_nodes_list is not None and auxiliary_features is not None, 'need those'
        if readout_mask is not None:
            # mask first to only process the stuff that goes into the loss function!
            raw_node_in = raw_node_in[readout_mask]
            raw_node_out = raw_node_out[readout_mask]
            if graph_nodes_list is not None:
                graph_nodes_list = graph_nodes_list[readout_mask]
        if self.aux_in_log1p:
            # NOTE(review): in-place op mutates the caller's tensor — confirm
            # callers do not reuse auxiliary_features afterwards.
            auxiliary_features.log1p_()
        # broadcast each graph's aux features to all of its nodes
        aux_by_node = torch.index_select(auxiliary_features, dim=0, index=graph_nodes_list)
        # info: the gate and regression include batch norm inside!
        gate_input = torch.cat((raw_node_in, raw_node_out, aux_by_node), dim=-1)
        gating = torch.sigmoid(self.regression_gate(gate_input))
        trafo_input = torch.cat((raw_node_out, aux_by_node), dim=-1)
        nodewise_readout = gating * self.regression_transform(trafo_input)
        graph_readout = None
        if self.has_graph_labels:
            assert graph_nodes_list is not None and num_graphs is not None, 'has_graph_labels requires graph_nodes_list and num_graphs tensors.'
            # aggregate via sums over graphs
            device = raw_node_out.device
            graph_readout = torch.zeros(num_graphs, self.num_classes, device=device)
            graph_readout.index_add_(
                dim=0, index=graph_nodes_list, source=nodewise_readout
            )
        return nodewise_readout, graph_readout
############################
# GNN Input: Embedding Layers
############################
#class NodeEmbeddingsForPretraining(nn.Module):
# """NodeEmbeddings with added embedding for [MASK] token."""
#
# def __init__(self, config):
# super().__init__()
#
# print("Initializing with random embeddings for pretraining.")
# self.node_embs = nn.Embedding(config.vocab_size + 1, config.emb_size)
#
# def forward(self, vocab_ids):
# embs = self.node_embs(vocab_ids)
# return embs
class NodeEmbeddings(nn.Module):
    """Construct node embeddings from node vocab ids.

    Args:
        config: must provide ``inst2vec_embeddings`` (one of "constant",
            "zero", "constant_random", "finetune", "random", "none"),
            ``vocab_size`` and ``emb_size``.
        pretrained_embeddings (Tensor, optional): weights for the
            Embedding; required for the "constant" and "finetune" modes.

    Forward:
        vocab_ids: <N, 1> -> node_states: <N, emb_size>
    """
    # TODO(github.com/ChrisCummins/ProGraML/issues/27):: Maybe LayerNorm and
    # Dropout on node_embeddings?
    # TODO(github.com/ChrisCummins/ProGraML/issues/27):: Make selector embs
    # trainable?

    def __init__(self, config, pretrained_embeddings=None):
        super().__init__()
        self.inst2vec_embeddings = config.inst2vec_embeddings
        self.emb_size = config.emb_size
        mode = config.inst2vec_embeddings
        if mode == "constant":
            print("Using pre-trained inst2vec embeddings frozen.")
            assert pretrained_embeddings is not None
            assert pretrained_embeddings.size()[0] == 8568, "Wrong number of embs; don't come here with MLM models!"
            self.node_embs = nn.Embedding.from_pretrained(
                pretrained_embeddings, freeze=True
            )
        elif mode == "zero":
            self.node_embs = nn.Embedding.from_pretrained(
                torch.zeros(config.vocab_size, config.emb_size), freeze=True
            )
        elif mode == "constant_random":
            self.node_embs = nn.Embedding.from_pretrained(
                torch.rand(config.vocab_size, config.emb_size), freeze=True
            )
        elif mode == "finetune":
            print("Fine-tuning inst2vec embeddings")
            assert pretrained_embeddings is not None
            assert pretrained_embeddings.size()[0] == 8568, "Wrong number of embs; don't come here with MLM models!"
            self.node_embs = nn.Embedding.from_pretrained(
                pretrained_embeddings, freeze=False
            )
        elif mode == "random":
            print("Initializing with random embeddings")
            self.node_embs = nn.Embedding(config.vocab_size, config.emb_size)
        elif mode == "none":
            print("Initializing with a embedding for statements and identifiers each.")
            # only two rows: one for statements, one for identifiers
            self.node_embs = nn.Embedding(2, config.emb_size)
        else:
            raise NotImplementedError(config.inst2vec_embeddings)

    def forward(self, vocab_ids, *ignored_args, **ignored_kwargs):
        if self.inst2vec_embeddings == 'none':
            # map IDs to 1 and everything else to 0
            ids = (vocab_ids == 8565).to(torch.long)  # !IDENTIFIER token id
            return self.node_embs(ids)
        # normal embeddings
        return self.node_embs(vocab_ids)
class NodeEmbeddingsWithSelectors(NodeEmbeddings):
    """Construct node embeddings as content embeddings + selector embeddings.
    Args:
        pretrained_embeddings (Tensor, optional) – FloatTensor containing weights for
        the Embedding. First dimension is being passed to Embedding as
        num_embeddings, second as embedding_dim.
    Forward
    Args:
        vocab_ids: <N, 1>
        selector_ids: <N, 1>
    Returns:
        node_states: <N, config.hidden_size>
    """

    def __init__(self, config, pretrained_embeddings=None):
        super().__init__(config, pretrained_embeddings)
        # Reuse the parent's forward as the content-embedding lookup so this
        # class's forward can concatenate selector embeddings onto it.
        # NOTE(review): this replaces the registered `node_embs` submodule
        # attribute with a bound method; recent torch versions raise
        # TypeError when assigning a non-Module over a child module —
        # confirm against the torch version in use.
        self.node_embs = super().forward
        assert config.use_selector_embeddings, "This Module is for use with use_selector_embeddings!"
        # Frozen 2-row table: selector id 0 -> (0, 50), 1 -> (50, 0).
        selector_init = torch.tensor(
            # TODO(github.com/ChrisCummins/ProGraML/issues/27): x50 is maybe a
            # problem for unrolling (for selector_embs)?
            [[0, 50.0], [50.0, 0]],
            dtype=torch.get_default_dtype(),
        )
        self.selector_embs = nn.Embedding.from_pretrained(
            selector_init, freeze=True
        )

    def forward(self, vocab_ids, selector_ids):
        node_embs = self.node_embs(vocab_ids)
        selector_embs = self.selector_embs(selector_ids)
        # <N, emb_size + 2>: content embedding concat selector embedding
        embs = torch.cat((node_embs, selector_embs), dim=1)
        return embs
#############################
# Loss Accuracy Prediction
#############################
class Loss(nn.Module):
    """Cross Entropy loss with weighted intermediate loss, and
    L2 loss if num_classes is just 1.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        if config.num_classes == 1:
            # regression-style setting: sigmoid(logit) compared to the
            # target under mean-squared error (see forward)
            # self.loss = nn.BCEWithLogitsLoss() # in: (N, *), target: (N, *)
            self.loss = nn.MSELoss()
            # self.loss = nn.L1Loss()
        else:
            # class labels '-1' don't contribute to the gradient!
            # however in most cases it will be more efficient to gather
            # the relevant data into a dense tensor
            self.loss = nn.CrossEntropyLoss(ignore_index=-1, reduction='mean')
            #loss = F.nll_loss(
            #    F.log_softmax(logits, dim=-1, dtype=torch.float32),
            #    targets,
            #    reduction='mean',
            #    ignore_index=-1,
            #)

    def forward(self, logits, targets):
        """inputs: (logits) or (logits, intermediate_logits)"""
        # `logits` is expected to be an indexable pair:
        # logits[0] = final logits, logits[1] = intermediate (pre-aux) logits.
        if self.config.num_classes == 1:
            # squash to (0, 1) before the L2 comparison
            l = torch.sigmoid(logits[0])
            logits = (l, logits[1])
        loss = self.loss(logits[0].squeeze(dim=1), targets)
        if getattr(self.config, 'has_aux_input', False):
            # add the weighted loss on the intermediate logits as well
            loss = loss + self.config.intermediate_loss_weight * self.loss(
                logits[1], targets
            )
        return loss
class Metrics(nn.Module):
"""Common metrics and info for inspection of results.
Args:
logits, labels
Returns:
(accuracy, pred_targets, correct_preds, targets)"""
def __init__(self):
super().__init__()
def forward(self, logits, labels, runtimes=None):
# be flexible with 1hot labels vs indices
if len(labels.size()) == 2:
targets = labels.argmax(dim=1)
elif len(labels.size()) == 1:
targets = labels
else:
raise ValueError(f"labels={labels.size()} tensor is is neither 1 nor 2-dimensional. :/")
pred_targets = logits.argmax(dim=1)
correct_preds = targets.eq(pred_targets).float()
accuracy = torch.mean(correct_preds)
ret = accuracy, correct_preds, targets
if runtimes is not None:
assert runtimes.size() == logits.size(), \
f"We need to have a runtime for each sample and every possible label!" \
f"runtimes={runtimes.size()}, logits={logits.size()}."
#actual = runtimes[pred#torch.index_select(runtimes, dim=1, index=pred_targets)
actual = torch.gather(runtimes, dim=1, index=pred_targets.view(-1, 1)).squeeze()
#actual = runtimes[:, pred_targets]
optimal = | |
if m.group(1) == "Compendium" and m.group(3):
(system, entrytype, idnum) = m.group(2).split(".", 2)
if args.compendium:
slug = uuid.uuid5(moduuid, idnum)
else:
slug = slugify(m.group(3))
entrytype = entrytype.lower().replace("actor", "monster").rstrip("s")
if "packs" in mod:
for p in mod["packs"]:
if p["name"] == entrytype and p["entity"] == "Actor":
entrytype = "monster"
elif p["name"] == entrytype and p["entity"] == "Item":
entrytype = "item"
for i in items:
if i["_id"] == idnum and i["type"].lower() == "spell":
entrytype = "spell"
return '<a href="/{}/{}">{}</a>'.format(entrytype, slug, m.group(3))
if m.group(1) == "Item":
for i in items:
if i["_id"] == m.group(2) or i["name"] == m.group(2):
return '<a href="/item/{}">{}</a>'.format(
uuid.uuid5(moduuid, i["_id"])
if args.compendium
else slugify(i["name"]),
m.group(3) or i["name"],
)
if m.group(1) == "Macro":
if m.group(3):
return "<details><summary>{}</summary>This was a Foundry Macro, which cannot be converted.</details>".format(
m.group(3)
)
else:
return "<details><summary>Unsupported</summary>This was a Foundry Macro, which cannot be converted.</details>"
return m.group(0)
# --- Folders -> <group> elements ---------------------------------------------
# Each Foundry folder becomes a <group> in the module XML, keyed by a UUIDv5
# derived from the module UUID and the folder id (stable across conversions).
if args.gui and len(folders) > 0:
    worker.outputLog("Converting folders")
for f in folders:
    order += 1
    if args.gui:
        # Folder conversion accounts for the first 5% of the progress bar.
        worker.updateProgress((order / len(folders)) * 5)
    print(
        "\rCreating Folders [{}/{}] {:.0f}%".format(
            order, len(folders), order / len(folders) * 100
        ),
        file=sys.stderr,
        end="",
    )
    # Only folder types that map onto converted content are kept.
    if f["type"] not in ["JournalEntry", "RollTable", "Scene"]:
        continue
    folder = ET.SubElement(
        module,
        "group",
        {"id": str(uuid.uuid5(moduuid, f["_id"])), "sort": str(int(f["sort"]))},
    )
    ET.SubElement(folder, "name").text = f["name"]
    if f["parent"] is not None:
        # Preserve the folder hierarchy via the parent's deterministic UUID.
        folder.set("parent", str(uuid.uuid5(moduuid, f["parent"])))
order = 0
# --- Journal entries -> <page> elements --------------------------------------
if len(journal) > 0 and args.gui:
    worker.outputLog("Converting journal")
for j in journal:
    order += 1
    if args.gui:
        # Journal conversion covers the 5-15% band of the progress bar.
        worker.updateProgress(5 + (order / len(journal)) * 10)
    # Skip tombstoned entries and entries with neither text nor an image.
    if "$$deleted" in j and j["$$deleted"]:
        continue
    if not j["content"] and ("img" not in j or not j["img"]):
        continue
    print(
        "\rConverting journal [{}/{}] {:.0f}%".format(
            order, len(journal), order / len(journal) * 100
        ),
        file=sys.stderr,
        end="",
    )
    page = ET.SubElement(
        module,
        "page",
        {"id": str(uuid.uuid5(moduuid, j["_id"])), "sort": str(j["sort"] or order)},
    )
    if "folder" in j and j["folder"] is not None:
        page.set("parent", str(uuid.uuid5(moduuid, j["folder"])))
    ET.SubElement(page, "name").text = j["name"]
    ET.SubElement(page, "slug").text = slugify(j["name"])
    content = ET.SubElement(page, "content")
    content.text = j["content"] or ""
    # Rewrite Foundry-specific markup: <a data-entity/data-id> links,
    # @Type[id]{label} references, and [[/roll ...]] inline rolls.
    content.text = re.sub(
        r'<a(.*?)data-entity="?(.*?)"? (.*?)data-id="?(.*?)"?( .*?)?>',
        fixLink,
        content.text,
    )
    content.text = re.sub(r"@(.*?)\[(.*?)\](?:\{(.*?)\})?", fixFTag, content.text)
    content.text = re.sub(
        r"\[\[(?:/(?:gm)?r(?:oll)? )?(.*?)(?: ?# ?(.*?))?\]\]",
        fixRoll,
        content.text,
    )
    if "img" in j and j["img"]:
        content.text += '<img src="{}">'.format(j["img"])
order = 0
# maxorder anchors the sort position of the synthetic groups created below.
maxorder = len(folders) + len(journal) if not maxorder else maxorder
# --- Playlists -> <page> elements under a synthetic "Playlists" group --------
if len(playlists) > 0:
    if args.gui:
        worker.outputLog("Converting playlists")
    playlistsbaseslug = "playlists"
    # Suffix with a counter so the slug stays unique within the module.
    playlistsslug = playlistsbaseslug + str(
        len([i for i in slugs if playlistsbaseslug in i])
    )
    playlistsgroup = str(uuid.uuid5(moduuid, playlistsslug))
    group = ET.SubElement(
        module, "group", {"id": playlistsgroup, "sort": str(int(maxorder + 1))}
    )
    ET.SubElement(group, "name").text = "Playlists"
    ET.SubElement(group, "slug").text = playlistsslug
    for p in playlists:
        order += 1
        if args.gui:
            worker.updateProgress(15 + (order / len(playlists)) * 10)
        if "$$deleted" in p and p["$$deleted"]:
            continue
        print(
            "\rConverting playlists [{}/{}] {:.0f}%".format(
                order, len(playlists), order / len(playlists) * 100
            ),
            file=sys.stderr,
            end="",
        )
        page = ET.SubElement(
            module,
            "page",
            {
                "id": str(uuid.uuid5(moduuid, p["_id"])),
                "parent": playlistsgroup,
                "sort": str(p["sort"] if "sort" in p and p["sort"] else order),
            },
        )
        ET.SubElement(page, "name").text = p["name"]
        ET.SubElement(page, "slug").text = slugify(p["name"])
        content = ET.SubElement(page, "content")
        # The page body is a one-column HTML table of audio tracks.
        content.text = "<h1>{}</h1>".format(p["name"])
        content.text += "<table><thead><tr><td>"
        content.text += "Track"
        content.text += "</td>"
        content.text += "</tr></thead><tbody>"
        for s in p["sounds"]:
            content.text += "<tr>"
            content.text += "<td><figure>"
            content.text += "<figcaption>{}</figcaption>".format(s["name"])
            # Fall back to an already-converted .mp4 next to a missing source.
            if not os.path.exists(s["path"]) and os.path.exists(
                os.path.splitext(s["path"])[0] + ".mp4"
            ):
                s["path"] = os.path.splitext(s["path"])[0] + ".mp4"
            if os.path.exists(s["path"]):
                # Transcode unsupported audio formats to AAC-in-MP4.
                if magic.from_file(
                    os.path.join(tempdir, urllib.parse.unquote(s["path"])), mime=True
                ) not in [
                    "audio/mp3",
                    "audio/mpeg",
                    "audio/wav",
                    "audio/mp4",
                    "video/mp4",
                ]:
                    try:
                        ffp = subprocess.Popen(
                            [
                                ffmpeg_path,
                                "-v",
                                "error",
                                "-i",
                                s["path"],
                                "-acodec",
                                "aac",
                                os.path.splitext(s["path"])[0] + ".mp4",
                            ],
                            startupinfo=startupinfo,
                            stdout=subprocess.DEVNULL,
                            stderr=subprocess.STDOUT,
                            stdin=subprocess.DEVNULL,
                        )
                        ffp.wait()
                        s["path"] = os.path.splitext(s["path"])[0] + ".mp4"
                    except Exception:
                        print("Could not convert to MP4")
                content.text += (
                    '<audio controls {}><source src="{}" type="{}"></audio>'.format(
                        " loop" if s["repeat"] else "",
                        s["path"],
                        magic.from_file(
                            os.path.join(tempdir, urllib.parse.unquote(s["path"])),
                            mime=True,
                        ),
                    )
                )
            else:
                # Missing file: emit the tag anyway, without a MIME type.
                content.text += '<audio controls {}><source src="{}"></audio>'.format(
                    " loop" if s["repeat"] else "", s["path"]
                )
            content.text += "</figure></td>"
            content.text += "</tr>"
        content.text += "</tbody></table>"
order = 0
# --- Roll tables -> <page> elements under a synthetic "Roll Tables" group ----
if len(tables) > 0:
    if args.gui:
        worker.outputLog("Converting roll tables")
    tablesbaseslug = "tables"
    # Suffix with a counter so the slug stays unique within the module.
    tablesslug = tablesbaseslug + str(
        len([i for i in slugs if tablesbaseslug in i])
    )
    tablesgroup = str(uuid.uuid5(moduuid, tablesslug))
    group = ET.SubElement(
        module, "group", {"id": tablesgroup, "sort": str(int(maxorder + 1))}
    )
    ET.SubElement(group, "name").text = "Roll Tables"
    ET.SubElement(group, "slug").text = tablesslug
    for t in tables:
        order += 1
        if args.gui:
            worker.updateProgress(25 + (order / len(tables)) * 10)
        if "$$deleted" in t and t["$$deleted"]:
            continue
        print(
            "\rConverting tables [{}/{}] {:.0f}%".format(
                order, len(tables), order / len(tables) * 100
            ),
            file=sys.stderr,
            end="",
        )
        page = ET.SubElement(
            module,
            "page",
            {
                "id": str(uuid.uuid5(moduuid, t["_id"])),
                "parent": tablesgroup,
                "sort": str(t["sort"] if "sort" in t and t["sort"] else order),
            },
        )
        if "folder" in t and t["folder"]:
            # A real Foundry folder overrides the synthetic tables group.
            page.set("parent", str(uuid.uuid5(moduuid, t["folder"])))
        ET.SubElement(page, "name").text = t["name"]
        ET.SubElement(page, "slug").text = slugify(t["name"])
        content = ET.SubElement(page, "content")
        content.text = "<h1>{}</h1>".format(t["name"])
        content.text += "<table><thead><tr><td>"
        # Header cell doubles as a clickable roll link for the table formula.
        content.text += '<a href="/roll/{0}/{1}">{0}</a>'.format(
            t["formula"], t["name"]
        )
        content.text += '</td><td colspan="2" align="center">{}</td>'.format(t["name"])
        content.text += "</tr></thead><tbody>"
        for r in t["results"]:
            content.text += "<tr>"
            # Show "lo-hi" for a range, or the single value when lo == hi.
            content.text += "<td>{}</td>".format(
                "{}-{}".format(*r["range"])
                if r["range"][0] != r["range"][1]
                else r["range"][0]
            )
            content.text += "<td>"
            linkMade = False
            # Resolve result references to monster/item links when possible.
            if "collection" in r:
                if r["collection"] == "dnd5e.monsters":
                    content.text += '<a href="/monster/{}">{}</a>'.format(
                        slugify(r["text"]), r["text"]
                    )
                    linkMade = True
                elif r["collection"] == "Actor":
                    for a in actors:
                        if a["_id"] == r["resultId"]:
                            content.text += '<a href="/monster/{}">{}</a>'.format(
                                slugify(a["name"]), r["text"]
                            )
                            linkMade = True
                elif r["collection"] == "Item":
                    for i in items:
                        if i["_id"] == r["resultId"]:
                            content.text += '<a href="/item/{}">{}</a>'.format(
                                slugify(i["name"]), r["text"]
                            )
                            linkMade = True
            if not linkMade:
                content.text += "{}".format(r["text"] if r["text"] else " ")
            content.text += "</td>"
            if "img" in r and os.path.exists(r["img"]):
                content.text += (
                    '<td style="width:50px;height:50px;"><img src="{}"></td>'.format(
                        r["img"]
                    )
                )
            else:
                content.text += '<td style="width:50px;height:50px;"> </td>'
            content.text += "</tr>"
        content.text += "</tbody></table>"
        # Table text may itself contain [[/roll ...]] markup.
        content.text = re.sub(
            r"\[\[(?:/(?:gm)?r(?:oll)? )?(.*?)(?: ?# ?(.*?))?\]\]",
            fixRoll,
            content.text,
        )
if "media" in mod:
for media in mod["media"]:
if "url" not in media and "link" in media:
media["url"] = media["link"]
if media["type"] == "cover" and media["url"]:
def progress(block_num, block_size, total_size):
pct = 100.00 * ((block_num * block_size) / total_size)
print(
"\rDownloading cover {:.2f}%".format(pct),
file=sys.stderr,
end="",
)
if urllib.parse.urlparse(media['url']).scheme:
urllib.request.urlretrieve(
media["url"],
os.path.join(tempdir, os.path.basename(media["url"]).lower()),
progress,
)
else:
shutil.copy(os.path.join(os.path.join(moduletmp, mod["name"]),os.path.basename(media["url"])),os.path.join(tempdir,os.path.basename(media["url"]).lower()))
if args.packdir:
shutil.copy(os.path.join(tempdir,os.path.basename(media["url"].lower())),os.path.join(packdir,os.path.basename(media["url"]).lower()))
modimage.text = os.path.basename(media["url"]).lower()
mapcount = 0
# --- Scenes/maps -------------------------------------------------------------
if len(maps) > 0:
    if args.gui:
        worker.outputLog("Converting maps")
    # Only create a synthetic "Maps" group when there is other grouped
    # content and no scene carries its own Foundry folder.
    if any([journal, folders, tables, playlists]) and not any(
        [x["folder"] for x in maps if "folder" in x]
    ):
        mapsbaseslug = "maps"
        mapsslug = mapsbaseslug + str(len([i for i in slugs if mapsbaseslug in i]))
        mapgroup = str(uuid.uuid5(moduuid, mapsslug))
        group = ET.SubElement(
            module, "group", {"id": mapgroup, "sort": str(int(maxorder + 2))}
        )
        ET.SubElement(group, "name").text = "Maps"
        ET.SubElement(group, "slug").text = mapsslug
    else:
        mapgroup = None
    for map in maps:
        if "$$deleted" in map and map["$$deleted"]:
            continue
        # Use a recognisably-named scene image as the module cover when no
        # explicit cover has been set yet.
        if not modimage.text and map["name"].lower() in args.covernames:
            if args.gui:
                worker.outputLog("Generating cover image")
            print("\rGenerating cover image", file=sys.stderr, end="")
            if not os.path.exists(
                urllib.parse.unquote(map["img"] or map["tiles"][0]["img"])
            ):
                # The image may already have been converted to args.jpeg.
                if os.path.exists(
                    os.path.splitext(
                        urllib.parse.unquote(map["img"] or map["tiles"][0]["img"])
                    )[0]
                    + args.jpeg
                ):
                    map["img"] = os.path.splitext(map["img"])[0] + args.jpeg
            with PIL.Image.open(
                urllib.parse.unquote(map["img"] or map["tiles"][0]["img"])
            ) as img:
                # Square-crop from the top-left corner, capped at 1024x1024.
                if img.width <= img.height:
                    img = img.crop((0, 0, img.width, img.width))
                else:
                    img = img.crop((0, 0, img.height, img.height))
                if img.width > 1024:
                    img = img.resize((1024, 1024))
                if args.jpeg == ".jpg" and img.mode in ("RGBA", "P"):
                    # JPEG cannot store alpha/palette images.
                    img = img.convert("RGB")
                img.save(os.path.join(tempdir, "module_cover" + args.jpeg))
            modimage.text = "module_cover" + args.jpeg
        # Scenes with no background image and no tiles have nothing to render.
        if not map["img"] and len(map["tiles"]) == 0:
            continue
        mapcount += 1
        sys.stderr.write("\033[K")
        if args.gui:
            worker.updateProgress(35 + (mapcount / len(maps)) * 35)
        print(
            "\rConverting maps [{}/{}] {:.0f}%".format(
                mapcount, len(maps), mapcount / len(maps) * 100
            ),
            file=sys.stderr,
            end="",
        )
        createMap(map, mapgroup)
# --- Prune empty groups ------------------------------------------------------
# Repeatedly drop <group> elements that nothing references as a parent;
# iterating to a fixed point also removes chains of empty nested groups.
while True:
    removed = False
    for g in module.iter("group"):
        gInUse = False
        for tag in ["page", "map", "group", "asset"]:
            for p in module.iter(tag):
                if p.get("parent") == g.get("id"):
                    gInUse = True
                    break
            if gInUse:
                break
        if gInUse:
            continue
        # NOTE(review): Element.remove() only removes direct children, and
        # this mutates the tree while iter() is walking it — appears to rely
        # on the outer while-loop restart for correctness; worth confirming.
        module.remove(g)
        removed = True
    if not removed:
        break
if not modimage.text and len(maps) > 0:
randomok = False
loopcount = len(maps)*5
while not randomok:
loopcount -= 1
if loopcount < 0: break
map = random.choice(maps)
while "$$deleted" in map and mapcount > 0:
map = random.choice(maps)
if not os.path.exists(
urllib.parse.unquote(map["img"] or | |
Exception as e:
logger.error('error :: metrics_manager :: sync_cluster_files - failed to determine authorative_host for %s - %s' % (
str(metric), e))
if authorative_host:
USE_REMOTE_SKYLINE_INSTANCES = []
for remote_skyline_instance in REMOTE_SKYLINE_INSTANCES:
try:
if remote_skyline_instance[3] == authorative_host:
USE_REMOTE_SKYLINE_INSTANCES.append(remote_skyline_instance)
except Exception as e:
logger.error('error :: metrics_manager :: sync_cluster_files - failed to the remote_skyline_instance list for the authorative_host - %s - %s' % (
str(authorative_host), e))
for remote_skyline_instance in REMOTE_SKYLINE_INSTANCES:
try:
if remote_skyline_instance[3] != authorative_host:
USE_REMOTE_SKYLINE_INSTANCES.append(remote_skyline_instance)
except Exception as e:
logger.error('error :: metrics_manager :: sync_cluster_files - failed to the secondary remote_skyline_instance list for the authorative_host - %s - %s' % (
str(authorative_host), e))
for remote_skyline_instance in USE_REMOTE_SKYLINE_INSTANCES:
if endpoint:
data_required = 'files'
save_file = False
host_features_profile_files = {}
try:
host_features_profile_files = self.get_remote_data(remote_skyline_instance, data_required, endpoint, save_file)
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: sync_cluster_files - failed to get %s from %s' % (
endpoint, str(remote_skyline_instance[0])))
host_features_profile_files = {}
if host_features_profile_files:
for features_profile_file in host_features_profile_files:
known_features_profile_file = False
try:
known_features_profile_file = features_profile_files[features_profile_file]
except:
known_features_profile_file = None
if not known_features_profile_file:
try:
features_profile_files[features_profile_file] = host_features_profile_files[features_profile_file]
except Exception as e:
logger.error('error :: metrics_manager :: sync_cluster_files - failed to add features_profile_file %s to features_profile_files - %s' % (
str(features_profile_file), e))
if len(features_profile_files) > 0:
logger.info('%s features profile files found to download from %s to %s' % (
str(len(features_profile_files)), remote_skyline_instance[0],
features_profile_dir))
# break
else:
logger.info('%s features profile files found to download from %s to %s' % (
str(len(features_profile_files)), remote_skyline_instance[0],
features_profile_dir))
if features_profile_files and FAKE_CLUSTER_SYNC:
features_profile_files = {}
logger.info('FAKE_CLUSTER_SYNC reseting features_profile_files with %s items found' % str(len(features_profile_files)))
if FAKE_CLUSTER_SYNC and not features_profile_files:
logger.info('FAKE_CLUSTER_SYNC no features_profile_files found')
if FAKE_CLUSTER_SYNC:
try:
self.redis_conn.hset(
'analyzer.metrics_manager.local_features_profile_dirs',
expected_features_profile_dir, expected_dir)
logger.info('metrics_manager :: %s exists locally, added to analyzer.metrics_manager.local_features_profile_dirs' % expected_dir)
continue
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to add entry in analyzer.metrics_manager.local_features_profile_dirs for - %s' % str(expected_dir))
features_profile_files = {}
continue
if features_profile_files:
files_to_fetch = len(features_profile_files)
files_fetched = 0
files_present = 0
logger.info('metrics_manager :: sync_cluster_files - %s features_profile files to fetch for fp_id %s from %s' % (
str(files_to_fetch), str(fp_id),
str(remote_skyline_instance[0])))
data_required = 'file_saved'
for remote_file in features_profile_files:
try:
data_file = features_profile_files[remote_file]
data_dir = os.path.dirname(data_file)
if not os.path.exists(data_dir):
mkdir_p(data_dir)
if not os.path.exists(data_dir):
logger.error('error :: metrics_manager :: sync_cluster_files - failed to create dir - %s' % data_dir)
continue
endpoint = 'ionosphere_file?file=%s' % str(data_file)
# Only fetch it if it does not exist
if not os.path.isfile(data_file):
data_required = 'file_saved'
file_saved = False
file_saved = self.get_remote_data(remote_skyline_instance, data_required, endpoint, data_file)
if not file_saved:
logger.error('error :: metrics_manager :: sync_cluster_files - failed to get %s from %s' % (
data_file, str(remote_skyline_instance[0])))
continue
else:
logger.info('metrics_manager :: sync_cluster_files - got %s from %s' % (
data_file, str(remote_skyline_instance[0])))
files_fetched += 1
else:
files_present += 1
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: sync_cluster_files - failed to get %s from %s' % (
endpoint, str(remote_skyline_instance[0])))
if (files_fetched + files_present) == files_to_fetch:
logger.info('metrics_manager :: sync_cluster_files - got all %s features_profile files that needed to be fetch (%s were already present) for %s from %s' % (
str(files_to_fetch), str(files_present), str(fp_id),
str(remote_skyline_instance[0])))
try:
self.redis_conn.hset(
'analyzer.metrics_manager.local_features_profile_dirs',
expected_features_profile_dir, expected_dir)
logger.info('metrics_manager :: %s features profile dir exists locally, added to analyzer.metrics_manager.local_features_profile_dirs' % str(expected_dir))
continue
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to add entry in analyzer.metrics_manager.local_features_profile_dirs for - %s' % str(expected_dir))
try:
self.redis_conn.hset(
'analyzer.metrics_manager.local_features_profile_dirs.to_update',
expected_features_profile_dir, expected_dir)
logger.info('metrics_manager :: %s features profile, added to analyzer.metrics_manager.local_features_profile_dirs.to_update' % str(expected_dir))
continue
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to add entry in analyzer.metrics_manager.local_features_profile_dirs for - %s' % str(expected_dir))
in_local_features_profile_dirs_to_update = False
try:
for local_features_profile_dir_to_update in local_features_profile_dirs_to_update:
if local_features_profile_dir_to_update == expected_features_profile_dir:
in_local_features_profile_dirs_to_update = True
try:
self.redis_conn.hdel(
'analyzer.metrics_manager.local_features_profile_dirs.to_update',
expected_features_profile_dir, expected_dir)
logger.info('metrics_manager :: %s features profile, removed from analyzer.metrics_manager.local_features_profile_dirs.to_update' % str(expected_dir))
continue
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to add entry in analyzer.metrics_manager.local_features_profile_dirs for - %s' % str(expected_dir))
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to determine if entry should be remove from in analyzer.metrics_manager.local_features_profile_dirs.to_update')
if not in_local_features_profile_dirs_to_update:
try:
self.redis_conn.hset(
'analyzer.metrics_manager.local_features_profile_dirs.to_update',
expected_features_profile_dir, expected_dir)
logger.info('metrics_manager :: %s features profile, added to analyzer.metrics_manager.local_features_profile_dirs.to_update' % str(expected_dir))
continue
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to add entry in analyzer.metrics_manager.local_features_profile_dirs for - %s' % str(expected_dir))
else:
logger.error('error :: metrics_manager :: sync_cluster_files - failed to get all %s features_profile files for %s from %s' % (
str(files_to_fetch), str(fp_id), str(remote_skyline_instance[0])))
if files_fetched > 0:
fps_fetched += 1
logger.info('metrics_manager :: features_profiles_dir done')
spin_end = time() - spin_start
logger.info('metrics_manager :: sync_cluster_files took %.2f seconds' % spin_end)
return
def metric_management_process(self, i):
"""
Create and manage the required lists and Redis sets
"""
spin_start = time()
logger.info('metrics_manager :: metric_management_process started')
last_run_timestamp = 0
try:
last_run_timestamp = self.redis_conn_decoded.get('analyzer.metrics_manager.last_run_timestamp')
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to generate a list from %s Redis set' % full_uniques)
last_run_timestamp = 0
if last_run_timestamp:
logger.info('metrics_manager :: analyzer.metrics_manager.last_run_timestamp Redis key has not expired, not running')
return
unique_metrics = []
try:
unique_metrics = list(self.redis_conn_decoded.smembers(full_uniques))
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to generate a list from %s Redis set' % full_uniques)
unique_metrics = []
# Check if this process is unnecessary
if len(unique_metrics) == 0:
logger.error('error :: metrics_manager :: there are no metrics in %s Redis set' % full_uniques)
return
####
# Check whether any alert settings or metrics have been changed, added
# or removed. If so do a full refresh.
####
refresh_redis_alert_sets = False
####
# Create a list of base_names from the unique_metrics
####
# @added 20200723 - Feature #3560: External alert config
# Speed this up only check alerters if not already in the set
unique_base_names = []
logger.info('metrics_manager :: creating unique_base_names list')
for metric_name in unique_metrics:
# @added 20191014 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
if python_version == 3:
metric_name = str(metric_name)
# @modified 20200728 - Bug #3652: Handle multiple metrics in base_name conversion
# base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
if metric_name.startswith(settings.FULL_NAMESPACE):
base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
else:
base_name = metric_name
# @added 20200723 - Feature #3560: External alert config
# Speed this up only check alerters if not already in the set
# metric_in_smtp_alerters_set = False
unique_base_names.append(base_name)
logger.info('metrics_manager :: created unique_base_names list of %s metrics' % str(len(unique_base_names)))
# @added 20210308 - Feature #3978: luminosity - classify_metrics
# Feature #3642: Anomaly type classification
# Make a Redis set of unique_base_names for other apps to use
if unique_base_names:
unique_base_names = list(set(unique_base_names))
logger.info('metrics_manager :: recreating the analyzer.unique_base_names set')
try:
self.redis_conn.sadd('new_analyzer.unique_base_names', *set(unique_base_names))
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to add multiple members to the new_analyzer.unique_base_names Redis set')
try:
self.redis_conn.delete('new_analyzer.unique_base_names.old')
except:
pass
try:
self.redis_conn.rename('analyzer.unique_base_names', 'analyzer.unique_base_names.old')
except:
pass
try:
self.redis_conn.rename('new_analyzer.unique_base_names', 'analyzer.unique_base_names')
except:
pass
try:
self.redis_conn.delete('unique_base_names.old')
except:
pass
try:
self.redis_conn.sunionstore('aet.analyzer.unique_base_names', 'analyzer.unique_base_names')
logger.info('metrics_manager :: copied Redis set analyzer.unique_base_names to aet.analyzer.unique_base_names via sunion')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to copy Redis set analyzer.unique_base_names to aet.analyzer.unique_base_names via sunion')
#####
# Check whether any internal or external alert settings have been changed
# if so do a full refresh
####
# @added 20200528 - Feature #3560: External alert config
external_alerts = {}
external_from_cache = None
internal_alerts = {}
internal_from_cache = None
all_alerts = list(settings.ALERTS)
all_from_cache = None
if EXTERNAL_ALERTS:
try:
external_alerts, external_from_cache, internal_alerts, internal_from_cache, all_alerts, all_from_cache = get_external_alert_configs(skyline_app)
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: could not determine external alert configs')
logger.info('metrics_manager :: retrieved %s external_alerts configurations from_cache %s, %s internal_alerts from_cache %s and %s all_alerts from_cache %s' % (
str(len(external_alerts)), str(external_from_cache),
str(len(internal_alerts)), str(internal_from_cache),
str(len(all_alerts)), str(all_from_cache)))
if LOCAL_DEBUG:
logger.debug('debug :: metrics_manager :: all_alerts :: %s' % str(all_alerts))
if not all_alerts:
logger.error('error :: metrics_manager :: all_alerts is not set, so creating from settings.ALERTS')
all_alerts = list(settings.ALERTS)
# If there was a last known alerts configuration compare it to the
# current known alerts configuration if they are different do a full
# refresh
# @added 20201017 - Feature #3788: snab_flux_load_test
# Feature #3560: External alert config
last_all_alerts_set = None
try:
last_all_alerts_data = self.redis_conn_decoded.get('analyzer.last_all_alerts')
if last_all_alerts_data:
last_all_alerts = literal_eval(last_all_alerts_data)
# A normal sorted nor set can be used as the list has dicts in it
last_all_alerts_set = sorted(last_all_alerts, key=lambda item: item[0])
logger.info('metrics_manager :: last_all_alerts_set from analyzer.last_all_alerts Redis set has | |
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel lib provides a way to read and write SavedModels.
This is an internal Hub utility and not part of the public API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import re
from absl import logging
import tensorflow as tf
from tensorflow_hub import module_attachment_pb2
from tensorflow_hub import tf_utils
from tensorflow_hub import tf_v1
from google.protobuf import message
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saved_model_pb2
# Internal collection keys used to smuggle Hub metadata through tf.Graph
# collections into the exported SavedModel.
# A collection of pairs (key: string, definition : SignatureDef) used internally
# to propagate signatures defined in a Graph to SavedModel. The collection key
# is a tuple (not a string) in order to make it invisible from user apis such
# as `get_all_collection_keys()` and manual exporting to meta_graphs.
_SIGNATURE_COLLECTION = ("__saved_model_lib_signatures",)
# A collection of ModuleAttachment protos is used internally to collect
# the (key, value) pairs passed to attach_message() calls from the module_fn.
# As above, it gets a non-string name to make it invisible within module_fn.
_ATTACHMENT_COLLECTION_INTERNAL = ("__hub_module_attachments",)
# The ModuleAttachment protos are stored in SavedModel.meta_graphs (but never
# in tf.Graphs) as CollectionDef.bytes_list under this key.
ATTACHMENT_COLLECTION_SAVED = "hub_module_attachments"
def get_variables_path(export_dir):
  """Returns the path for storing variables checkpoints.

  Args:
    export_dir: directory of the SavedModel export.

  Returns:
    A bytes path `<export_dir>/variables/variables`.
  """
  variables_dir = os.path.join(
      tf.compat.as_bytes(export_dir),
      tf.compat.as_bytes(tf_v1.saved_model.constants.VARIABLES_DIRECTORY))
  return os.path.join(
      variables_dir,
      tf.compat.as_bytes(tf_v1.saved_model.constants.VARIABLES_FILENAME))
def _get_assets_dir(export_dir):
  """Returns the assets subdirectory of `export_dir` as a bytes path."""
  export_dir_bytes = tf.compat.as_bytes(export_dir)
  assets_subdir = tf.compat.as_bytes(
      tf_v1.saved_model.constants.ASSETS_DIRECTORY)
  return os.path.join(export_dir_bytes, assets_subdir)
def _get_asset_filename(export_dir, asset_filename):
  """Resolves `asset_filename` inside the assets dir of `export_dir`.

  Raises:
    ValueError: if the resolved path escapes the assets directory.
  """
  assets_dir = _get_assets_dir(export_dir)
  resolved = os.path.join(
      tf.compat.as_bytes(assets_dir),
      tf.compat.as_bytes(asset_filename))
  # Guard against path traversal (e.g. "../..") out of the assets dir.
  inside_assets_dir = tf_utils.absolute_path(resolved).startswith(
      tf_utils.absolute_path(assets_dir))
  if not inside_assets_dir:
    raise ValueError(
        "Asset filename (%s) points outside assets_dir" % asset_filename)
  logging.debug("Asset filename: %s", resolved)
  return resolved
def _get_saved_model_proto_path(export_dir):
  """Returns the path of the saved_model.pb file under `export_dir`."""
  proto_filename = tf_v1.saved_model.constants.SAVED_MODEL_FILENAME_PB
  return os.path.join(
      tf.compat.as_bytes(export_dir), tf.compat.as_bytes(proto_filename))
def _get_node_name_from_tensor(tensor_name):
"""tensor_name must have format node_name:output_number. Returns node_name."""
result = re.match(r"([^:]*):\d+$", tensor_name)
if not result:
raise ValueError(
"Unexpected format for tensor name. Expected node_name:output_number. "
"Got %r" % tensor_name)
return result.group(1)
def add_signature(key, inputs, outputs):
  """Adds a signature to current graph.

  Args:
    key: Signature key as a string.
    inputs: Signature inputs as a map from string to Tensor or SparseTensor.
    outputs: Signature outputs as a map from string to Tensor or SparseTensor.
      (Recall that a Variable is not a Tensor, but Variable.value() is.)

  Raises:
    TypeError: if the arguments have the wrong types.
  """
  _check_dict_maps_to_tensors_or_sparse_tensors(inputs)
  _check_dict_maps_to_tensors_or_sparse_tensors(outputs)

  def _to_tensor_infos(tensor_map):
    # Convert each (Sparse)Tensor into its serializable TensorInfo proto.
    return {name: tf_v1.saved_model.utils.build_tensor_info(tensor)
            for name, tensor in tensor_map.items()}

  signature = tf_v1.saved_model.signature_def_utils.build_signature_def(
      _to_tensor_infos(inputs), _to_tensor_infos(outputs))
  # Stored in a tuple-named collection so it stays hidden from user APIs.
  tf_v1.add_to_collection(_SIGNATURE_COLLECTION, (key, signature))
def _check_dict_maps_to_tensors_or_sparse_tensors(tensor_map):
  """Raises TypeError unless every value is a Tensor or SparseTensor."""
  for key, value in tensor_map.items():
    if isinstance(value, (tf.Tensor, tf.SparseTensor)):
      continue
    raise TypeError(
        "Value for key '%s' should be a Tensor or SparseTensor object, found"
        " %s." % (key, type(value)))
def _export_signatures(meta_graph):
  """Copies signatures collected in the current graph into `meta_graph`.

  Raises:
    ValueError: if the graph holds no signatures at all.
  """
  named_signatures = tf_v1.get_collection(_SIGNATURE_COLLECTION)
  if not named_signatures:
    raise ValueError("No signatures present. Please call hub.add_signature(...)"
                     "at least once in the module_fn.")
  for signature_key, signature_def in named_signatures:
    # A later signature with the same key overwrites an earlier one.
    meta_graph.signature_def[signature_key].CopyFrom(signature_def)
def attach_bytes(key, the_bytes):
  """Adds a ModuleAttachment to the current graph.

  Args:
    key: A string with the unique key of the attachment.
    the_bytes: A bytes object with the serialized attachment.
  """
  attachment = module_attachment_pb2.ModuleAttachment(key=key, value=the_bytes)
  # Tuple-named collection: invisible to user code inside module_fn.
  tf_v1.add_to_collection(_ATTACHMENT_COLLECTION_INTERNAL, attachment)
def _export_module_attachments(meta_graph):
  """Exports ModuleAttachments from the current tf.Graph into `meta_graph`."""
  added_attachments = tf_v1.get_collection(_ATTACHMENT_COLLECTION_INTERNAL)
  if not added_attachments:
    return  # Leave `meta_graph` untouched.
  # Deduplicate by key: the last write wins, first-insertion order is kept
  # (OrderedDict avoids indeterminism in the serialized output).
  unique_attachments = collections.OrderedDict()
  for attachment in added_attachments:
    unique_attachments[attachment.key] = attachment
  serialized = [a.SerializeToString() for a in unique_attachments.values()]
  meta_graph.collection_def[ATTACHMENT_COLLECTION_SAVED].bytes_list.value[:] = (
      serialized)
def get_attached_bytes_map(meta_graph):
  """Returns the dict of ModuleAttachments stored in `meta_graph`.

  Args:
    meta_graph: A MetaGraphDef, as built by SavedModelHandler.add_graph_copy()
      from some graph.

  Returns:
    A dict, containing the `(key, bytes)` items passed to `attach_bytes()`
    when the graph had been built.

  Raises:
    ValueError: if `meta-graph` is malformed.
  """
  bytes_map = {}
  if ATTACHMENT_COLLECTION_SAVED not in meta_graph.collection_def:
    return bytes_map
  collection_def = meta_graph.collection_def[ATTACHMENT_COLLECTION_SAVED]
  if collection_def.WhichOneof("kind") != "bytes_list":
    raise ValueError(
        "Internal CollectionDef for attached messages has kind %s, "
        "expected bytes_list" % collection_def.WhichOneof("kind"))
  # Reuse a single proto object; the extracted values are immutable bytes
  # and need no copy.
  parsed = module_attachment_pb2.ModuleAttachment()
  for serialized in collection_def.bytes_list.value:
    parsed.ParseFromString(serialized)
    bytes_map[parsed.key] = parsed.value
  return bytes_map
def _export_tags(meta_graph, tags):
"""Exports tags into a MetaGraphDef."""
if tags is not None:
meta_graph.meta_info_def.tags.extend(tags)
def _check_asset_node_def(node_def):
  """Raises TypeError unless `node_def` is a scalar string constant."""
  if node_def.op != "Const":
    raise TypeError("Asset node must be of type constant.")
  node_dtype = tf.as_dtype(node_def.attr["dtype"].type)
  if node_dtype != tf.string:
    raise TypeError("Asset node must be of dtype string.")
  string_values = node_def.attr["value"].tensor.string_val
  if len(string_values) != 1:
    raise TypeError("Asset node must be a scalar.")
def _merge_assets_key_collection(saved_model_proto, path):
  """Merges the ASSETS_KEY collection into the GraphDefs in saved_model_proto.

  Removes the ASSETS_KEY collection from the GraphDefs in the SavedModel and
  modifies nodes with the assets filenames to point to the assets in `path`.
  After this transformation, the SavedModel GraphDefs can be used without
  feeding asset tensors.

  Args:
    saved_model_proto: SavedModel proto to be modified.
    path: path where the SavedModel is being loaded from.
  """
  for meta_graph in saved_model_proto.meta_graphs:
    # Maps const-node name -> absolute asset filename under `path`.
    node_asset_map = {}
    if tf_v1.saved_model.constants.ASSETS_KEY in meta_graph.collection_def:
      assets_any_proto = meta_graph.collection_def[
          tf_v1.saved_model.constants.ASSETS_KEY].any_list.value
      for asset_any_proto in assets_any_proto:
        # Each entry is a google.protobuf.Any wrapping an AssetFileDef.
        asset_proto = meta_graph_pb2.AssetFileDef()
        asset_any_proto.Unpack(asset_proto)
        asset_filename = _get_asset_filename(path, asset_proto.filename)
        node_asset_map[_get_node_name_from_tensor(
            asset_proto.tensor_info.name)] = asset_filename
      # Delete only after the loop has fully read the collection.
      del meta_graph.collection_def[tf_v1.saved_model.constants.ASSETS_KEY]
    # Rewrite each referenced Const node to hold the resolved asset path.
    for node in meta_graph.graph_def.node:
      asset_filepath = node_asset_map.get(node.name)
      if asset_filepath:
        _check_asset_node_def(node)
        node.attr["value"].tensor.string_val[0] = asset_filepath
def _make_assets_key_collection(saved_model_proto, export_path):
  """Creates an ASSETS_KEY collection in the GraphDefs in saved_model_proto.

  Adds an ASSETS_KEY collection to the GraphDefs in the SavedModel and returns
  a map from original asset filename to filename when exporting the SavedModel
  to `export_path`.

  This is roughly the inverse operation of `_merge_assets_key_collection`.

  Args:
    saved_model_proto: SavedModel proto to be modified.
    export_path: string with path where the saved_model_proto will be exported.

  Returns:
    A map from original asset filename to asset filename when exporting the
    SavedModel to path.

  Raises:
    ValueError: on unsuported/unexpected SavedModel.
  """
  # Both maps are shared state mutated by the closure below.
  asset_filenames = {}
  used_asset_filenames = set()
  def _make_asset_filename(original_filename):
    """Returns the asset filename to use for the file."""
    if original_filename in asset_filenames:
      return asset_filenames[original_filename]
    basename = os.path.basename(original_filename)
    suggestion = basename
    index = 0
    # Disambiguate colliding basenames by appending a numeric suffix.
    # NOTE(review): `basename` comes from a proto string_val and may be
    # bytes; as_bytes keeps the concatenation type-consistent — confirm.
    while suggestion in used_asset_filenames:
      suggestion = tf.compat.as_bytes(basename) + tf.compat.as_bytes(str(index))
      index += 1
    asset_filenames[original_filename] = suggestion
    used_asset_filenames.add(suggestion)
    return suggestion
  for meta_graph in saved_model_proto.meta_graphs:
    collection_def = meta_graph.collection_def.get(
        tf_v1.GraphKeys.ASSET_FILEPATHS)
    if collection_def is None:
      continue
    if collection_def.WhichOneof("kind") != "node_list":
      raise ValueError(
          "MetaGraph collection ASSET_FILEPATHS is not a list of tensors.")
    for tensor in collection_def.node_list.value:
      # Only output 0 of a Const node is expected here.
      if not tensor.endswith(":0"):
        raise ValueError("Unexpected tensor in ASSET_FILEPATHS collection.")
    asset_nodes = set([
        _get_node_name_from_tensor(tensor)
        for tensor in collection_def.node_list.value
    ])
    # Maps tensor name ("node:0") -> original filename found in the node.
    tensor_filename_map = {}
    for node in meta_graph.graph_def.node:
      if node.name in asset_nodes:
        _check_asset_node_def(node)
        filename = node.attr["value"].tensor.string_val[0]
        tensor_filename_map[node.name + ":0"] = filename
        logging.debug("Found asset node %s pointing to %s", node.name, filename)
        # Clear value to avoid leaking the original path.
        node.attr["value"].tensor.string_val[0] = (
            tf.compat.as_bytes("SAVEDMODEL-ASSET"))
    if tensor_filename_map:
      assets_key_collection = meta_graph.collection_def[
          tf_v1.saved_model.constants.ASSETS_KEY]
      # Sorted iteration keeps the exported collection deterministic.
      for tensor, filename in sorted(tensor_filename_map.items()):
        asset_proto = meta_graph_pb2.AssetFileDef()
        asset_proto.filename = _make_asset_filename(filename)
        asset_proto.tensor_info.name = tensor
        assets_key_collection.any_list.value.add().Pack(asset_proto)
  return {
      original_filename: _get_asset_filename(export_path, asset_filename)
      for original_filename, asset_filename in asset_filenames.items()
  }
class SavedModelHandler(object):
"""SavedModelHandler helps using SavedModel disk format.
Note: This is a lower level interface than most users need. See SavedModel
Builder/Loader API for an higher-level API centered around exporting and
loading Sessions.
A SavedModel disk format represents a collection of Graphs. To allow these
graphs to be easy to manipulate, SavedModel extends Graphs with tags and
signatures. Additionally it packages graphs, assets and variable checkpoints
into an hermetic directory that can be moved around.
This class hides the implementation details of SavedModels, in particular
related with assets and signatures.
SavedModelHandler deals with assets by:
- Only supporting asset files as constant ops added to ASSET_FILEPATHS
collection.
- Creating a ASSETS_KEY collection only when writing meta_graphs to disk so
they are never visible to user.
- Baking the ASSETS_KEY collection in the graphs when loading from disk as
to hide that the assets point to the packaged assets.
SavedModelHandler deals with signatures by:
- Providing `add_signature` API that allows to declare signatures directly
on a graph.
- That API is supported by a collection that is not serialized, but instead
is converted into the right fields of MetaGraphDef when writing and
loading a SavedModel from disk.
"""
def | |
the base object
if len(args) > 0:
register_handle(self, args[0])
DownCast = staticmethod(_Extrema.Handle_Extrema_SequenceNodeOfSequenceOfPOnCurv2d_DownCast)
__swig_destroy__ = _Extrema.delete_Handle_Extrema_SequenceNodeOfSequenceOfPOnCurv2d
# SWIG runtime wiring: bind the wrapped C++ methods onto the proxy class and
# register the type with the SWIG type system.
Handle_Extrema_SequenceNodeOfSequenceOfPOnCurv2d.Nullify = new_instancemethod(_Extrema.Handle_Extrema_SequenceNodeOfSequenceOfPOnCurv2d_Nullify,None,Handle_Extrema_SequenceNodeOfSequenceOfPOnCurv2d)
Handle_Extrema_SequenceNodeOfSequenceOfPOnCurv2d.IsNull = new_instancemethod(_Extrema.Handle_Extrema_SequenceNodeOfSequenceOfPOnCurv2d_IsNull,None,Handle_Extrema_SequenceNodeOfSequenceOfPOnCurv2d)
Handle_Extrema_SequenceNodeOfSequenceOfPOnCurv2d.GetObject = new_instancemethod(_Extrema.Handle_Extrema_SequenceNodeOfSequenceOfPOnCurv2d_GetObject,None,Handle_Extrema_SequenceNodeOfSequenceOfPOnCurv2d)
Handle_Extrema_SequenceNodeOfSequenceOfPOnCurv2d_swigregister = _Extrema.Handle_Extrema_SequenceNodeOfSequenceOfPOnCurv2d_swigregister
Handle_Extrema_SequenceNodeOfSequenceOfPOnCurv2d_swigregister(Handle_Extrema_SequenceNodeOfSequenceOfPOnCurv2d)
# Module-level DownCast wrapper; immediately rebound to the raw C function.
def Handle_Extrema_SequenceNodeOfSequenceOfPOnCurv2d_DownCast(*args):
  return _Extrema.Handle_Extrema_SequenceNodeOfSequenceOfPOnCurv2d_DownCast(*args)
Handle_Extrema_SequenceNodeOfSequenceOfPOnCurv2d_DownCast = _Extrema.Handle_Extrema_SequenceNodeOfSequenceOfPOnCurv2d_DownCast
# SWIG-generated proxy for an OCCT sequence node holding an Extrema_POnSurf.
class Extrema_SequenceNodeOfSequenceOfPOnSurf(OCC.TCollection.TCollection_SeqNode):
    # `thisown` reports/sets whether Python owns the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :param I:
        :type I: Extrema_POnSurf &
        :param n:
        :type n: TCollection_SeqNodePtr &
        :param p:
        :type p: TCollection_SeqNodePtr &
        :rtype: None
        """
        _Extrema.Extrema_SequenceNodeOfSequenceOfPOnSurf_swiginit(self,_Extrema.new_Extrema_SequenceNodeOfSequenceOfPOnSurf(*args))
    def Value(self, *args):
        """
        :rtype: Extrema_POnSurf
        """
        return _Extrema.Extrema_SequenceNodeOfSequenceOfPOnSurf_Value(self, *args)
    def GetHandle(self):
        """Return (lazily creating and caching) the OCCT handle for self.

        Once a handle exists, C++ reference counting owns the object, so
        Python ownership (`thisown`) is released.
        """
        try:
            return self.thisHandle
        except AttributeError:
            # Fixed: was a bare `except:`, which would also swallow unrelated
            # exceptions (including KeyboardInterrupt/SystemExit). Only the
            # missing-attribute case should trigger handle creation.
            self.thisHandle = Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf(self)
            self.thisown = False
            return self.thisHandle
    __swig_destroy__ = _Extrema.delete_Extrema_SequenceNodeOfSequenceOfPOnSurf
# SWIG runtime wiring for Extrema_SequenceNodeOfSequenceOfPOnSurf.
Extrema_SequenceNodeOfSequenceOfPOnSurf.Value = new_instancemethod(_Extrema.Extrema_SequenceNodeOfSequenceOfPOnSurf_Value,None,Extrema_SequenceNodeOfSequenceOfPOnSurf)
Extrema_SequenceNodeOfSequenceOfPOnSurf_swigregister = _Extrema.Extrema_SequenceNodeOfSequenceOfPOnSurf_swigregister
Extrema_SequenceNodeOfSequenceOfPOnSurf_swigregister(Extrema_SequenceNodeOfSequenceOfPOnSurf)
# SWIG-generated smart-pointer (OCCT Handle) proxy for
# Extrema_SequenceNodeOfSequenceOfPOnSurf.
class Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf(OCC.TCollection.Handle_TCollection_SeqNode):
    # `thisown` reports/sets whether Python owns the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        _Extrema.Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf_swiginit(self,_Extrema.new_Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf(*args))
        # register the handle in the base object
        if len(args) > 0:
            register_handle(self, args[0])
    # DownCast converts a generic base handle to this specific handle type.
    DownCast = staticmethod(_Extrema.Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf_DownCast)
    __swig_destroy__ = _Extrema.delete_Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf
# SWIG runtime wiring: bind the wrapped C++ methods onto the proxy class and
# register the type with the SWIG type system.
Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf.Nullify = new_instancemethod(_Extrema.Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf_Nullify,None,Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf)
Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf.IsNull = new_instancemethod(_Extrema.Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf_IsNull,None,Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf)
Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf.GetObject = new_instancemethod(_Extrema.Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf_GetObject,None,Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf)
Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf_swigregister = _Extrema.Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf_swigregister
Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf_swigregister(Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf)
# Module-level DownCast wrapper; immediately rebound to the raw C function.
def Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf_DownCast(*args):
  return _Extrema.Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf_DownCast(*args)
Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf_DownCast = _Extrema.Handle_Extrema_SequenceNodeOfSequenceOfPOnSurf_DownCast
# SWIG-generated proxy for the OCCT container `Extrema_SequenceOfPOnCurv`
# (a 1-based sequence of Extrema_POnCurv points). Every method forwards to
# the compiled `_Extrema` extension; do not edit by hand.
class Extrema_SequenceOfPOnCurv(OCC.TCollection.TCollection_BaseSequence):
    # `thisown` reports/sets whether Python owns the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :rtype: None
        """
        _Extrema.Extrema_SequenceOfPOnCurv_swiginit(self,_Extrema.new_Extrema_SequenceOfPOnCurv(*args))
    def Clear(self, *args):
        """
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnCurv_Clear(self, *args)
    def Assign(self, *args):
        """
        :param Other:
        :type Other: Extrema_SequenceOfPOnCurv &
        :rtype: Extrema_SequenceOfPOnCurv
        """
        return _Extrema.Extrema_SequenceOfPOnCurv_Assign(self, *args)
    def Set(self, *args):
        """
        :param Other:
        :type Other: Extrema_SequenceOfPOnCurv &
        :rtype: Extrema_SequenceOfPOnCurv
        """
        return _Extrema.Extrema_SequenceOfPOnCurv_Set(self, *args)
    # Append/Prepend/Insert* are overloaded: they accept either a single
    # point or a whole sequence (see the stacked :param blocks below).
    def Append(self, *args):
        """
        :param T:
        :type T: Extrema_POnCurv &
        :rtype: None
        :param S:
        :type S: Extrema_SequenceOfPOnCurv &
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnCurv_Append(self, *args)
    def Prepend(self, *args):
        """
        :param T:
        :type T: Extrema_POnCurv &
        :rtype: None
        :param S:
        :type S: Extrema_SequenceOfPOnCurv &
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnCurv_Prepend(self, *args)
    def InsertBefore(self, *args):
        """
        :param Index:
        :type Index: int
        :param T:
        :type T: Extrema_POnCurv &
        :rtype: None
        :param Index:
        :type Index: int
        :param S:
        :type S: Extrema_SequenceOfPOnCurv &
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnCurv_InsertBefore(self, *args)
    def InsertAfter(self, *args):
        """
        :param Index:
        :type Index: int
        :param T:
        :type T: Extrema_POnCurv &
        :rtype: None
        :param Index:
        :type Index: int
        :param S:
        :type S: Extrema_SequenceOfPOnCurv &
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnCurv_InsertAfter(self, *args)
    def First(self, *args):
        """
        :rtype: Extrema_POnCurv
        """
        return _Extrema.Extrema_SequenceOfPOnCurv_First(self, *args)
    def Last(self, *args):
        """
        :rtype: Extrema_POnCurv
        """
        return _Extrema.Extrema_SequenceOfPOnCurv_Last(self, *args)
    def Split(self, *args):
        """
        :param Index:
        :type Index: int
        :param Sub:
        :type Sub: Extrema_SequenceOfPOnCurv &
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnCurv_Split(self, *args)
    def Value(self, *args):
        """
        :param Index:
        :type Index: int
        :rtype: Extrema_POnCurv
        """
        return _Extrema.Extrema_SequenceOfPOnCurv_Value(self, *args)
    def SetValue(self, *args):
        """
        :param Index:
        :type Index: int
        :param I:
        :type I: Extrema_POnCurv &
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnCurv_SetValue(self, *args)
    def ChangeValue(self, *args):
        """
        :param Index:
        :type Index: int
        :rtype: Extrema_POnCurv
        """
        return _Extrema.Extrema_SequenceOfPOnCurv_ChangeValue(self, *args)
    def Remove(self, *args):
        """
        :param Index:
        :type Index: int
        :rtype: None
        :param FromIndex:
        :type FromIndex: int
        :param ToIndex:
        :type ToIndex: int
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnCurv_Remove(self, *args)
    __swig_destroy__ = _Extrema.delete_Extrema_SequenceOfPOnCurv
# SWIG runtime wiring for Extrema_SequenceOfPOnCurv: attach wrapped methods
# and register the proxy with the SWIG type system.
Extrema_SequenceOfPOnCurv.Clear = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv_Clear,None,Extrema_SequenceOfPOnCurv)
Extrema_SequenceOfPOnCurv.Assign = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv_Assign,None,Extrema_SequenceOfPOnCurv)
Extrema_SequenceOfPOnCurv.Set = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv_Set,None,Extrema_SequenceOfPOnCurv)
Extrema_SequenceOfPOnCurv.Append = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv_Append,None,Extrema_SequenceOfPOnCurv)
Extrema_SequenceOfPOnCurv.Prepend = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv_Prepend,None,Extrema_SequenceOfPOnCurv)
Extrema_SequenceOfPOnCurv.InsertBefore = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv_InsertBefore,None,Extrema_SequenceOfPOnCurv)
Extrema_SequenceOfPOnCurv.InsertAfter = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv_InsertAfter,None,Extrema_SequenceOfPOnCurv)
Extrema_SequenceOfPOnCurv.First = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv_First,None,Extrema_SequenceOfPOnCurv)
Extrema_SequenceOfPOnCurv.Last = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv_Last,None,Extrema_SequenceOfPOnCurv)
Extrema_SequenceOfPOnCurv.Split = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv_Split,None,Extrema_SequenceOfPOnCurv)
Extrema_SequenceOfPOnCurv.Value = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv_Value,None,Extrema_SequenceOfPOnCurv)
Extrema_SequenceOfPOnCurv.SetValue = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv_SetValue,None,Extrema_SequenceOfPOnCurv)
Extrema_SequenceOfPOnCurv.ChangeValue = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv_ChangeValue,None,Extrema_SequenceOfPOnCurv)
Extrema_SequenceOfPOnCurv.Remove = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv_Remove,None,Extrema_SequenceOfPOnCurv)
Extrema_SequenceOfPOnCurv_swigregister = _Extrema.Extrema_SequenceOfPOnCurv_swigregister
Extrema_SequenceOfPOnCurv_swigregister(Extrema_SequenceOfPOnCurv)
# SWIG-generated proxy for the OCCT container `Extrema_SequenceOfPOnCurv2d`
# (a 1-based sequence of Extrema_POnCurv2d points). Every method forwards to
# the compiled `_Extrema` extension; do not edit by hand.
class Extrema_SequenceOfPOnCurv2d(OCC.TCollection.TCollection_BaseSequence):
    # `thisown` reports/sets whether Python owns the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :rtype: None
        """
        _Extrema.Extrema_SequenceOfPOnCurv2d_swiginit(self,_Extrema.new_Extrema_SequenceOfPOnCurv2d(*args))
    def Clear(self, *args):
        """
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnCurv2d_Clear(self, *args)
    def Assign(self, *args):
        """
        :param Other:
        :type Other: Extrema_SequenceOfPOnCurv2d &
        :rtype: Extrema_SequenceOfPOnCurv2d
        """
        return _Extrema.Extrema_SequenceOfPOnCurv2d_Assign(self, *args)
    def Set(self, *args):
        """
        :param Other:
        :type Other: Extrema_SequenceOfPOnCurv2d &
        :rtype: Extrema_SequenceOfPOnCurv2d
        """
        return _Extrema.Extrema_SequenceOfPOnCurv2d_Set(self, *args)
    # Append/Prepend/Insert* are overloaded: they accept either a single
    # point or a whole sequence (see the stacked :param blocks below).
    def Append(self, *args):
        """
        :param T:
        :type T: Extrema_POnCurv2d &
        :rtype: None
        :param S:
        :type S: Extrema_SequenceOfPOnCurv2d &
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnCurv2d_Append(self, *args)
    def Prepend(self, *args):
        """
        :param T:
        :type T: Extrema_POnCurv2d &
        :rtype: None
        :param S:
        :type S: Extrema_SequenceOfPOnCurv2d &
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnCurv2d_Prepend(self, *args)
    def InsertBefore(self, *args):
        """
        :param Index:
        :type Index: int
        :param T:
        :type T: Extrema_POnCurv2d &
        :rtype: None
        :param Index:
        :type Index: int
        :param S:
        :type S: Extrema_SequenceOfPOnCurv2d &
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnCurv2d_InsertBefore(self, *args)
    def InsertAfter(self, *args):
        """
        :param Index:
        :type Index: int
        :param T:
        :type T: Extrema_POnCurv2d &
        :rtype: None
        :param Index:
        :type Index: int
        :param S:
        :type S: Extrema_SequenceOfPOnCurv2d &
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnCurv2d_InsertAfter(self, *args)
    def First(self, *args):
        """
        :rtype: Extrema_POnCurv2d
        """
        return _Extrema.Extrema_SequenceOfPOnCurv2d_First(self, *args)
    def Last(self, *args):
        """
        :rtype: Extrema_POnCurv2d
        """
        return _Extrema.Extrema_SequenceOfPOnCurv2d_Last(self, *args)
    def Split(self, *args):
        """
        :param Index:
        :type Index: int
        :param Sub:
        :type Sub: Extrema_SequenceOfPOnCurv2d &
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnCurv2d_Split(self, *args)
    def Value(self, *args):
        """
        :param Index:
        :type Index: int
        :rtype: Extrema_POnCurv2d
        """
        return _Extrema.Extrema_SequenceOfPOnCurv2d_Value(self, *args)
    def SetValue(self, *args):
        """
        :param Index:
        :type Index: int
        :param I:
        :type I: Extrema_POnCurv2d &
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnCurv2d_SetValue(self, *args)
    def ChangeValue(self, *args):
        """
        :param Index:
        :type Index: int
        :rtype: Extrema_POnCurv2d
        """
        return _Extrema.Extrema_SequenceOfPOnCurv2d_ChangeValue(self, *args)
    def Remove(self, *args):
        """
        :param Index:
        :type Index: int
        :rtype: None
        :param FromIndex:
        :type FromIndex: int
        :param ToIndex:
        :type ToIndex: int
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnCurv2d_Remove(self, *args)
    __swig_destroy__ = _Extrema.delete_Extrema_SequenceOfPOnCurv2d
# SWIG runtime wiring for Extrema_SequenceOfPOnCurv2d: attach wrapped methods
# and register the proxy with the SWIG type system.
Extrema_SequenceOfPOnCurv2d.Clear = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv2d_Clear,None,Extrema_SequenceOfPOnCurv2d)
Extrema_SequenceOfPOnCurv2d.Assign = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv2d_Assign,None,Extrema_SequenceOfPOnCurv2d)
Extrema_SequenceOfPOnCurv2d.Set = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv2d_Set,None,Extrema_SequenceOfPOnCurv2d)
Extrema_SequenceOfPOnCurv2d.Append = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv2d_Append,None,Extrema_SequenceOfPOnCurv2d)
Extrema_SequenceOfPOnCurv2d.Prepend = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv2d_Prepend,None,Extrema_SequenceOfPOnCurv2d)
Extrema_SequenceOfPOnCurv2d.InsertBefore = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv2d_InsertBefore,None,Extrema_SequenceOfPOnCurv2d)
Extrema_SequenceOfPOnCurv2d.InsertAfter = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv2d_InsertAfter,None,Extrema_SequenceOfPOnCurv2d)
Extrema_SequenceOfPOnCurv2d.First = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv2d_First,None,Extrema_SequenceOfPOnCurv2d)
Extrema_SequenceOfPOnCurv2d.Last = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv2d_Last,None,Extrema_SequenceOfPOnCurv2d)
Extrema_SequenceOfPOnCurv2d.Split = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv2d_Split,None,Extrema_SequenceOfPOnCurv2d)
Extrema_SequenceOfPOnCurv2d.Value = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv2d_Value,None,Extrema_SequenceOfPOnCurv2d)
Extrema_SequenceOfPOnCurv2d.SetValue = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv2d_SetValue,None,Extrema_SequenceOfPOnCurv2d)
Extrema_SequenceOfPOnCurv2d.ChangeValue = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv2d_ChangeValue,None,Extrema_SequenceOfPOnCurv2d)
Extrema_SequenceOfPOnCurv2d.Remove = new_instancemethod(_Extrema.Extrema_SequenceOfPOnCurv2d_Remove,None,Extrema_SequenceOfPOnCurv2d)
Extrema_SequenceOfPOnCurv2d_swigregister = _Extrema.Extrema_SequenceOfPOnCurv2d_swigregister
Extrema_SequenceOfPOnCurv2d_swigregister(Extrema_SequenceOfPOnCurv2d)
# SWIG-generated proxy for the OCCT container `Extrema_SequenceOfPOnSurf`
# (a 1-based sequence of Extrema_POnSurf points). Every method forwards to
# the compiled `_Extrema` extension; do not edit by hand.
class Extrema_SequenceOfPOnSurf(OCC.TCollection.TCollection_BaseSequence):
    # `thisown` reports/sets whether Python owns the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :rtype: None
        """
        _Extrema.Extrema_SequenceOfPOnSurf_swiginit(self,_Extrema.new_Extrema_SequenceOfPOnSurf(*args))
    def Clear(self, *args):
        """
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnSurf_Clear(self, *args)
    def Assign(self, *args):
        """
        :param Other:
        :type Other: Extrema_SequenceOfPOnSurf &
        :rtype: Extrema_SequenceOfPOnSurf
        """
        return _Extrema.Extrema_SequenceOfPOnSurf_Assign(self, *args)
    def Set(self, *args):
        """
        :param Other:
        :type Other: Extrema_SequenceOfPOnSurf &
        :rtype: Extrema_SequenceOfPOnSurf
        """
        return _Extrema.Extrema_SequenceOfPOnSurf_Set(self, *args)
    # Append/Prepend/Insert* are overloaded: they accept either a single
    # point or a whole sequence (see the stacked :param blocks below).
    def Append(self, *args):
        """
        :param T:
        :type T: Extrema_POnSurf &
        :rtype: None
        :param S:
        :type S: Extrema_SequenceOfPOnSurf &
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnSurf_Append(self, *args)
    def Prepend(self, *args):
        """
        :param T:
        :type T: Extrema_POnSurf &
        :rtype: None
        :param S:
        :type S: Extrema_SequenceOfPOnSurf &
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnSurf_Prepend(self, *args)
    def InsertBefore(self, *args):
        """
        :param Index:
        :type Index: int
        :param T:
        :type T: Extrema_POnSurf &
        :rtype: None
        :param Index:
        :type Index: int
        :param S:
        :type S: Extrema_SequenceOfPOnSurf &
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnSurf_InsertBefore(self, *args)
    def InsertAfter(self, *args):
        """
        :param Index:
        :type Index: int
        :param T:
        :type T: Extrema_POnSurf &
        :rtype: None
        :param Index:
        :type Index: int
        :param S:
        :type S: Extrema_SequenceOfPOnSurf &
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnSurf_InsertAfter(self, *args)
    def First(self, *args):
        """
        :rtype: Extrema_POnSurf
        """
        return _Extrema.Extrema_SequenceOfPOnSurf_First(self, *args)
    def Last(self, *args):
        """
        :rtype: Extrema_POnSurf
        """
        return _Extrema.Extrema_SequenceOfPOnSurf_Last(self, *args)
    def Split(self, *args):
        """
        :param Index:
        :type Index: int
        :param Sub:
        :type Sub: Extrema_SequenceOfPOnSurf &
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnSurf_Split(self, *args)
    def Value(self, *args):
        """
        :param Index:
        :type Index: int
        :rtype: Extrema_POnSurf
        """
        return _Extrema.Extrema_SequenceOfPOnSurf_Value(self, *args)
    def SetValue(self, *args):
        """
        :param Index:
        :type Index: int
        :param I:
        :type I: Extrema_POnSurf &
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnSurf_SetValue(self, *args)
    def ChangeValue(self, *args):
        """
        :param Index:
        :type Index: int
        :rtype: Extrema_POnSurf
        """
        return _Extrema.Extrema_SequenceOfPOnSurf_ChangeValue(self, *args)
    def Remove(self, *args):
        """
        :param Index:
        :type Index: int
        :rtype: None
        :param FromIndex:
        :type FromIndex: int
        :param ToIndex:
        :type ToIndex: int
        :rtype: None
        """
        return _Extrema.Extrema_SequenceOfPOnSurf_Remove(self, *args)
    __swig_destroy__ = _Extrema.delete_Extrema_SequenceOfPOnSurf
# SWIG runtime wiring for Extrema_SequenceOfPOnSurf: attach wrapped methods
# and register the proxy with the SWIG type system.
Extrema_SequenceOfPOnSurf.Clear = new_instancemethod(_Extrema.Extrema_SequenceOfPOnSurf_Clear,None,Extrema_SequenceOfPOnSurf)
Extrema_SequenceOfPOnSurf.Assign = new_instancemethod(_Extrema.Extrema_SequenceOfPOnSurf_Assign,None,Extrema_SequenceOfPOnSurf)
Extrema_SequenceOfPOnSurf.Set = new_instancemethod(_Extrema.Extrema_SequenceOfPOnSurf_Set,None,Extrema_SequenceOfPOnSurf)
Extrema_SequenceOfPOnSurf.Append = new_instancemethod(_Extrema.Extrema_SequenceOfPOnSurf_Append,None,Extrema_SequenceOfPOnSurf)
Extrema_SequenceOfPOnSurf.Prepend = new_instancemethod(_Extrema.Extrema_SequenceOfPOnSurf_Prepend,None,Extrema_SequenceOfPOnSurf)
Extrema_SequenceOfPOnSurf.InsertBefore = new_instancemethod(_Extrema.Extrema_SequenceOfPOnSurf_InsertBefore,None,Extrema_SequenceOfPOnSurf)
Extrema_SequenceOfPOnSurf.InsertAfter = new_instancemethod(_Extrema.Extrema_SequenceOfPOnSurf_InsertAfter,None,Extrema_SequenceOfPOnSurf)
Extrema_SequenceOfPOnSurf.First = new_instancemethod(_Extrema.Extrema_SequenceOfPOnSurf_First,None,Extrema_SequenceOfPOnSurf)
Extrema_SequenceOfPOnSurf.Last = new_instancemethod(_Extrema.Extrema_SequenceOfPOnSurf_Last,None,Extrema_SequenceOfPOnSurf)
Extrema_SequenceOfPOnSurf.Split = new_instancemethod(_Extrema.Extrema_SequenceOfPOnSurf_Split,None,Extrema_SequenceOfPOnSurf)
Extrema_SequenceOfPOnSurf.Value = new_instancemethod(_Extrema.Extrema_SequenceOfPOnSurf_Value,None,Extrema_SequenceOfPOnSurf)
Extrema_SequenceOfPOnSurf.SetValue = new_instancemethod(_Extrema.Extrema_SequenceOfPOnSurf_SetValue,None,Extrema_SequenceOfPOnSurf)
Extrema_SequenceOfPOnSurf.ChangeValue = new_instancemethod(_Extrema.Extrema_SequenceOfPOnSurf_ChangeValue,None,Extrema_SequenceOfPOnSurf)
Extrema_SequenceOfPOnSurf.Remove = new_instancemethod(_Extrema.Extrema_SequenceOfPOnSurf_Remove,None,Extrema_SequenceOfPOnSurf)
Extrema_SequenceOfPOnSurf_swigregister = _Extrema.Extrema_SequenceOfPOnSurf_swigregister
Extrema_SequenceOfPOnSurf_swigregister(Extrema_SequenceOfPOnSurf)
class Extrema_POnSurfParams(Extrema_POnSurf):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* empty constructor
:rtype: None
* Creation of a point on surface with parameter values on the surface and a Pnt from gp.
:param theU:
:type theU: float
:param theV:
:type theV: float
:param thePnt:
:type thePnt: gp_Pnt
:rtype: None
"""
_Extrema.Extrema_POnSurfParams_swiginit(self,_Extrema.new_Extrema_POnSurfParams(*args))
def SetSqrDistance(self, *args):
"""
* Sets the square distance from this point to another one (e.g. to the point to be projected).
:param theSqrDistance:
:type theSqrDistance: float
:rtype: None
"""
return _Extrema.Extrema_POnSurfParams_SetSqrDistance(self, *args)
def GetSqrDistance(self, *args):
"""
* Query the square distance from this point to another one.
:rtype: float
"""
return _Extrema.Extrema_POnSurfParams_GetSqrDistance(self, *args)
def SetElementType(self, | |
from collections import OrderedDict, namedtuple
from string import whitespace
import pdb
import sys
# --- Token type tags used by the lexer and parser. ---
ADD = 'ADD'
SUB = 'SUB'
MUL = 'MUL'
INT_DIV = 'INT_DIV'    # Pascal's DIV keyword (integer division)
REAL_DIV = 'REAL_DIV'  # the '/' operator (real division)
DOT = 'DOT'
SEMI = 'SEMI'
COLON = 'COLON'
COMMA = 'COMMA'
PROGRAM = 'PROGRAM'
INT_CONST = 'INT_CONST'
REAL_CONST = 'REAL_CONST'
INT_TYPE = 'INT_TYPE'
REAL_TYPE = 'REAL_TYPE'
BEGIN = 'BEGIN'
END = 'END'
ID = 'ID'
VAR = 'VAR'
ASSIGN = 'ASSIGN'
OPAR = 'OPAR'  # '('
CPAR = 'CPAR'  # ')'
EOF = 'EOF'
# A lexical token: `type` is one of the tags above; `value` is the matched
# text (or the numeric value for INT_CONST/REAL_CONST tokens).
Token = namedtuple('Token', ['type', 'value'])
# Keywords are matched case-insensitively: the lexer upper-cases every
# identifier before looking it up here (see Lexer.get_id).
RESERVED_KEYWORDS = {
    'BEGIN' : Token(BEGIN, 'BEGIN'),
    'END' : Token(END, 'END'),
    'INTEGER' : Token(INT_TYPE, 'INTEGER'),
    'REAL' : Token(REAL_TYPE, 'REAL'),
    'PROGRAM' : Token(PROGRAM, 'PROGRAM'),
    'DIV' : Token(INT_DIV, 'DIV'),
    'VAR' : Token(VAR, 'VAR'),
}
class Lexer(object):
    """Turns Pascal source text into a stream of Tokens.

    `cur_char` is the current lookahead character, or None at end of input.
    Call `get_next_token()` repeatedly; it returns an EOF token when the
    input is exhausted.
    """

    def __init__(self, text):
        self.text = text
        self.pos = 0
        # Fixed: indexing text[0] unconditionally raised IndexError for
        # empty input; an empty source now lexes straight to EOF.
        self.cur_char = self.text[self.pos] if self.text else None
        # Single-character symbol tokens, built once per lexer instead of
        # on every get_next_token() call.
        self._symbols = {
            '+': Token(ADD, '+'),
            '-': Token(SUB, '-'),
            '*': Token(MUL, '*'),
            '/': Token(REAL_DIV, '/'),
            ',': Token(COMMA, ','),
            '.': Token(DOT, '.'),
            ';': Token(SEMI, ';'),
            ':': Token(COLON, ':'),
            '(': Token(OPAR, '('),
            ')': Token(CPAR, ')'),
        }

    def error(self):
        """Raise on any character the lexer cannot classify."""
        raise ValueError('Character not recognized.')

    def get_next_char(self):
        """Advance one character; cur_char becomes None past end of input."""
        self.pos += 1
        if self.pos < len(self.text):
            self.cur_char = self.text[self.pos]
        else:
            self.cur_char = None

    def peek_next_char(self):
        """Return the character after cur_char without consuming anything."""
        peek_pos = self.pos + 1
        if peek_pos < len(self.text):
            return self.text[peek_pos]
        return None

    def get_whitespace(self):
        """Skip a run of whitespace (the characters are discarded)."""
        while self.cur_char is not None and self.cur_char in whitespace:
            self.get_next_char()

    def get_comment(self):
        """Skip a '{ ... }' comment, including the closing brace.

        Raises:
            ValueError: if the comment is not terminated before EOF
                (previously this crashed with a TypeError).
        """
        while self.cur_char is not None and self.cur_char != '}':
            self.get_next_char()
        if self.cur_char is None:
            self.error()
        self.get_next_char()  # consume the closing '}'

    def get_num(self):
        """Lex an integer or real literal into an INT_CONST/REAL_CONST token."""
        value = ''
        while self.cur_char is not None and self.cur_char.isdigit():
            value += self.cur_char
            self.get_next_char()
        if self.cur_char == '.':
            value += self.cur_char
            self.get_next_char()
            while self.cur_char is not None and self.cur_char.isdigit():
                value += self.cur_char
                self.get_next_char()
            return Token(REAL_CONST, float(value))
        return Token(INT_CONST, int(value))

    def get_id(self):
        """Lex an identifier or reserved keyword.

        Keywords and variables are not case sensitive: the lexeme is
        upper-cased before the keyword lookup.
        """
        value = ''
        while (self.cur_char is not None and
               (self.cur_char.isalnum() or self.cur_char == '_')):
            value += self.cur_char
            self.get_next_char()
        value = value.upper()
        return RESERVED_KEYWORDS.get(value, Token(ID, value))

    def get_next_token(self):
        """Return the next Token, or Token(EOF, None) at end of input."""
        while self.cur_char is not None:
            if self.cur_char in whitespace:
                self.get_whitespace()
                continue
            if self.cur_char == '{':
                self.get_comment()
                continue
            # ':=' must be recognized before the single-char ':' below.
            if self.cur_char == ':' and self.peek_next_char() == '=':
                self.get_next_char()
                self.get_next_char()
                return Token(ASSIGN, ':=')
            if self.cur_char.isdigit():
                return self.get_num()
            if self.cur_char.isalpha() or self.cur_char == '_':
                return self.get_id()
            token = self._symbols.get(self.cur_char)
            if token is not None:
                self.get_next_char()
                return token
            self.error()
        return Token(EOF, None)
class AST(object):
    """Abstract marker base class for all parse-tree node types."""
class Program(AST):
    """Root node: a named program wrapping its top-level block."""

    def __init__(self, name, block):
        self.block = block
        self.name = name
class Block(AST):
    """A block: variable declarations followed by a compound statement."""

    def __init__(self, declarations, compound_statements):
        self.compound_statements = compound_statements
        self.declarations = declarations
class VarDecl(AST):
    """Declaration of a single variable with its type."""

    def __init__(self, type_node, var_node):
        self.var_node = var_node
        self.type_node = type_node
class Compound(AST):
    """A BEGIN..END block; `statement_list` holds its child statements."""

    def __init__(self):
        self.statement_list = []
class Assign(AST):
    """Assignment statement `left := right`; `op` is the ASSIGN token."""

    def __init__(self, left, op, right):
        self.left = left
        self.right = right
        # The operator token is exposed under both names, as other
        # operator nodes do.
        self.op = op
        self.token = op
class Empty(AST):
    """Placeholder node for an empty statement (no token of its own)."""
class TypeSpec(AST):
    """A type specifier (INTEGER or REAL) taken from its token."""

    def __init__(self, token):
        self.token = token
        self.value = token.value
class Variable(AST):
    """A variable reference; `value` is the (upper-cased) identifier."""

    def __init__(self, token):
        self.token = token
        self.value = token.value
class Num(AST):
    """A numeric literal; `value` is the int or float from the token."""

    def __init__(self, token):
        self.token = token
        self.value = token.value
class BinOp(AST):
    """A binary operation `left <op> right`."""

    def __init__(self, left, token, right):
        self.left = left
        self.right = right
        # The operator token is exposed under both names.
        self.op = token
        self.token = token
class UnaryOp(AST):
    """A unary +/- applied to `factor`; `left`/`op` hold the operator token."""

    def __init__(self, left, factor):
        self.factor = factor
        # Historical naming: the operator token is stored as both
        # `left` and `op`.
        self.op = left
        self.left = left
class Parser(object):
def __init__(self, lexer):
self.lexer = lexer
self.cur_token = self.lexer.get_next_token()
def error(self):
raise NameError('Token not recognized.')
def check_token_type(self, token_type):
print(self.cur_token)
if self.cur_token.type == token_type:
self.cur_token = self.lexer.get_next_token()
else:
self.error()
def program(self):
self.check_token_type(PROGRAM)
name = self.cur_token.value
self.check_token_type(ID)
self.check_token_type(SEMI)
block = self.block()
self.check_token_type(DOT)
self.check_token_type(EOF)
return Program(name, block)
def block(self):
declarations = self.declarations()
statements = self.compound_statement()
return Block(declarations, statements)
def declarations(self):
decls = []
if self.cur_token.type == VAR:
self.check_token_type(VAR)
while self.cur_token.type == ID:
decls.extend(self.var_decl())
self.check_token_type(SEMI)
return decls
def var_decl(self):
var_nodes = [self.variable()]
while self.cur_token.type == COMMA:
self.check_token_type(COMMA)
var_nodes.append(self.variable())
self.check_token_type(COLON)
type_node = self.type_spec()
var_decls = []
for var_node in var_nodes:
var_decls.append(VarDecl(type_node, var_node))
return var_decls
def type_spec(self):
token = self.cur_token
if token.type == INT_TYPE:
self.check_token_type(INT_TYPE)
if token.type == REAL_TYPE:
self.check_token_type(REAL_TYPE)
return TypeSpec(token)
def compound_statement(self):
self.check_token_type(BEGIN)
statements = self.statement_list()
self.check_token_type(END)
compound = Compound()
for statement in statements:
compound.statement_list.append(statement)
return compound
def statement_list(self):
statements = [self.statement()]
while self.cur_token.type == SEMI:
self.check_token_type(SEMI)
statements.append(self.statement())
if self.cur_token.type == ID:
self.error()
return statements
def statement(self):
    """statement : compound_statement | assignment_statement | empty

    Empty statements produce no token of their own, so anything that is
    neither BEGIN nor ID falls through to an Empty node. Testing for an
    explicit EMPTY token type here would never match (the original
    author notes this was a hard-won lesson).
    """
    kind = self.cur_token.type
    if kind == BEGIN:
        return self.compound_statement()
    if kind == ID:
        return self.assign()
    return self.empty()
def assign(self):
    """assignment_statement : variable ASSIGN expr"""
    target = self.variable()
    op_token = self.cur_token
    self.check_token_type(ASSIGN)
    value = self.expr1()
    return Assign(target, op_token, value)
def empty(self):
    """empty : (a no-op AST node)"""
    return Empty()
def expr1(self):
    """expr : term ((ADD | SUB) term)*"""
    node = self.expr2()
    while self.cur_token.type in (ADD, SUB):
        op_token = self.cur_token
        # Consume whichever additive operator we just matched.
        self.check_token_type(op_token.type)
        node = BinOp(node, op_token, self.expr2())
    return node
def expr2(self):
    """term : factor ((MUL | REAL_DIV | INT_DIV) factor)*"""
    node = self.expr3()
    while self.cur_token.type in (MUL, REAL_DIV, INT_DIV):
        op_token = self.cur_token
        # Consume whichever multiplicative operator we just matched.
        self.check_token_type(op_token.type)
        node = BinOp(node, op_token, self.expr3())
    return node
def expr3(self):
    """factor : (ADD | SUB) factor | OPAR expr CPAR | constant | variable"""
    token = self.cur_token
    if token.type in (ADD, SUB):
        # Unary plus/minus applies to the following factor.
        self.check_token_type(token.type)
        return UnaryOp(token, self.expr3())
    if token.type == OPAR:
        self.check_token_type(OPAR)
        inner = self.expr1()
        self.check_token_type(CPAR)
        return inner
    if token.type in (REAL_CONST, INT_CONST):
        node = Num(token)
        self.check_token_type(token.type)
        return node
    # Anything else must be an identifier reference.
    return self.variable()
def variable(self):
    """variable : ID"""
    node = Variable(self.cur_token)
    self.check_token_type(ID)
    return node
def parse(self):
    """Parse the whole token stream and return the AST root."""
    return self.program()
class NodeVisitor(object):
    """Dispatches visit(node) to a visit_<NodeClassName>(node) method."""

    def visit(self, node):
        # Fall back to visitor_default when no specific handler exists.
        handler = getattr(self, 'visit_' + type(node).__name__,
                          self.visitor_default)
        return handler(node)

    def visitor_default(self, node):
        # Reached only when a subclass forgot to implement a visitor.
        raise NameError('No method named visit_' + type(node).__name__)
class Interpreter(NodeVisitor):
    """Walks the AST rooted at *root_node* and evaluates it.

    Fix: GLOBAL_SCOPE was a class attribute, so every Interpreter
    instance shared (and polluted) a single variable table; it is now
    created per instance in __init__. Attribute access through
    instances (``interp.GLOBAL_SCOPE``) is unchanged.
    """

    def __init__(self, root_node):
        self.root_node = root_node
        # Maps variable name -> current value for this run only.
        self.GLOBAL_SCOPE = {}

    def error(self):
        raise SyntaxError('Syntax not recognized.')

    def visit_Program(self, node):
        self.visit(node.block)

    def visit_Block(self, node):
        for decl in node.declarations:
            self.visit(decl)
        self.visit(node.compound_statements)

    def visit_VarDecl(self, node):
        # Declarations carry no runtime effect in this interpreter.
        pass

    def visit_TypeSpec(self, node):
        pass

    def visit_Compound(self, node):
        for statement in node.statement_list:
            self.visit(statement)

    def visit_Assign(self, node):
        var_name = node.left.value
        self.GLOBAL_SCOPE[var_name] = self.visit(node.right)

    def visit_Empty(self, node):
        pass

    def visit_BinOp(self, node):
        op_type = node.op.type
        left = node.left
        right = node.right
        if op_type == ADD:
            return self.visit(left) + self.visit(right)
        if op_type == SUB:
            return self.visit(left) - self.visit(right)
        if op_type == MUL:
            return self.visit(left) * self.visit(right)
        if op_type == INT_DIV:
            return self.visit(left) // self.visit(right)
        if op_type == REAL_DIV:
            return self.visit(left) / self.visit(right)
        # Fix: previously fell through returning None on an unknown operator.
        self.error()

    def visit_UnaryOp(self, node):
        op_type = node.op.type
        if op_type == ADD:
            return (+1) * self.visit(node.factor)
        if op_type == SUB:
            return (-1) * self.visit(node.factor)
        # Fix: previously fell through returning None on an unknown operator.
        self.error()

    def visit_Variable(self, node):
        var_name = node.value
        var_value = self.GLOBAL_SCOPE.get(var_name)
        # Idiom fix: identity comparison with None instead of ==.
        if var_value is None:
            raise NameError('Variable not defined.')
        return var_value

    def visit_Num(self, node):
        return node.value

    def interpret(self):
        self.visit(self.root_node)
class BuildAST(NodeVisitor):
    """Debug visitor that prints a preorder trace of the AST to stdout.

    Despite the name it builds nothing: it only walks the tree rooted
    at *root_node* and prints each node's class name plus its details
    (names, operators, values).
    """

    def __init__(self, root_node):
        self.root_node = root_node

    def visit_Program(self, node):
        print(type(node).__name__)
        print(node.name)
        self.visit(node.block)

    def visit_Block(self, node):
        print(type(node).__name__)
        for decl in node.declarations:
            self.visit(decl)
        self.visit(node.compound_statements)

    def visit_VarDecl(self, node):
        print(type(node).__name__)
        self.visit(node.type_node)
        self.visit(node.var_node)

    def visit_TypeSpec(self, node):
        print(type(node).__name__)
        print(node.value)

    def visit_Compound(self, node):
        print(type(node).__name__)
        for statement in node.statement_list:
            self.visit(statement)

    def visit_Assign(self, node):
        print(type(node).__name__)
        self.visit(node.left)
        print(node.op)
        self.visit(node.right)

    def visit_Empty(self, node):
        print(type(node).__name__)

    def visit_BinOp(self, node):
        print(type(node).__name__)
        op_type = node.op.type
        left = node.left
        right = node.right
        # Print the operator's token type, then recurse into both operands.
        if op_type == ADD:
            print(ADD)
            self.visit(left)
            self.visit(right)
        if op_type == SUB:
            print(SUB)
            self.visit(left)
            self.visit(right)
        if op_type == MUL:
            print(MUL)
            self.visit(left)
            self.visit(right)
        if op_type == REAL_DIV:
            print(REAL_DIV)
            self.visit(left)
            self.visit(right)
        if op_type == INT_DIV:
            print(INT_DIV)
            self.visit(left)
            self.visit(right)

    def visit_UnaryOp(self, node):
        print(type(node).__name__)
        op_type = node.op.type
        if op_type == ADD:
            print(ADD)
            self.visit(node.factor)
        if op_type == SUB:
            print(SUB)
            self.visit(node.factor)

    def visit_Variable(self, node):
        print(type(node).__name__)
        print(node.value)

    def visit_Num(self, node):
        print(type(node).__name__)
        print(node.value)

    def build_ast(self):
        # Entry point: trace the whole tree from the stored root.
        self.visit(self.root_node)
class Symbol(object):
    """Base class for all symbols: a name plus an optional type."""

    def __init__(self, name, type=None):
        self.name = name
        self.type = type
class BuiltinTypeSymbol(Symbol):
    """A built-in type such as INTEGER or REAL; prints as its bare name."""

    def __init__(self, name):
        super().__init__(name)

    def __str__(self):
        return self.name

    __repr__ = __str__
class VarSymbol(Symbol):
    """A declared variable: a name bound to a type, shown as <name:type>."""

    def __init__(self, name, type):
        super().__init__(name, type)

    def __str__(self):
        return '<{name}:{type}>'.format(name=self.name, type=self.type)

    __repr__ = __str__
class SymbolTable(object):
    """Flat, insertion-ordered mapping of names to Symbol objects.

    The built-in INTEGER and REAL type symbols are registered on
    construction. define()/lookup() print a trace line each call.
    """

    def __init__(self):
        self._symbols = OrderedDict()
        self._init_builtins()

    def _init_builtins(self):
        # Register the primitive types every program may reference.
        self.define(BuiltinTypeSymbol('INTEGER'))
        self.define(BuiltinTypeSymbol('REAL'))

    def __str__(self):
        symbols = [value for value in self._symbols.values()]
        s = 'Symbols:{0}'.format(symbols)
        return s

    def define(self, symbol):
        print('Define ' + str(symbol))
        self._symbols[symbol.name] = symbol

    def lookup(self, name):
        # Returns None when the name is not defined.
        print('Lookup ' + str(name))
        symbol = self._symbols.get(name)
        return symbol
class SymbolTableBuilder(NodeVisitor):
def __init__(self, root_node):
self.symtab | |
# teki_main_app.py
# -*- coding: utf-8 -*-
###########################
# Standard libraries
###########################
import csv
import logging
import os
import sys
import timeit
from datetime import datetime
if __name__ == "__main__":
"""
Starting the program will take a bit of time due to
the amount of libraries and modules being imported.
In my testing, it should take only around 3 (Mac Os 11) - 9 (Windows 10)
seconds to load all of the necessary data.
However, the speed will depend entirely on your local resources.
"""
# Variables for measuring loading time
datetime_now = datetime.now()
current_time = datetime_now.strftime("%H:%M:%S")
start_time = timeit.default_timer()
print(f"The current time is {current_time}.")
print("Please wait while libraries, modules, and "
      "corpora are being imported...")
print("This should only take between 5 - 30 seconds "
      "depending on your system resources...\n")
# Message prompts reused by the menus below
return_main_menu = "Please press enter to return to the main menu..."
enter_valid_option = "Please press enter to reenter a valid option."
###############################
# Program Continuation Function
###############################
def continue_program(*args):
    """Ask the user whether to continue; exit the process otherwise.

    :param args: message strings printed before the prompt.
    :return: None. Returns only when the user picks "yes" (0) and
        confirms; every other resolved path exits via sys.exit().
    """
    # Error prompts
    for msg in args:
        print(msg)
    print("")
    options = "yes", "no"
    # The while-loop remains in place until
    # the user provides an appropriate response.
    print("Please enter the number of your response: ")
    while True:
        for number, choice in enumerate(options):
            print(number, choice)
        print("")
        user = input("Would you like to continue with the program? ").lower()
        if user == "0":
            # Yes: ask for confirmation before continuing.
            user = input("Are you sure? "
                         "Program stability cannot be guaranteed. ").lower()
            if user == "0":
                break
            else:
                sys.exit("The program will now be terminated.")
        elif user == "1":
            # No answer.
            # Fix: the message previously said the program would NOT be
            # terminated even though sys.exit() terminates it here.
            sys.exit("The program will now be terminated.")
        else:
            # Incorrect or invalid answer
            print(f"'{user}' is not a valid response. {enter_valid_option} \n")
def _rebuild_requirement_resources():
    """
    Recreate requirement_resources.txt by walking app_program_resources.

    This exists so that the initial file check can be bypassed after
    files were deleted; recreating the file requirements does not by
    itself restore code stability.

    :return
        :rtype None
        No object is returned, but a file is created
        and placed in the main directory.
    """
    with open("requirement_resources.txt", mode="w+",
              encoding="utf-8") as resources:
        for path, subdirs, files in os.walk("app_program_resources"):
            for name in files:
                file_name, extension = os.path.splitext(name)
                # Compiled bytecode is not a tracked dependency.
                if not extension == ".pyc":
                    resources.write(os.path.join(path, name)+"\n")
    print("The requirement_resources.txt file has been updated.")
    sys.exit("Please comment '_rebuild_requirement_resources()' out"
             " and restart the program")
# Uncomment the following line for dependencies to be rebuilt.
# After having done so, comment it out again to deactivate it.
#_rebuild_requirement_resources()
#########################
# Pip libraries
#########################
"""
The program can be run without the pip modules,
but program stability will be effected.
"""
try:
    import bs4
    import spacy
    import lxml
    from spacy.lang.fr import French
    from spacy.tokenizer import Tokenizer
    from bs4 import BeautifulSoup
except ImportError as error:
    # NOTE(review): this file handle is never closed; a with-block
    # would be safer here.
    requirements = sorted(open("requirements.txt").readlines())
    print("Necessary modules are missing from the program.")
    print("The following must be installed for "
          "the program to function properly:\n ")
    for req in requirements:
        print(req.strip())
    print("")
    continue_program("Would you like to "
                     "continue without these libraries and modules?")
###########################################
# Importing custom files and modules
###########################################
"""
A program-wide check is performed for the necessary files.
The program can still be started if any of the necessary files are missing,
but the program stability will be greatly compromised.
Necessary file names are located in requirement_resources.txt
"""
core_file_check = list()
if os.path.exists("app_program_resources"):
    with open("requirement_resources.txt",
              mode="r", encoding="utf-8") as resource:
        for line in resource:
            # Collect every listed resource that is missing on disk.
            if not os.path.exists(line.strip()):
                core_file_check.append(line)
try:
    from app_program_resources.app_auxiliary_functions import (
        about_program,
        restore_default_database,
        clear_log,
        DiscourseAnalysis,
        end_program,
        evaluation,
        file_finder,
        sub_menu,
        write_sentences,
        sentence_tokenizer,
        write_to_database
    )
except Exception as error:
    print(f"The following error has occurred:\n{error}\n")
    message = "Would you like to proceed despite this error?"
    continue_program(message)
#########################
# Main Program Functions
#########################
def get_text(document):
    """Read a .txt, .xml, or .csv file and return its contents.

    :param document: path to the file, either from
        app_program_resources/app_corpora or chosen by the user.
    :return: a BeautifulSoup object for .xml, a list of rows for .csv,
        a space-joined word string for .txt, implicitly None otherwise.
    """
    extension = os.path.splitext(document)[1]
    with open(document, mode="r", encoding="utf-8") as file:
        if extension == ".xml":
            return bs4.BeautifulSoup(file, "lxml")
        elif extension == ".csv":
            return [row for row in csv.reader(file, delimiter=",")]
        elif extension == ".txt":
            # Each word is followed by a single space, trailing space
            # included, matching the long-standing output shape.
            return "".join(f"{word} "
                           for line in file
                           for word in line.rstrip().split())
def get_database():
    """Let the user pick a database .csv file via file_finder().

    For proper processing the database rows should look like:
    Word, POS, Dep, Sentence Number, Corpus Tag, Feature, Tag
    e.g. corrélés,VERB,acl:relcl,SEN:2,cmr-wiki-c001-a1,LIT

    Databases live in app_program_resources/default_files/databases
    or app_user_resources/user_databases.

    :return: path name of the selected database file.
    """
    return file_finder()
def content_analysis(text):
"""
This function returns the sentence results of
the functions contained within this function.
:param text
:type str
The data from the get_text function.
:return menu
:rtype dict
The collective sentence_results of the user
according to the respective function that was
chosen by the user.
"""
def process_save(sentence_count, collective_results):
    """
    Let the user process the parsed sentences, save them, or go back.

    :param sentence_count
        :type int
        The number of sentences in the selection.
    :param collective_results
        :type dict
        All of the sentences with their respective id markers.
    :return collective_results (dict) when the user chooses processing;
        False after saving to a timestamped .csv; None (loop break)
        when returning to the menu.
    """
    while True:
        options = ("process sentences",
                   "save unprocessed sentences",
                   "return to menu")
        for number, choice in enumerate(options, start=1):
            print(number, choice)
        user = input(f"\nThe text has been parsed into approx."
                     f" {sentence_count} sentences. "
                     f"How would you like to proceed? ")
        if user == "1":
            input("The sentence results will now be processed."
                  " Please press enter to continue...")
            return collective_results
        elif user == "2":
            # Timestamped file names keep successive saves from colliding.
            file_time_id = datetime.now().strftime("%d_%m_%Y_%H_%M_%S")
            sen_res = "app_user_resources/sentence_results/sen_results_"
            unprocessed_sentences_file = f"{sen_res}{file_time_id}.csv"
            write_sentences(collective_results, unprocessed_sentences_file)
            print(f"\nThe sentences have been saved.")
            input(return_main_menu)
            return False
        elif user == "3":
            print("\nThe sentences will neither be saved nor processed. ")
            input(return_main_menu)
            break
        else:
            print(f"{user} is not a valid option.")
            input(enter_valid_option)
def read_contents():
    """
    Print the loaded text (a closure over ``text``) and return.

    After the text data has been shown, the user is prompted to press
    enter and is sent back to the main menu.
    """
    print(text)
    input(f"\n{return_main_menu}")
def xml_analysis():
"""
The .xml files are located in app_program_resources/app_corpora
This function automatically extracts textual information from
the .xml file that was selected by the user.
The respective directories are listed from which the user may select.
The user must input a valid option that is within
the range of the corpus length.
Once done, the loop will be broken and the user can progress.
As the corpora are from different sources,
it was necessary to create different control structures
so that the .xml tags could be retrieved from the respective corpora.
If the user has entered a valid range,
then this range is extracted from the desired corpus.
The sentences are then parsed using the sentence_tokenizer
located in the app_auxiliary_functions.py.
It returns the parsed sentences and they are saved together
with their respective id in a dictionary.
Note:
It is theoretically possible for it to work with any file that has
a corresponding .xml format.
However, since the function was written with
those files in mind specifically,
the | |
while True:
try:
pil_image.seek( i )
except:
break
if 'duration' not in pil_image.info:
duration = 83 # (83ms -- 1000 / 12) Set a 12 fps default when duration is missing or too funky to extract. most stuff looks ok at this.
else:
duration = pil_image.info[ 'duration' ]
# In the gif frame header, 10 is stored as 1ms. This 1 is commonly as utterly wrong as 0.
if duration in ( 0, 10 ):
duration = 83
frame_durations.append( duration )
i += 1
return ( frame_durations, times_to_play_gif )
def GetICCProfileBytes( pil_image: PILImage.Image ) -> bytes:
    # Guard clause: raise rather than return empty bytes, so callers must
    # handle the no-profile case explicitly.
    if not HasICCProfile( pil_image ):
        raise HydrusExceptions.DataMissing( 'This image has no ICC profile!' )
    
    return pil_image.info[ 'icc_profile' ]
def GetImagePixelHash( path, mime ) -> bytes:
    # SHA-256 of the decoded pixel buffer (not the raw file bytes).
    pixels = GenerateNumPyImage( path, mime )
    
    return hashlib.sha256( pixels.data.tobytes() ).digest()
def GetImageProperties( path, mime ):
    # Fast path: decode via the numpy/OpenCV route for still images; fall back
    # to PIL for PIL-only formats and to count animation frames.
    # (webp here too maybe eventually, or offload it all to ffmpeg)
    if OPENCV_OK and mime not in PIL_ONLY_MIMETYPES:
        
        ( width, height ) = GetResolutionNumPy( GenerateNumPyImage( path, mime ) )
        
        return ( ( width, height ), None, None )
        
    ( ( width, height ), num_frames ) = GetResolutionAndNumFramesPIL( path, mime )
    
    if num_frames > 1:
        
        ( durations, times_to_play_gif ) = GetGIFFrameDurations( path )
        
        return ( ( width, height ), sum( durations ), num_frames )
        
    return ( ( width, height ), None, None )
# bigger number is worse quality
# this is very rough and misses some finesse
# bigger number is worse quality
# this is very rough and misses some finesse
def GetJPEGQuantizationQualityEstimate( path ):
    
    try:
        pil_image = RawOpenPILImage( path )
    except HydrusExceptions.UnsupportedFileException:
        return ( 'unknown', None )
    
    if not hasattr( pil_image, 'quantization' ):
        return ( 'unknown', None )
    
    table_arrays = list( pil_image.quantization.values() )
    
    if len( table_arrays ) == 0:
        return ( 'unknown', None )
    
    # Mean of the summed quantization tables; larger sums mean coarser
    # quantization and therefore worse quality.
    quality = sum( ( sum( table_array ) for table_array in table_arrays ) )
    quality /= len( table_arrays )
    
    # Bands ordered worst to best; first threshold met wins.
    bands = (
        ( 3400, 'very low' ),
        ( 2000, 'low' ),
        ( 1400, 'medium low' ),
        ( 1000, 'medium' ),
        ( 700, 'medium high' ),
        ( 400, 'high' ),
        ( 200, 'very high' )
    )
    
    for ( bound, label ) in bands:
        
        if quality >= bound:
            
            return ( label, quality )
        
    return ( 'extremely high', quality )
def GetPSDResolution( path ):
    # PSD file header stores height then width as big-endian uint32s
    # starting at byte offset 14.
    with open( path, 'rb' ) as f:
        
        f.seek( 14 )
        
        ( height, ) = struct.unpack( '>L', f.read( 4 ) )
        ( width, ) = struct.unpack( '>L', f.read( 4 ) )
        
    return ( width, height )
def GetResolutionNumPy( numpy_image ):
    # numpy shape is ( rows, cols, channels ); resolution is ( width, height ).
    # Unpacking deliberately requires a 3-dimensional array.
    ( num_rows, num_cols, num_channels ) = numpy_image.shape
    
    return ( num_cols, num_rows )
def GetResolutionAndNumFramesPIL( path, mime ):
    # Returns ( ( width, height ), num_frames ) using PIL only.
    # dequantize = False: only size/frame info is needed, not corrected pixels.
    pil_image = GeneratePILImage( path, dequantize = False )
    ( x, y ) = pil_image.size
    # Only trust multi-frame info for gifs: some jpegs came up with 2 frames
    # and 'duration' because of some embedded thumbnail in the metadata.
    if mime == HC.IMAGE_GIF:
        try:
            # Probe seekability first; a single-frame image raises here.
            pil_image.seek( 1 )
            pil_image.seek( 0 )
            num_frames = 1
            # Count frames by seeking forward until PIL raises.
            while True:
                try:
                    pil_image.seek( pil_image.tell() + 1 )
                    num_frames += 1
                except:
                    break
        except:
            num_frames = 1
    else:
        num_frames = 1
    return ( ( x, y ), num_frames )
def GetThumbnailResolution( image_resolution, bounding_dimensions ):
    # Fit the image inside the bounding box, preserving aspect ratio and
    # never upscaling. Either dimension is clamped to at least 1 pixel.
    ( im_width, im_height ) = image_resolution
    ( bounding_width, bounding_height ) = bounding_dimensions
    
    if im_width <= bounding_width and im_height <= bounding_height:
        
        # Already fits: keep the original size.
        return ( im_width, im_height )
        
    width_ratio = im_width / bounding_width
    height_ratio = im_height / bounding_height
    
    if width_ratio > height_ratio:
        
        ( tw, th ) = ( bounding_width, im_height / width_ratio )
        
    elif height_ratio > width_ratio:
        
        ( tw, th ) = ( im_width / height_ratio, bounding_height )
        
    else:
        
        ( tw, th ) = ( bounding_width, bounding_height )
        
    return ( max( int( tw ), 1 ), max( int( th ), 1 ) )
def GetTimesToPlayGIF( path ) -> int:
    # Default to a single play when the file cannot be opened at all.
    try:
        pil_image = RawOpenPILImage( path )
    except HydrusExceptions.UnsupportedFileException:
        return 1
    return GetTimesToPlayGIFFromPIL( pil_image )
def GetTimesToPlayGIFFromPIL( pil_image: PILImage.Image ) -> int:
    # Default to a single play when the image metadata specifies no loop count.
    return pil_image.info.get( 'loop', 1 )
def HasICCProfile( pil_image: PILImage.Image ) -> bool:
    # A usable profile must be present, bytes-typed, and non-empty.
    icc_profile = pil_image.info.get( 'icc_profile' )
    
    return isinstance( icc_profile, bytes ) and len( icc_profile ) > 0
def IsDecompressionBomb( path ) -> bool:
    # there are two errors here, the 'Warning' and the 'Error', which atm is just a test vs a test x 2 for number of pixels
    # 256MB bmp by default, ( 1024 ** 3 ) // 4 // 3
    # we'll set it at 512MB, and now catching error should be about 1GB
    PILImage.MAX_IMAGE_PIXELS = ( 512 * ( 1024 ** 2 ) ) // 3
    # Escalate the bomb error so the open below raises instead of warning.
    warnings.simplefilter( 'error', PILImage.DecompressionBombError )
    try:
        RawOpenPILImage( path )
    except ( PILImage.DecompressionBombError ):
        return True
    except:
        # pil was unable to load it, which does not mean it was a decomp bomb
        return False
    finally:
        # Always restore PIL's module-global state, whatever the outcome.
        PILImage.MAX_IMAGE_PIXELS = None
        warnings.simplefilter( 'ignore', PILImage.DecompressionBombError )
    return False
def NormaliseICCProfilePILImageToSRGB( pil_image: PILImage.Image ):
    # Convert an image with an embedded ICC profile to sRGB; images without a
    # profile are returned untouched (after RGB normalisation is skipped).
    try:
        icc_profile_bytes = GetICCProfileBytes( pil_image )
    except HydrusExceptions.DataMissing:
        return pil_image
    try:
        f = io.BytesIO( icc_profile_bytes )
        src_profile = PILImageCms.ImageCmsProfile( f )
        if pil_image.mode in ( 'L', 'LA' ):
            # had a bunch of LA pngs that turned pure white on RGBA ICC conversion
            # but seem to work fine if keep colourspace the same for now
            # it is a mystery, I guess a PIL bug, but presumably L and LA are technically sRGB so it is still ok to this
            outputMode = pil_image.mode
        else:
            if PILImageHasAlpha( pil_image ):
                outputMode = 'RGBA'
            else:
                outputMode = 'RGB'
        pil_image = PILImageCms.profileToProfile( pil_image, src_profile, PIL_SRGB_PROFILE, outputMode = outputMode )
    except ( PILImageCms.PyCMSError, OSError ):
        # 'cannot build transform' and presumably some other fun errors
        # way more advanced than we can deal with, so we'll just no-op
        # OSError is due to a "OSError: cannot open profile from string" a user got
        # no idea, but that seems to be an ImageCms issue doing byte handling and ending up with an odd OSError?
        # or maybe somehow my PIL reader or bytesIO sending string for some reason?
        # in any case, nuke it for now
        pass
    # Whatever happened above, finish by normalising the mode to RGB/RGBA.
    pil_image = NormalisePILImageToRGB( pil_image )
    return pil_image
def NormalisePILImageToRGB( pil_image: PILImage.Image ):
    # Convert to RGB, or RGBA when the image carries transparency.
    if PILImageHasAlpha( pil_image ):
        desired_mode = 'RGBA'
    else:
        desired_mode = 'RGB'
    if pil_image.mode != desired_mode:
        if pil_image.mode == 'LAB':
            # LAB cannot be .convert()ed directly; route through ImageCms.
            pil_image = PILImageCms.profileToProfile( pil_image, PILImageCms.createProfile( 'LAB' ), PIL_SRGB_PROFILE, outputMode = 'RGB' )
        else:
            pil_image = pil_image.convert( desired_mode )
    return pil_image
def PILImageHasAlpha( pil_image: PILImage.Image ):
    # True for explicit alpha modes, or palette images with a transparency entry.
    if pil_image.mode in ( 'LA', 'RGBA' ):
        
        return True
        
    return pil_image.mode == 'P' and 'transparency' in pil_image.info
def RawOpenPILImage( path ) -> PILImage.Image:
    # Open *path* with PIL, wrapping any failure in a hydrus exception.
    # Fix: chain the original PIL error as the cause (`from e`) instead of
    # discarding it, so tracebacks show what actually went wrong.
    try:
        pil_image = PILImage.open( path )
    except Exception as e:
        raise HydrusExceptions.DamagedOrUnusualFileException( 'Could not load the image--it was likely malformed!' ) from e
    return pil_image
def ResizeNumPyImage( numpy_image: numpy.array, target_resolution ) -> numpy.array:
( target_width, target_height ) = target_resolution
( image_width, image_height ) = GetResolutionNumPy( numpy_image )
| |
030400
0.0000000E+00 0.0000000E+00 0.6296256E+08 0.1647892E+10 0.0000000E+00 201400
0.0000000E+00 0.0000000E+00 0.9951448E+10 0.5164005E+10 0.0000000E+00 111400
0.0000000E+00 0.0000000E+00 0.7414713E+10 0.3544284E+10 0.0000000E+00 021400
-0.2259765E+10-0.9989260E+09 0.0000000E+00 0.0000000E+00-0.1003933E+10 102400
0.3345159E+09-0.5395210E+09 0.0000000E+00 0.0000000E+00-0.1804238E+10 012400
0.0000000E+00 0.0000000E+00-0.1881120E+10-0.1131220E+09 0.0000000E+00 003400
0.0000000E+00 0.0000000E+00 0.1146895E+10 0.5761725E+09 0.0000000E+00 300301
0.0000000E+00 0.0000000E+00 0.5367666E+09 0.4733975E+09 0.0000000E+00 210301
0.0000000E+00 0.0000000E+00-0.1835765E+10-0.5289399E+09 0.0000000E+00 120301
0.0000000E+00 0.0000000E+00-0.1174663E+10-0.4030918E+09 0.0000000E+00 030301
0.1115345E+11 0.3656051E+10 0.0000000E+00 0.0000000E+00 0.7949242E+09 201301
0.1459562E+11 0.4659637E+10 0.0000000E+00 0.0000000E+00 0.9949378E+09 111301
0.4413764E+10 0.1348671E+10 0.0000000E+00 0.0000000E+00 0.3230346E+09 021301
0.0000000E+00 0.0000000E+00 0.2837919E+10 0.9850787E+09 0.0000000E+00 102301
0.0000000E+00 0.0000000E+00 0.1072337E+10 0.5867213E+09 0.0000000E+00 012301
0.2792893E+09 0.1062281E+09 0.0000000E+00 0.0000000E+00 -6824455. 003301
0.2010433E+09 0.5326376E+08 0.0000000E+00 0.0000000E+00 0.5417479E+08 300202
0.1301185E+09 0.3185835E+08 0.0000000E+00 0.0000000E+00 0.6038376E+08 210202
-0.2773834E+09-0.8597787E+08 0.0000000E+00 0.0000000E+00-0.1187110E+08 120202
-0.1657962E+09-0.5051984E+08 0.0000000E+00 0.0000000E+00-0.1764495E+08 030202
0.0000000E+00 0.0000000E+00 0.1398549E+09 0.7688729E+08 0.0000000E+00 201202
0.0000000E+00 0.0000000E+00 0.2021603E+09 0.8446181E+08 0.0000000E+00 111202
0.0000000E+00 0.0000000E+00 -829791.3 -4648031. 0.0000000E+00 021202
-0.1206902E+09-0.4601588E+08 0.0000000E+00 0.0000000E+00 0.1233336E+08 102202
-0.6473666E+08-0.2247205E+08 0.0000000E+00 0.0000000E+00 0.1380750E+08 012202
0.0000000E+00 0.0000000E+00 -8304302. 2931836. 0.0000000E+00 003202
0.0000000E+00 0.0000000E+00 2219800. 1239676. 0.0000000E+00 300103
0.0000000E+00 0.0000000E+00 -572043.6 349167.6 0.0000000E+00 210103
0.0000000E+00 0.0000000E+00-0.1476718E+08 -4438188. 0.0000000E+00 120103
0.0000000E+00 0.0000000E+00 -7218157. -2449947. 0.0000000E+00 030103
0.8360994E+08 0.2523247E+08 0.0000000E+00 0.0000000E+00 8230262. 201103
0.9685654E+08 0.2914382E+08 0.0000000E+00 0.0000000E+00 9227953. 111103
0.2728290E+08 8365120. 0.0000000E+00 0.0000000E+00 2338263. 021103
0.0000000E+00 0.0000000E+00 0.2003854E+08 4953618. 0.0000000E+00 102103
0.0000000E+00 0.0000000E+00 0.1528733E+08 3935164. 0.0000000E+00 012103
1651584. 479639.9 0.0000000E+00 0.0000000E+00 -36700.04 003103
231946.5 47152.77 0.0000000E+00 0.0000000E+00 14637.13 300004
284416.3 44805.51 0.0000000E+00 0.0000000E+00 5894.313 210004
-200997.3 -65212.58 0.0000000E+00 0.0000000E+00 -21931.93 120004
-186618.7 -47120.59 0.0000000E+00 0.0000000E+00 -13180.15 030004
0.0000000E+00 0.0000000E+00 -153026.7 -45907.21 0.0000000E+00 201004
0.0000000E+00 0.0000000E+00 -168538.2 -71126.33 0.0000000E+00 111004
0.0000000E+00 0.0000000E+00 -389763.7 -136458.9 0.0000000E+00 021004
-883486.6 -272346.4 0.0000000E+00 0.0000000E+00 -74704.06 102004
-466453.2 -140576.7 0.0000000E+00 0.0000000E+00 -37592.99 012004
0.0000000E+00 0.0000000E+00 36003.33 7473.475 0.0000000E+00 003004
0.0000000E+00 0.0000000E+00 0.3401349E+10 0.1202015E+10 0.0000000E+00 200500
0.0000000E+00 0.0000000E+00 0.8487357E+10 0.2999641E+10 0.0000000E+00 110500
0.0000000E+00 0.0000000E+00 0.4973495E+10 0.2061458E+10 0.0000000E+00 020500
-0.8419414E+10-0.3333235E+10 0.0000000E+00 0.0000000E+00-0.1545576E+10 101500
-0.3394056E+10-0.1885291E+10 0.0000000E+00 0.0000000E+00-0.1893922E+10 011500
0.0000000E+00 0.0000000E+00-0.2246885E+10 0.2282267E+09 0.0000000E+00 002500
0.3948881E+10 0.1296699E+10 0.0000000E+00 0.0000000E+00 0.2894606E+09 200401
0.5088685E+10 0.1619784E+10 0.0000000E+00 0.0000000E+00 0.3995805E+09 110401
0.1273257E+10 0.3874027E+09 0.0000000E+00 0.0000000E+00 0.1499869E+09 020401
0.0000000E+00 0.0000000E+00 0.1331292E+10 0.4856548E+09 0.0000000E+00 101401
0.0000000E+00 0.0000000E+00 0.3989400E+08 0.2765795E+09 0.0000000E+00 011401
0.5829218E+09 0.2031062E+09 0.0000000E+00 0.0000000E+00 -2964176. 002401
0.0000000E+00 0.0000000E+00 0.1248501E+09 0.4499776E+08 0.0000000E+00 200302
0.0000000E+00 0.0000000E+00 0.1792722E+09 0.4601455E+08 0.0000000E+00 110302
0.0000000E+00 0.0000000E+00 0.5416893E+08 761123.8 0.0000000E+00 020302
-0.3837699E+09-0.1287064E+09 0.0000000E+00 0.0000000E+00 -5170232. 101302
-0.1873061E+09-0.6222686E+08 0.0000000E+00 0.0000000E+00 3064131. 011302
0.0000000E+00 0.0000000E+00-0.5567557E+08 -3313831. 0.0000000E+00 002302
0.5279715E+08 0.1621289E+08 0.0000000E+00 0.0000000E+00 5976889. 200203
0.6013825E+08 0.1879073E+08 0.0000000E+00 0.0000000E+00 6572688. 110203
0.1239868E+08 4044204. 0.0000000E+00 0.0000000E+00 1328699. 020203
0.0000000E+00 0.0000000E+00 0.4440244E+08 0.1135745E+08 0.0000000E+00 101203
0.0000000E+00 0.0000000E+00 0.2937297E+08 8521687. 0.0000000E+00 011203
0.1298490E+08 3954940. 0.0000000E+00 0.0000000E+00 -13580.93 002203
0.0000000E+00 0.0000000E+00 222249.9 3933.063 0.0000000E+00 200104
0.0000000E+00 0.0000000E+00 -75917.90 -113856.6 0.0000000E+00 110104
0.0000000E+00 0.0000000E+00 -489871.1 -273965.6 0.0000000E+00 020104
-1493777. -519109.8 0.0000000E+00 0.0000000E+00 -20525.72 101104
-947988.2 -326888.3 0.0000000E+00 0.0000000E+00 9097.968 011104
0.0000000E+00 0.0000000E+00 118757.4 91968.55 0.0000000E+00 002104
24254.80 5439.169 0.0000000E+00 0.0000000E+00 1311.744 200005
23328.90 5055.641 0.0000000E+00 0.0000000E+00 1302.854 110005
-3905.565 -1001.003 0.0000000E+00 0.0000000E+00 -200.9389 020005
0.0000000E+00 0.0000000E+00 62867.68 10873.84 0.0000000E+00 101005
0.0000000E+00 0.0000000E+00 30254.90 4044.343 0.0000000E+00 011005
20267.10 5501.414 0.0000000E+00 0.0000000E+00 -186.9831 002005
-0.3699542E+10-0.1516515E+10 0.0000000E+00 0.0000000E+00-0.7713627E+09 100600
-0.1569619E+10-0.8600310E+09 0.0000000E+00 0.0000000E+00-0.8024237E+09 010600
0.0000000E+00 0.0000000E+00-0.1169523E+10 0.6452813E+09 0.0000000E+00 001600
0.0000000E+00 0.0000000E+00-0.1534725E+09-0.1935379E+08 0.0000000E+00 100501
0.0000000E+00 0.0000000E+00-0.4478034E+09-0.1763579E+08 0.0000000E+00 010501
0.4556388E+09 0.1658176E+09 0.0000000E+00 0.0000000E+00 0.2339054E+08 001501
-0.1457095E+09-0.5080045E+08 0.0000000E+00 0.0000000E+00 -7310179. 100402
-0.4761719E+08-0.1830207E+08 0.0000000E+00 0.0000000E+00 -2337224. 010402
0.0000000E+00 0.0000000E+00-0.8474024E+08 -4698123. 0.0000000E+00 001402
0.0000000E+00 0.0000000E+00 0.2437653E+08 6554845. 0.0000000E+00 100303
0.0000000E+00 0.0000000E+00 0.1614093E+08 5151183. 0.0000000E+00 010303
0.1612128E+08 5187028. 0.0000000E+00 0.0000000E+00 -230593.9 001303
545605.2 122262.2 0.0000000E+00 0.0000000E+00 149262.1 100204
313832.7 52989.45 0.0000000E+00 0.0000000E+00 96544.62 010204
0.0000000E+00 0.0000000E+00 -350435.5 136291.7 0.0000000E+00 001204
0.0000000E+00 0.0000000E+00 141882.8 47498.86 0.0000000E+00 100105
0.0000000E+00 0.0000000E+00 82728.51 26100.60 0.0000000E+00 010105
33377.86 10156.19 0.0000000E+00 0.0000000E+00 -592.1389 001105
1993.637 428.3709 0.0000000E+00 0.0000000E+00 110.2606 100006
1306.351 293.6170 0.0000000E+00 0.0000000E+00 58.59527 010006
0.0000000E+00 0.0000000E+00 -1523.415 -353.3578 0.0000000E+00 001006
0.0000000E+00 0.0000000E+00-0.1073474E+09 0.4141696E+09 0.0000000E+00 000700
-1042524. 0.1248082E+08 0.0000000E+00 0.0000000E+00 0.1850928E+08 000601
0.0000000E+00 0.0000000E+00-0.3706018E+08 -723932.2 0.0000000E+00 000502
2035165. 863027.3 0.0000000E+00 0.0000000E+00 -247675.9 000403
0.0000000E+00 0.0000000E+00 -514223.3 -11895.02 0.0000000E+00 000304
-2022.265 211.0716 0.0000000E+00 0.0000000E+00 1578.421 000205
0.0000000E+00 0.0000000E+00 -2125.842 -673.4955 0.0000000E+00 000106
90.11078 20.93524 0.0000000E+00 0.0000000E+00 3.921578 000007
-0.1241625E+10-0.1326080E+09 0.0000000E+00 0.0000000E+00 0.8894077E+08 800000
-0.5904261E+10 0.8109356E+09 0.0000000E+00 0.0000000E+00 0.3140676E+10 710000
-0.6549261E+11-0.8342304E+10 0.0000000E+00 0.0000000E+00 0.1386882E+11 620000
-0.3065535E+12-0.6196940E+11 0.0000000E+00 0.0000000E+00 0.2744726E+11 530000
-0.6449958E+12-0.1416417E+12 0.0000000E+00 0.0000000E+00 0.3248935E+11 440000
-0.6892184E+12-0.1484102E+12 0.0000000E+00 0.0000000E+00 0.2928483E+11 350000
-0.3758958E+12-0.7192821E+11 0.0000000E+00 0.0000000E+00 0.2106340E+11 260000
-0.9482983E+11-0.1244040E+11 0.0000000E+00 0.0000000E+00 0.9619017E+10 170000
-0.7934949E+10 0.2641410E+09 0.0000000E+00 0.0000000E+00 0.1820693E+10 080000
0.0000000E+00 0.0000000E+00 0.5223071E+10 0.1289293E+10 0.0000000E+00 701000
0.0000000E+00 0.0000000E+00 0.6035408E+10-0.2723166E+10 0.0000000E+00 611000
0.0000000E+00 0.0000000E+00-0.1162013E+12-0.6710796E+11 0.0000000E+00 521000
0.0000000E+00 0.0000000E+00-0.4991306E+12-0.2465264E+12 0.0000000E+00 431000
0.0000000E+00 0.0000000E+00-0.8578111E+12-0.3941308E+12 0.0000000E+00 341000
0.0000000E+00 0.0000000E+00-0.7221457E+12-0.3044731E+12 0.0000000E+00 251000
0.0000000E+00 0.0000000E+00-0.2864122E+12-0.1036987E+12 0.0000000E+00 161000
0.0000000E+00 0.0000000E+00-0.4293655E+11-0.1091104E+11 0.0000000E+00 071000
-0.1337299E+12-0.4238826E+11 0.0000000E+00 0.0000000E+00-0.3324307E+11 602000
0.8734934E+11 0.4260018E+11 0.0000000E+00 0.0000000E+00-0.3153807E+11 512000
0.1450203E+13 0.5146624E+12 0.0000000E+00 0.0000000E+00 0.1563847E+12 422000
0.2287491E+13 0.7930609E+12 0.0000000E+00 0.0000000E+00 0.2950036E+12 332000
0.1224482E+13 0.4171409E+12 0.0000000E+00 0.0000000E+00 0.1595034E+12 242000
0.1439244E+12 0.4488026E+11 0.0000000E+00 0.0000000E+00 0.1350689E+11 152000
-0.3083812E+11-0.1180318E+11 0.0000000E+00 0.0000000E+00-0.5960107E+10 062000
0.0000000E+00 0.0000000E+00-0.3010681E+12-0.1157698E+12 0.0000000E+00 503000
0.0000000E+00 0.0000000E+00-0.6870045E+12-0.2539329E+12 0.0000000E+00 413000
0.0000000E+00 0.0000000E+00-0.3426010E+12-0.9579518E+11 0.0000000E+00 323000
0.0000000E+00 0.0000000E+00 0.1809767E+12 0.1120276E+12 0.0000000E+00 233000
0.0000000E+00 0.0000000E+00 0.1587809E+12 0.7929063E+11 0.0000000E+00 143000
0.0000000E+00 0.0000000E+00 0.2095080E+11 0.9863961E+10 0.0000000E+00 053000
-0.3559892E+12-0.1215620E+12 0.0000000E+00 0.0000000E+00-0.4719576E+11 404000
-0.6651718E+12-0.2273892E+12 0.0000000E+00 0.0000000E+00-0.9872797E+11 314000
-0.1976025E+12-0.6737101E+11 0.0000000E+00 0.0000000E+00-0.5357852E+11 224000
0.1705784E+12 0.5900071E+11 0.0000000E+00 0.0000000E+00 0.2717292E+10 134000
0.6741602E+11 0.2342878E+11 0.0000000E+00 0.0000000E+00 0.5268764E+10 044000
0.0000000E+00 0.0000000E+00-0.8552882E+11-0.3573061E+11 0.0000000E+00 305000
0.0000000E+00 0.0000000E+00-0.1559323E+12-0.6366039E+11 0.0000000E+00 215000
0.0000000E+00 0.0000000E+00-0.9032252E+11-0.3371385E+11 0.0000000E+00 125000
0.0000000E+00 0.0000000E+00-0.1658228E+11-0.4618579E+10 0.0000000E+00 035000
-0.6226716E+11-0.2128656E+11 0.0000000E+00 0.0000000E+00-0.3708963E+10 206000
-0.7525640E+11-0.2556401E+11 0.0000000E+00 0.0000000E+00-0.4290717E+10 116000
-0.1899486E+11-0.6372782E+10 0.0000000E+00 0.0000000E+00-0.1051603E+10 026000
0.0000000E+00 0.0000000E+00-0.8598102E+09-0.7509532E+09 0.0000000E+00 107000
0.0000000E+00 0.0000000E+00-0.4726686E+09-0.5100896E+09 0.0000000E+00 017000
-0.3435761E+09-0.1432140E+09 0.0000000E+00 0.0000000E+00-0.6904768E+08 008000
0.0000000E+00 0.0000000E+00 0.7774829E+10 0.3933874E+10 0.0000000E+00 700100
0.0000000E+00 0.0000000E+00-0.1175537E+10 0.6723510E+10 0.0000000E+00 610100
0.0000000E+00 0.0000000E+00-0.2047701E+12-0.6497474E+11 0.0000000E+00 520100
0.0000000E+00 0.0000000E+00-0.7521483E+12-0.2844906E+12 0.0000000E+00 430100
0.0000000E+00 0.0000000E+00-0.1192580E+13-0.4553381E+12 0.0000000E+00 340100
0.0000000E+00 0.0000000E+00-0.9320339E+12-0.3270137E+12 0.0000000E+00 250100
0.0000000E+00 0.0000000E+00-0.3459819E+12-0.9506359E+11 0.0000000E+00 160100
0.0000000E+00 0.0000000E+00-0.4823245E+11-0.6170177E+10 0.0000000E+00 070100
-0.3250394E+12-0.1046372E+12 0.0000000E+00 0.0000000E+00-0.6675746E+11 601100
0.1721912E+11 0.3401888E+11 0.0000000E+00 0.0000000E+00-0.3696886E+11 511100
0.2407619E+13 0.8732868E+12 0.0000000E+00 0.0000000E+00 0.3536614E+12 421100
0.3399929E+13 0.1204381E+13 0.0000000E+00 0.0000000E+00 0.5568786E+12 331100
0.1254848E+13 0.4427031E+12 0.0000000E+00 0.0000000E+00 0.2422336E+12 241100
-0.1896815E+12-0.6594242E+11 0.0000000E+00 0.0000000E+00-0.4397519E+10 151100
-0.1130942E+12-0.3988727E+11 0.0000000E+00 0.0000000E+00-0.1353075E+11 061100
0.0000000E+00 0.0000000E+00-0.8869260E+12-0.3364140E+12 0.0000000E+00 502100
0.0000000E+00 0.0000000E+00-0.1922910E+13-0.6729494E+12 0.0000000E+00 412100
0.0000000E+00 0.0000000E+00-0.8799367E+12-0.1357550E+12 0.0000000E+00 322100
0.0000000E+00 0.0000000E+00 0.4170506E+12 0.3994957E+12 0.0000000E+00 232100
0.0000000E+00 0.0000000E+00 0.2553724E+12 0.2128354E+12 0.0000000E+00 142100
0.0000000E+00 0.0000000E+00-0.1685595E+11 0.1148143E+11 0.0000000E+00 052100
-0.1509139E+13-0.5176500E+12 0.0000000E+00 0.0000000E+00-0.2094323E+12 403100
-0.2682093E+13-0.9222414E+12 0.0000000E+00 0.0000000E+00-0.4310835E+12 313100
-0.7283723E+12-0.2505869E+12 0.0000000E+00 0.0000000E+00-0.2275374E+12 223100
0.6624696E+12 0.2307514E+12 0.0000000E+00 0.0000000E+00 0.1744583E+11 133100
0.2403215E+12 0.8474707E+11 0.0000000E+00 0.0000000E+00 0.2549258E+11 043100
0.0000000E+00 0.0000000E+00-0.4513837E+12-0.1895617E+12 0.0000000E+00 304100
0.0000000E+00 0.0000000E+00-0.8480173E+12-0.3432619E+12 0.0000000E+00 214100
0.0000000E+00 0.0000000E+00-0.5342248E+12-0.1916479E+12 0.0000000E+00 124100
0.0000000E+00 0.0000000E+00-0.1094299E+12-0.2847785E+11 0.0000000E+00 034100
-0.4418481E+12-0.1510934E+12 0.0000000E+00 0.0000000E+00-0.2585794E+11 205100
-0.5239538E+12-0.1773682E+12 0.0000000E+00 0.0000000E+00-0.2831419E+11 115100
-0.1314634E+12-0.4375892E+11 0.0000000E+00 0.0000000E+00-0.6795256E+10 025100
0.0000000E+00 0.0000000E+00-0.3743490E+10-0.6069405E+10 0.0000000E+00 106100
0.0000000E+00 0.0000000E+00-0.2433207E+10-0.4298640E+10 0.0000000E+00 016100
-0.2419969E+10-0.1104548E+10 0.0000000E+00 0.0000000E+00-0.7088195E+09 007100
-0.2743503E+10-0.1012495E+10 0.0000000E+00 0.0000000E+00-0.1622548E+09 700001
-0.1368367E+11-0.5328839E+10 0.0000000E+00 0.0000000E+00-0.5118857E+09 610001
-0.3088530E+11-0.1258194E+11 0.0000000E+00 0.0000000E+00-0.5474067E+09 520001
-0.4726325E+11-0.1895652E+11 0.0000000E+00 0.0000000E+00-0.9764708E+09 430001
-0.5308440E+11-0.1983403E+11 0.0000000E+00 0.0000000E+00-0.2472261E+10 340001
-0.3542008E+11-0.1259283E+11 0.0000000E+00 0.0000000E+00-0.2912956E+10 250001
-0.9412104E+10-0.3451173E+10 0.0000000E+00 0.0000000E+00-0.1457324E+10 160001
0.1688103E+09-0.5125585E+08 0.0000000E+00 0.0000000E+00-0.2682741E+09 070001
0.0000000E+00 0.0000000E+00 0.3470756E+10 0.5344406E+09 0.0000000E+00 601001
0.0000000E+00 0.0000000E+00 0.2024478E+11 0.3948060E+10 0.0000000E+00 511001
0.0000000E+00 0.0000000E+00 0.5037730E+11 0.1182211E+11 0.0000000E+00 421001
0.0000000E+00 0.0000000E+00 0.6037993E+11 0.1330512E+11 0.0000000E+00 331001
0.0000000E+00 0.0000000E+00 0.3100973E+11 0.1783668E+10 0.0000000E+00 241001
0.0000000E+00 0.0000000E+00 0.6409988E+10-0.4075055E+10 0.0000000E+00 151001
0.0000000E+00 0.0000000E+00 0.1069830E+10-0.1209340E+10 0.0000000E+00 061001
-0.3036607E+11-0.7746158E+10 0.0000000E+00 0.0000000E+00-0.8406950E+10 502001
-0.2244031E+11-0.3110575E+10 0.0000000E+00 0.0000000E+00-0.1319760E+11 412001
0.1562941E+12 0.5121297E+11 0.0000000E+00 0.0000000E+00 0.1508000E+11 322001
0.2279005E+12 0.7037403E+11 0.0000000E+00 0.0000000E+00 0.3385100E+11 232001
0.7782208E+11 0.2257073E+11 0.0000000E+00 0.0000000E+00 0.1507371E+11 142001
-0.3470953E+10-0.1723793E+10 0.0000000E+00 0.0000000E+00 0.1114305E+10 052001
0.0000000E+00 0.0000000E+00-0.5645283E+11-0.2216314E+11 0.0000000E+00 403001
0.0000000E+00 0.0000000E+00-0.1114781E+12-0.4318102E+11 0.0000000E+00 313001
0.0000000E+00 0.0000000E+00-0.4232566E+11-0.1482634E+11 0.0000000E+00 223001
0.0000000E+00 0.0000000E+00 0.1824427E+11 0.8957391E+10 0.0000000E+00 133001
0.0000000E+00 0.0000000E+00 0.8883829E+10 0.3927110E+10 0.0000000E+00 043001
-0.2563407E+11-0.7474078E+10 0.0000000E+00 0.0000000E+00-0.6336011E+10 304001
-0.5115833E+11-0.1527683E+11 0.0000000E+00 0.0000000E+00-0.1118625E+11 214001
-0.1620528E+11-0.4601697E+10 0.0000000E+00 0.0000000E+00-0.4661023E+10 124001
0.5514548E+10 0.1905428E+10 0.0000000E+00 0.0000000E+00-0.1290861E+08 034001
0.0000000E+00 0.0000000E+00-0.1028734E+11-0.3925635E+10 0.0000000E+00 205001
0.0000000E+00 0.0000000E+00-0.1110308E+11-0.4405503E+10 0.0000000E+00 115001
0.0000000E+00 0.0000000E+00-0.2400925E+10-0.9464882E+09 0.0000000E+00 025001
-0.5963793E+09-0.1252587E+09 0.0000000E+00 0.0000000E+00-0.1719446E+09 106001
-0.6895882E+09-0.2027095E+09 0.0000000E+00 0.0000000E+00-0.1526123E+09 016001
0.0000000E+00 0.0000000E+00-0.1625344E+09-0.4433455E+08 0.0000000E+00 007001
-0.2011766E+12-0.6436301E+11 0.0000000E+00 0.0000000E+00-0.3056494E+11 600200
-0.1732477E+12-0.3292637E+11 0.0000000E+00 0.0000000E+00 0.1073802E+11 510200
0.6159862E+12 0.2719994E+12 0.0000000E+00 0.0000000E+00 0.2299299E+12 420200
0.6820468E+12 0.3048567E+12 0.0000000E+00 0.0000000E+00 0.3084246E+12 330200
-0.1755150E+12-0.1656795E+11 0.0000000E+00 0.0000000E+00 0.1299139E+12 240200
-0.3481081E+12-0.1066575E+12 0.0000000E+00 0.0000000E+00 0.7418414E+10 150200
-0.7831850E+11-0.2583110E+11 0.0000000E+00 0.0000000E+00-0.2582837E+10 060200
0.0000000E+00 0.0000000E+00-0.9080992E+12-0.3366030E+12 0.0000000E+00 501200
0.0000000E+00 0.0000000E+00-0.2015334E+13-0.6461733E+12 0.0000000E+00 411200
0.0000000E+00 0.0000000E+00-0.1254011E+13-0.1159865E+12 0.0000000E+00 321200
0.0000000E+00 0.0000000E+00-0.2934965E+12 0.3263204E+12 0.0000000E+00 231200
0.0000000E+00 0.0000000E+00-0.2899169E+12 0.1169902E+12 0.0000000E+00 141200
0.0000000E+00 0.0000000E+00-0.1727912E+12-0.2255565E+11 0.0000000E+00 051200
-0.2410715E+13-0.8310011E+12 0.0000000E+00 0.0000000E+00-0.3335611E+12 402200
-0.4210400E+13-0.1455202E+13 0.0000000E+00 0.0000000E+00-0.6735590E+12 312200
-0.1314695E+13-0.4529539E+12 0.0000000E+00 0.0000000E+00-0.3388226E+12 222200
0.7666786E+12 0.2724038E+12 0.0000000E+00 0.0000000E+00 0.4864052E+11 132200
0.2742750E+12 0.9953838E+11 0.0000000E+00 0.0000000E+00 0.4978807E+11 042200
0.0000000E+00 0.0000000E+00-0.9312931E+12-0.3937498E+12 0.0000000E+00 303200
0.0000000E+00 0.0000000E+00-0.1872227E+13-0.7451769E+12 0.0000000E+00 213200
0.0000000E+00 0.0000000E+00-0.1344068E+13-0.4610524E+12 0.0000000E+00 123200
0.0000000E+00 0.0000000E+00-0.3139888E+12-0.7934902E+11 0.0000000E+00 033200
-0.1330315E+13-0.4539010E+12 0.0000000E+00 0.0000000E+00-0.6676000E+11 204200
-0.1559715E+13-0.5247670E+12 0.0000000E+00 0.0000000E+00-0.6745694E+11 | |
<filename>iperf_test/myiperf.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: cp936 -*-
'''
<Author> : guolei
<Function> :
Used to run network test projects with iperf. This end of the script runs on
Windows and must be connected to the device under test; the other end is best
run on Linux. That is the environment for all of my tests above.
'''
import sys
import threading
import os
import time
import serial
import encodings
import re
import socket
import string
import linecache
import pyh
import xlsxwriter
import traceback
SER_ALIVE = []
LOSSRATE = []
'''
=======================================================
================Iperf Parameters Settings =============
=======================================================
'''
TestMod = '-u' #Set test mode(TCP:'';UDP:'-u')
DevType = 'cli' #Set DUT type(Server:'ser';Client:'cli')
DstIP = '172.16.58.3' #Set your PC IP
DstIP2 = '192.168.127.12' #Set DUT IP
TestTime = '5' #Set test time(sec)
TestSpeed = 100 #Set packages send speed
TestFrameArray =('64','128','256','512','1024','1280','1472')
#TestFrameArray =('1280','1472')
#Set test frame list
Dir_Path = './resoult/' #Set script path
'''
=======================================================
================ Serial Parameters Config =============
=======================================================
'''
Serail_Com_Port = '/dev/ttyUSB0' #Set serial port
Serail_Com_Baud_Rate = 38400 #Set baud rate
Serail_Com_Data_Bits = 8 #Set below default
Serail_Com_Parity = 'N'
Serail_Com_Stop_Bits = 1
Serail_Com_Timeout = 1
Serail_Com_Xonxoff = 0
Serail_Com_Rtscts = 0
'''
=======================================================
=================== Log File Config ==================
=======================================================
'''
Log_File_Name = 'ser_read_log' #Set log file name
Iperf_Resoult_Name = 'iperf_resoult'
Top_Log_Name = 'top_resoult_log'
Excel_Report_Name = 'Throughput_Report'
Top_Data_Name = 'Top_data'
lows = 8
'''
=======================================================
================ MultiThreading Setting ===============
=======================================================
'''
def threadFunc(num):
global total, mutex
#print names of threads
print threading.currentThread().getName()
for x in xrange(0, int(num)):
#get mutex
mutex.acquire()
total = total + 1
#free mutex
mutex.release()
def main(num):
    """Start the serial reader/writer worker threads and wait for both to exit."""
    global total, mutex
    total = 0
    mutex = threading.Lock()
    # One thread drains the serial port, the other drives the test sequence.
    workers = [
        threading.Thread(target=SerialRead, args=(1,)),
        threading.Thread(target=SerialWrite, args=(1,)),
    ]
    for worker in workers:
        worker.start()
    # Block until every worker has finished.
    for worker in workers:
        worker.join()
def SerialRead(arg):
while SER_ALIVE:
try:
n = ser.inWaiting()
if not n:
continue
# print 'n = ',n
fileHandle = open ( Iperf_Log_File, 'a' )
read_str = ser.readall()
print read_str
fileHandle.write(read_str)
if TestMod is '':
pass
else:
if read_str.rfind('(')>0 and read_str.rfind('%)')>0:
global LOSSRATE
LOSSRATE =read_str[read_str.find('(')+1:read_str.find('%)')]
# print 'LOSSRATE in read is ',LOSSRATE
fileHandle.close()
read_str = ''
except:
print '=== STEP ERROR INFO START ==='
traceback.print_exc()
print '=== STEP ERROR INFO END ==='
def SerialWrite(arg):
try:
if TestMod is '':
print '==========Now I am testing for TCP !==========\n'
else:
print '==========Now I am testing for UDP !==========\n'
time.sleep(0.5)
ser.write('cd /usr/bin\n')
time.sleep(0.5)
ser.write('ls\n')
time.sleep(0.5)
ser.write('date > /ramdisk/'+Top_Log_Name+'.log&\n')
time.sleep(0.5)
ser.write('top -b >> /ramdisk/'+Top_Log_Name+'.log&\n')
time.sleep(5)
AllTest()
# ManuWrite()
except:
print '=== STEP ERROR INFO START ==='
traceback.print_exc()
print '=== STEP ERROR INFO END ==='
def ManuWrite():
while SER_ALIVE:
try:
strSerial = ''
Exit = 'exit'
# print 'I am SerialWrite Thread!\n'
strSerial = raw_input()
# print strSerial
# print ser.write('cd /usr/bin\nls\n')
if strSerial == Exit:
print 'Now Exit !'
sys.exit(1)
ser.write(strSerial+'\n')
except IOError('Serial Write Error'):
break
def CopyTopLog(srcfile,dstfile,dstfile2):
try:
#copy top log to top file
thefile= open(srcfile,'r')
count = len(thefile.readlines())
mysstr= 'Now_cut_top_log\n'
copylog=linecache.getlines(srcfile)
copylog=[copylog[i] for i in range(len(copylog)) if copylog[i]!='\n']
renum=copylog.index(mysstr)
open(dstfile,'w')
for n in range(renum,len(copylog)):
open(dstfile,'a').write(copylog[n])
open(dstfile,'a').close()
#copy top info to data file
thefile= open(dstfile,'r')
count = len(thefile.readlines())
sstr1= "Packet_Size"
sstr2= "CPU:"
for i in range(count):
renum1 =linecache.getline(dstfile,i).find(sstr1)
if renum1 is not -1:
renum2 =linecache.getline(dstfile,i).find(sstr2)
n=i
while renum2 is -1:
n=n-1
renum2 =linecache.getline(dstfile,n).find(sstr2)
n=n-1
renum2 =linecache.getline(dstfile,i).find(sstr2)
while renum2 is -1:
n=n-1
renum2 =linecache.getline(dstfile,n).find(sstr2)
open(dstfile2,'a').write(linecache.getline(dstfile,n))
open(dstfile2,'a').close()
except:
print '=== STEP ERROR INFO START ==='
traceback.print_exc()
print '=== STEP ERROR INFO END ==='
def CopyResoultData(srcfile,dstfile):
try:
thefile= open(srcfile,'r')
count = len(thefile.readlines())
sstr1= "Speed is:"
if TestMod is '':
sstr2= "bits/sec"
else:
sstr2= "(0%)"
sstr3= "Receiving"
open(dstfile,'w')
for i in range(count):
renum1 =linecache.getline(srcfile,i).find(sstr1)
if renum1 is not -1:
open(dstfile,'a').write(linecache.getline(srcfile,i))
open(dstfile,'a').close()
# print '=============Find Speed==============='
renum2 =linecache.getline(srcfile,i).find(sstr2)
n=i
while renum2 is -1:
n=n-1
# print 'n1 is :',n
if (n < 1) or (n > count):
print '========== Not Find bits/sec =========='
break
renum2 =linecache.getline(srcfile,n).find(sstr2)
open(dstfile,'a').write(linecache.getline(srcfile,n))
open(dstfile,'a').close()
# print '=============Find bits/sec==============='
renum3 =linecache.getline(srcfile,i).find(sstr3)
while renum3 is -1:
n=n+1
# print 'n2 is :',n
if (n < 1) or (n > count):
print '========== Not Find Receiving =========='
break
renum3 =linecache.getline(srcfile,n).find(sstr3)
open(dstfile,'a').write(linecache.getline(srcfile,n))
open(dstfile,'a').close()
# print 'Line num is :',i
# print '=============Find Receiving==============='
except:
print '=== STEP ERROR INFO START ==='
traceback.print_exc()
print '=== STEP ERROR INFO END ==='
def CreateExcelTcp(srcfile, srcfile2, dstfile):
    """Build the TCP throughput Excel report with charts.

    srcfile  -- extracted result lines, in groups of three
                (speed, bandwidth, frame size)
    srcfile2 -- per-frame-size 'top' CPU summary lines
    dstfile  -- .xlsx report to create (Throughput / CPU / SIRQ charts)
    """
    myList = []
    head_list = ['FrameSize(byte)', 'BandWidth(Mbps)', 'Throughput(Mbites/s)', 'CPU(%)', 'SIRQ(%)']
    myList.append(head_list)
    fileHandle = open(srcfile, 'r')
    count = len(fileHandle.readlines())
    # Results come in groups of three lines; n walks the group start.
    for n in range(1, count, 3):
        temlist = [''] * (lows - 3)
        read_str = linecache.getline(srcfile, n)
        temlist[1] = int(read_str[read_str.find(':') + 1:read_str.find('Mbps')].strip())
        n = n + 1
        read_str = linecache.getline(srcfile, n)
        temlist[2] = float(read_str[read_str.find('Bytes') + 5:read_str.find('bits/sec') - 1].strip())
        n = n + 1
        read_str = linecache.getline(srcfile, n)
        temlist[0] = int(read_str[read_str.find('Receiving') + 9:read_str.find('byte')].strip())
        # n is always a multiple of 3 here (group starts are 1, 4, 7, ...
        # and n was incremented twice), so this branch always runs.
        if n % 3 == 0:
            # FIX: a handle on srcfile2 was opened every iteration and never
            # closed; it was never used either — linecache reads the file.
            read_str = linecache.getline(srcfile2, n / 3)
            # 'top' reports idle%; CPU usage is its complement.
            temlist[3] = read_str[read_str.find('nic') + 3:read_str.find('% idle')].strip()
            temp = string.atof(temlist[3])
            temlist[3] = float(100 - temp)
            temlist[4] = float(read_str[read_str.find('% irq') + 5:read_str.find('% sirq')].strip())
        myList.append(temlist)
    fileHandle.close()
    workbook = xlsxwriter.Workbook(dstfile)
    worksheet = workbook.add_worksheet()
    for i in range(0, len(myList), 1):
        temp = '%d' % (i + 1)
        worksheet.write_row('A' + temp, myList[i])
    # Last written row number, reused as the chart range end.
    # NOTE(review): assumes srcfile is non-empty, otherwise i is unbound.
    i = '%d' % (i + 1)
    #######################################################################
    # Throughput chart.
    chart1 = workbook.add_chart({'type': 'scatter', 'subtype': 'straight'})
    chart1.add_series({
        'name': '=Sheet1!$C$1',
        'categories': '=Sheet1!$A$2:$A$' + i,
        'values': '=Sheet1!$C$2:$C$' + i,
    })
    chart1.set_title({'name': 'Throughput'})
    chart1.set_x_axis({'name': 'FrameSize(byte)'})
    chart1.set_y_axis({'name': 'Throughput(Mbites/s)'})
    chart1.set_style(11)
    worksheet.insert_chart('A11', chart1, {'x_offset': 25, 'y_offset': 10})
    #######################################################################
    # CPU chart.
    chart2 = workbook.add_chart({'type': 'scatter', 'subtype': 'straight'})
    chart2.add_series({
        'name': '=Sheet1!$D$1',
        'categories': '=Sheet1!$A$2:$A$' + i,
        'values': '=Sheet1!$D$2:$D$' + i,
    })
    chart2.set_title({'name': 'CPU'})
    chart2.set_x_axis({'name': 'FrameSize(byte)'})
    chart2.set_y_axis({'name': 'CPU(ms)'})
    chart2.set_style(12)
    worksheet.insert_chart('A27', chart2, {'x_offset': 25, 'y_offset': 10})
    #######################################################################
    # SIRQ chart.
    chart3 = workbook.add_chart({'type': 'scatter', 'subtype': 'straight'})
    chart3.add_series({
        'name': '=Sheet1!$E$1',
        'categories': '=Sheet1!$A$2:$A$' + i,
        'values': '=Sheet1!$E$2:$E$' + i,
    })
    chart3.set_title({'name': 'SIRQ'})
    chart3.set_x_axis({'name': 'FrameSize(byte)'})
    chart3.set_y_axis({'name': 'SIRQ(%)'})
    chart3.set_style(11)
    worksheet.insert_chart('I11', chart3, {'x_offset': 25, 'y_offset': 10})
    workbook.close()
def CreateExcelUdp(srcfile, srcfile2, dstfile):
    """Build the UDP throughput Excel report with charts.

    Like CreateExcelTcp but with extra jitter / packet-loss columns.

    srcfile  -- extracted result lines, in groups of three
    srcfile2 -- per-frame-size 'top' CPU summary lines
    dstfile  -- .xlsx report to create (Throughput / Jitters / CPU / SIRQ charts)
    """
    myList = []
    head_list = ['FrameSize(byte)', 'BandWidth(Mbps)', 'Throughput(Mbites/s)', 'Jitters(ms)',
                 'PacketLoss', 'LossRate(%)', 'CPU(%)', 'SIRQ(%)']
    myList.append(head_list)
    fileHandle = open(srcfile, 'r')
    count = len(fileHandle.readlines())
    # Results come in groups of three lines; n walks the group start.
    for n in range(1, count, 3):
        temlist = [''] * lows
        read_str = linecache.getline(srcfile, n)
        temlist[1] = int(read_str[read_str.find(':') + 1:read_str.find('Mbps')].strip())
        n = n + 1
        read_str = linecache.getline(srcfile, n)
        temlist[2] = float(read_str[read_str.find('Bytes') + 5:read_str.find('bits/sec') - 1].strip())
        temlist[3] = float(read_str[read_str.find('bits/sec') + 8:read_str.find('ms')].strip())
        temlist[4] = read_str[read_str.find('ms') + 2:read_str.find('(')].strip()
        temlist[5] = float(read_str[read_str.find('(') + 1:read_str.find('%)')].strip())
        n = n + 1
        read_str = linecache.getline(srcfile, n)
        temlist[0] = int(read_str[read_str.find('Receiving') + 9:read_str.find('by') - 1].strip())
        # n is always a multiple of 3 here, so this branch always runs.
        if n % 3 == 0:
            # FIX: a handle on srcfile2 was opened every iteration and never
            # closed; it was never used either — linecache reads the file.
            read_str = linecache.getline(srcfile2, n / 3)
            # 'top' reports idle%; CPU usage is its complement.
            temlist[6] = read_str[read_str.find('nic') + 3:read_str.find('% idle')].strip()
            temp = string.atof(temlist[6])
            temlist[6] = float(100 - temp)
            temlist[7] = float(read_str[read_str.find('% irq') + 5:read_str.find('% sirq')].strip())
        myList.append(temlist)
    fileHandle.close()
    workbook = xlsxwriter.Workbook(dstfile)
    worksheet = workbook.add_worksheet()
    for i in range(0, len(myList), 1):
        temp = '%d' % (i + 1)
        worksheet.write_row('A' + temp, myList[i])
    # Last written row number, reused as the chart range end.
    # NOTE(review): assumes srcfile is non-empty, otherwise i is unbound.
    i = '%d' % (i + 1)
    #######################################################################
    # Throughput chart.
    chart1 = workbook.add_chart({'type': 'scatter', 'subtype': 'straight'})
    chart1.add_series({
        'name': '=Sheet1!$C$1',
        'categories': '=Sheet1!$A$2:$A$' + i,
        'values': '=Sheet1!$C$2:$C$' + i,
    })
    chart1.set_title({'name': 'Throughput'})
    chart1.set_x_axis({'name': 'FrameSize(byte)'})
    chart1.set_y_axis({'name': 'Throughput(Mbites/s)'})
    chart1.set_style(11)
    worksheet.insert_chart('A11', chart1, {'x_offset': 25, 'y_offset': 10})
    #######################################################################
    # Jitters chart.
    chart2 = workbook.add_chart({'type': 'scatter', 'subtype': 'straight'})
    chart2.add_series({
        'name': '=Sheet1!$D$1',
        'categories': '=Sheet1!$A$2:$A$' + i,
        'values': '=Sheet1!$D$2:$D$' + i,
    })
    chart2.set_title({'name': 'Jitters'})
    chart2.set_x_axis({'name': 'FrameSize(byte)'})
    chart2.set_y_axis({'name': 'Jitters(ms)'})
    chart2.set_style(12)
    worksheet.insert_chart('A27', chart2, {'x_offset': 25, 'y_offset': 10})
    #######################################################################
    # CPU chart.
    chart3 = workbook.add_chart({'type': 'scatter', 'subtype': 'straight'})
    chart3.add_series({
        'name': '=Sheet1!$G$1',
        'categories': '=Sheet1!$A$2:$A$' + i,
        'values': '=Sheet1!$G$2:$G$' + i,
    })
    chart3.set_title({'name': 'CPU'})
    chart3.set_x_axis({'name': 'FrameSize(byte)'})
    chart3.set_y_axis({'name': 'CPU(%)'})
    chart3.set_style(11)
    worksheet.insert_chart('I11', chart3, {'x_offset': 25, 'y_offset': 10})
    #######################################################################
    # SIRQ chart.
    chart4 = workbook.add_chart({'type': 'scatter', 'subtype': 'straight'})
    chart4.add_series({
        'name': '=Sheet1!$H$1',
        'categories': '=Sheet1!$A$2:$A$' + i,
        'values': '=Sheet1!$H$2:$H$' + i,
    })
    chart4.set_title({'name': 'SIRQ'})
    chart4.set_x_axis({'name': 'FrameSize(byte)'})
    chart4.set_y_axis({'name': 'SIRQ(%)'})
    chart4.set_style(11)
    worksheet.insert_chart('I27', chart4, {'x_offset': 25, 'y_offset': 10})
    workbook.close()
def AllTest():
TestTimeTmp = string.atoi(TestTime)
n = len(TestFrameArray)
if TestMod is '':
if DevType is 'ser':
ser.write('./iperf -s&\n')
elif DevType is 'cli':
os.system('iperf -s&\n')
else:
print '=============DevType is invalid!============='
os._exit(0)
time.sleep(0.5)
for i in range(0,n,1):
TestSpeedTmp1 = '%d' %TestSpeed
if DevType is 'ser':
ser.write('echo iperf '+TestMod+' -c '+DstIP2+' -t '+ TestTime +' -l '+TestFrameArray[i]+'\n')
os.system('iperf '+TestMod+' -c '+DstIP2+' -t '+ TestTime +' -l '+TestFrameArray[i]+'\n')
elif DevType is 'cli':
ser.write('./iperf '+TestMod+' -c '+DstIP+' -t '+ TestTime +' -l '+TestFrameArray[i]+'\n')
else:
print '=============DevType is invalid!============='
os._exit(0)
time.sleep(TestTimeTmp+5)
ser.write('echo Packet_Size is: '+TestFrameArray[i]+'byte >> /ramdisk/'+
Top_Log_Name+'.log\n')
time.sleep(1)
fileHandle = open( Iperf_Log_File,'a')
time.sleep(0.5)
fileHandle.write('Receiving '+TestFrameArray[i]+'byte datagrams\n')
time.sleep(0.5)
fileHandle.write('Speed is:'+TestSpeedTmp1+'Mbps\n')
time.sleep(0.5)
fileHandle.close()
else:
if DevType is 'ser':
ser.write('./iperf -s -u&\n')
elif DevType is 'cli':
os.system('iperf -s -u&\n')
else:
print '=============DevType is invalid!============='
os._exit(0)
for i in range(0,n,1):
TestSpeedTmp = 0 #speed in num
TestSpeedTmp1= '' #speed in char
TestSpeedTmp2 = 0 #min speed
TestSpeedTmp3 = 100 #max speed
TestSpeedTmp = TestSpeed
tmp = 2
TestSpeedTmp1 = '%d' %TestSpeedTmp
# print 'TestSpeed is ',TestSpeedTmp
# print 'LOSSRATE is ',LOSSRATE
if DevType is 'ser':
ser.write('echo iperf '+TestMod+' -c '+DstIP2+' -t '+TestTime+' -b '+TestSpeedTmp1+'M'+' -l '
+TestFrameArray[i]+'\n')
os.system('iperf '+TestMod+' -c '+DstIP2+' -t '+TestTime+' -b '+TestSpeedTmp1+'M'+' -l '
+TestFrameArray[i]+'\n')
elif DevType is 'cli':
ser.write('./iperf '+TestMod+' -c '+DstIP+' -t '+TestTime+' -b '+TestSpeedTmp1+'M'+' -l '
+TestFrameArray[i]+'\n')
else:
print '=============DevType is invalid!============='
os._exit(0)
time.sleep(TestTimeTmp+5)
# global REPORT_FLAG
# REPORT_FLAG = 0
# print '========================Flag 1======================='
# print 'LOSSRATE is ',LOSSRATE
while True:
if tmp < 2:
break
# print '========================Flag 2======================='
# print 'LOSSRATE is ',LOSSRATE
#if LOSSRATE > 0,decrease TestSpeed or break
while (LOSSRATE > '0'):
try:
# print '================LOSSRATE > 0==============='
TestSpeedTmp3 = TestSpeedTmp
tmp = TestSpeedTmp - TestSpeedTmp2
if tmp > 1:
TestSpeedTmp = (TestSpeedTmp + TestSpeedTmp2) / 2
else:
TestSpeedTmp = TestSpeedTmp2
TestSpeedTmp1 = '%d' %TestSpeedTmp
ser.write('echo Receiving '+TestFrameArray[i]+' byte datagrams\n')
time.sleep(0.5)
ser.write('echo Packet_Size is: '+TestFrameArray[i]+'byte >> /ramdisk/'+
Top_Log_Name+'.log\n')
time.sleep(0.5)
fileHandle = open( Iperf_Log_File,'a')
fileHandle.write('Speed is:'+TestSpeedTmp1+'Mbps\n')
fileHandle.close()
# print 'test end != 0'
break
TestSpeedTmp1 = '%d' %TestSpeedTmp
if DevType is 'ser':
ser.write('echo iperf '+TestMod+' -c '+DstIP2+' -t '+TestTime+' -b '+TestSpeedTmp1
+'M'+' -l '+TestFrameArray[i]+'\n')
os.system('iperf '+TestMod+' -c '+DstIP2+' -t '+TestTime+' -b '+TestSpeedTmp1
+'M'+' -l '+TestFrameArray[i]+'\n')
elif DevType is 'cli':
ser.write('./iperf '+TestMod+' -c '+DstIP+' -t '+TestTime+' -b '+TestSpeedTmp1
+'M'+' -l '+TestFrameArray[i]+'\n')
else:
print '=============DevType is invalid!============='
os._exit(0)
time.sleep(TestTimeTmp+5)
except IOError('Set Value Error'):
break
#if LOSSRATE = 0,increase TestSpeed or break
while (LOSSRATE == '0'):
try:
# print '================LOSSRATE = 0==============='
TestSpeedTmp2 = TestSpeedTmp
tmp = TestSpeedTmp3 - TestSpeedTmp
if tmp > 1:
TestSpeedTmp = (TestSpeedTmp + TestSpeedTmp3) / 2
TestSpeedTmp1 = '%d' %TestSpeedTmp
if DevType is 'ser':
ser.write('echo iperf '+TestMod+' -c '+DstIP2+' -t '+TestTime+' -b '+TestSpeedTmp1
+'M'+' -l '+TestFrameArray[i]+'\n')
os.system('iperf '+TestMod+' -c | |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import ast
import copy
import json
import logging
import textwrap
import calendar
import time
import six
import sys
from os.path import join as pjoin
from six.moves import range
from st2client import models
from st2client.commands import resource
from st2client.commands.resource import ResourceNotFoundError
from st2client.commands.resource import add_auth_token_to_kwargs_from_cli
from st2client.formatters import table
from st2client.formatters import execution as execution_formatter
from st2client.utils import jsutil
from st2client.utils.date import format_isodate_for_user_timezone
from st2client.utils.date import parse as parse_isotime
from st2client.utils.color import format_status
LOG = logging.getLogger(__name__)
# Liveaction status values. NOTE(review): these look like a manually kept
# copy of the server-side status constants -- verify they stay in sync.
LIVEACTION_STATUS_REQUESTED = 'requested'
LIVEACTION_STATUS_SCHEDULED = 'scheduled'
LIVEACTION_STATUS_DELAYED = 'delayed'
LIVEACTION_STATUS_RUNNING = 'running'
LIVEACTION_STATUS_SUCCEEDED = 'succeeded'
LIVEACTION_STATUS_FAILED = 'failed'
LIVEACTION_STATUS_TIMED_OUT = 'timeout'
LIVEACTION_STATUS_ABANDONED = 'abandoned'
LIVEACTION_STATUS_CANCELING = 'canceling'
LIVEACTION_STATUS_CANCELED = 'canceled'
LIVEACTION_STATUS_PAUSING = 'pausing'
LIVEACTION_STATUS_PAUSED = 'paused'
LIVEACTION_STATUS_RESUMING = 'resuming'
# Terminal states -- used below when computing an execution's total runtime.
LIVEACTION_COMPLETED_STATES = [
    LIVEACTION_STATUS_SUCCEEDED,
    LIVEACTION_STATUS_FAILED,
    LIVEACTION_STATUS_TIMED_OUT,
    LIVEACTION_STATUS_CANCELED,
    LIVEACTION_STATUS_ABANDONED
]
# Which parameters should be masked when displaying action execution output
PARAMETERS_TO_MASK = [
    'password',
    'private_key'
]
# A list of environment variables which are never inherited when using run
# --inherit-env flag. Entries are lower-cased; presumably compared
# case-insensitively at the use site -- verify there.
ENV_VARS_BLACKLIST = [
    'pwd',
    'mail',
    'username',
    'user',
    'path',
    'home',
    'ps1',
    'shell',
    'pythonpath',
    'ssh_tty',
    'ssh_connection',
    'lang',
    'ls_colors',
    'logname',
    'oldpwd',
    'term',
    'xdg_session_id'
]
# Runner types whose executions are workflows (i.e. have child tasks).
WORKFLOW_RUNNER_TYPES = [
    'action-chain',
    'mistral-v2',
]
def format_parameters(value):
    """Mask the values of sensitive parameters (in place) before display.

    Any key listed in ``PARAMETERS_TO_MASK`` has its value replaced with
    asterisks. Non-dict inputs are returned untouched.
    """
    if not isinstance(value, dict):
        # No parameters, leave it as it is
        return value
    for sensitive_name in PARAMETERS_TO_MASK:
        if sensitive_name in value:
            value[sensitive_name] = '********'
    return value
# String for indenting etc.
WF_PREFIX = '+ '
NON_WF_PREFIX = ' '
INDENT_CHAR = ' '
def format_wf_instances(instances):
    """
    Adds identification characters to a workflow and appropriately shifts
    the non-workflow instances. If no workflows are found does nothing.
    """
    # Only decorate ids when at least one workflow (an execution that has
    # children) is present in the listing.
    if not any(getattr(instance, 'children', None) for instance in instances):
        return instances
    # Prepend the workflow / non-workflow marker to every id so the columns
    # stay aligned.
    for instance in instances:
        marker = WF_PREFIX if getattr(instance, 'children', None) else NON_WF_PREFIX
        instance.id = marker + instance.id
    return instances
def format_execution_statuses(instances):
    """Apply :func:`format_execution_status` to each instance, returning a list."""
    return [format_execution_status(instance) for instance in instances]
def format_execution_status(instance):
    """
    Augment instance "status" attribute with number of seconds which have elapsed for all the
    executions which are in running state and execution total run time for all the executions
    which have finished.

    :param instance: Execution object with ``status`` and (optionally)
                     ``start_timestamp`` / ``end_timestamp`` ISO8601 strings.
    :return: The same instance, with ``status`` possibly rewritten to
             ``"<status> (<N>s elapsed)"``.
    """
    start_timestamp = getattr(instance, 'start_timestamp', None)
    end_timestamp = getattr(instance, 'end_timestamp', None)

    def _to_epoch(timestamp):
        # Convert an ISO8601 timestamp string to integer epoch seconds (UTC).
        return calendar.timegm(parse_isotime(timestamp).timetuple())

    if instance.status == LIVEACTION_STATUS_RUNNING and start_timestamp:
        # Still running - elapsed time is measured against "now".
        # (Fixed: previously re-read instance.start_timestamp redundantly.)
        elapsed_seconds = int(time.time()) - _to_epoch(start_timestamp)
        instance.status = '%s (%ss elapsed)' % (instance.status, elapsed_seconds)
    elif instance.status in LIVEACTION_COMPLETED_STATES and start_timestamp and end_timestamp:
        # Finished - show the total runtime.
        elapsed_seconds = _to_epoch(end_timestamp) - _to_epoch(start_timestamp)
        instance.status = '%s (%ss elapsed)' % (instance.status, elapsed_seconds)
    return instance
class ActionBranch(resource.ResourceBranch):
    """CLI branch for the action resource.

    Wires up the standard CRUD sub-commands and registers the
    action-specific enable/disable/execute commands on top of them.
    """
    def __init__(self, description, app, subparsers, parent_parser=None):
        super(ActionBranch, self).__init__(
            models.Action, description, app, subparsers,
            parent_parser=parent_parser,
            commands={
                'list': ActionListCommand,
                'get': ActionGetCommand,
                'update': ActionUpdateCommand,
                'delete': ActionDeleteCommand
            })
        # Registers extended commands
        self.commands['enable'] = ActionEnableCommand(self.resource, self.app, self.subparsers)
        self.commands['disable'] = ActionDisableCommand(self.resource, self.app, self.subparsers)
        # add_help=False: the execute command builds its own help output.
        self.commands['execute'] = ActionRunCommand(
            self.resource, self.app, self.subparsers,
            add_help=False)
class ActionListCommand(resource.ContentPackResourceListCommand):
    """List actions; only overrides the columns shown in the table."""
    display_attributes = ['ref', 'pack', 'description']
class ActionGetCommand(resource.ContentPackResourceGetCommand):
    """Get a single action; shows all attributes in a fixed display order."""
    display_attributes = ['all']
    attribute_display_order = ['id', 'uid', 'ref', 'pack', 'name', 'description',
                               'enabled', 'entry_point', 'runner_type',
                               'parameters']
class ActionUpdateCommand(resource.ContentPackResourceUpdateCommand):
    """Update an action; behavior inherited unchanged from the base class."""
    pass
class ActionEnableCommand(resource.ContentPackResourceEnableCommand):
    """Enable an action; only customizes the attribute display."""
    display_attributes = ['all']
    attribute_display_order = ['id', 'ref', 'pack', 'name', 'description',
                               'enabled', 'entry_point', 'runner_type',
                               'parameters']
class ActionDisableCommand(resource.ContentPackResourceDisableCommand):
    """Disable an action; only customizes the attribute display."""
    display_attributes = ['all']
    attribute_display_order = ['id', 'ref', 'pack', 'name', 'description',
                               'enabled', 'entry_point', 'runner_type',
                               'parameters']
class ActionDeleteCommand(resource.ContentPackResourceDeleteCommand):
    """Delete an action; behavior inherited unchanged from the base class."""
    pass
class ActionRunCommandMixin(object):
    """
    Mixin class which contains utility functions related to action execution.
    """
    # Attributes (and their order) rendered when displaying an execution.
    display_attributes = ['id', 'action.ref', 'context.user', 'parameters', 'status',
                          'start_timestamp', 'end_timestamp', 'result']
    attribute_display_order = ['id', 'action.ref', 'context.user', 'parameters', 'status',
                               'start_timestamp', 'end_timestamp', 'result']
    # Per-attribute render hooks applied before display (e.g. timestamps are
    # localized and sensitive parameters masked).
    attribute_transform_functions = {
        'start_timestamp': format_isodate_for_user_timezone,
        'end_timestamp': format_isodate_for_user_timezone,
        'parameters': format_parameters,
        'status': format_status
    }
    poll_interval = 2  # how often to poll for execution completion when using sync mode
    def get_resource(self, ref_or_id, **kwargs):
        # Actions are addressed either by ref ("pack.name") or by id.
        return self.get_resource_by_ref_or_id(ref_or_id=ref_or_id, **kwargs)
    @add_auth_token_to_kwargs_from_cli
    def run_and_print(self, args, **kwargs):
        """Execute the action and render the result.

        In async mode only prints the follow-up commands; otherwise prints
        the full execution details and exits non-zero if the action failed.
        """
        if self._print_help(args, **kwargs):
            return
        execution = self.run(args, **kwargs)
        # NOTE(review): "args.async" is a syntax error on Python 3.7+, where
        # "async" is a reserved keyword; this module predates that. Confirm
        # the supported interpreter range before upgrading.
        if args.async:
            self.print_output('To get the results, execute:\n st2 execution get %s' %
                              (execution.id), six.text_type)
            self.print_output('\nTo view output in real-time, execute:\n st2 execution '
                              'tail %s' % (execution.id), six.text_type)
        else:
            self._print_execution_details(execution=execution, args=args, **kwargs)
            if execution.status == 'failed':
                # Exit with non zero if the action has failed
                sys.exit(1)
    def _add_common_options(self):
        """Register display/formatting CLI options shared by run commands.

        :return: the root mutually exclusive group so subclasses can attach
                 further options.
        """
        root_arg_grp = self.parser.add_mutually_exclusive_group()
        # Display options
        # NOTE(review): add_argument_group() called on a mutually exclusive
        # group is unusual argparse usage -- verify the intended exclusivity
        # semantics against the argparse docs before restructuring.
        task_list_arg_grp = root_arg_grp.add_argument_group()
        task_list_arg_grp.add_argument('--raw', action='store_true',
                                       help='Raw output, don\'t show sub-tasks for workflows.')
        task_list_arg_grp.add_argument('--show-tasks', action='store_true',
                                       help='Whether to show sub-tasks of an execution.')
        task_list_arg_grp.add_argument('--depth', type=int, default=-1,
                                       help='Depth to which to show sub-tasks. \
                                             By default all are shown.')
        task_list_arg_grp.add_argument('-w', '--width', nargs='+', type=int, default=None,
                                       help='Set the width of columns in output.')
        execution_details_arg_grp = root_arg_grp.add_mutually_exclusive_group()
        detail_arg_grp = execution_details_arg_grp.add_mutually_exclusive_group()
        detail_arg_grp.add_argument('--attr', nargs='+',
                                    default=['id', 'status', 'parameters', 'result'],
                                    help=('List of attributes to include in the '
                                          'output. "all" or unspecified will '
                                          'return all attributes.'))
        detail_arg_grp.add_argument('-d', '--detail', action='store_true',
                                    help='Display full detail of the execution in table format.')
        result_arg_grp = execution_details_arg_grp.add_mutually_exclusive_group()
        result_arg_grp.add_argument('-k', '--key',
                                    help=('If result is type of JSON, then print specific '
                                          'key-value pair; dot notation for nested JSON is '
                                          'supported.'))
        # Other options
        detail_arg_grp.add_argument('--tail', action='store_true',
                                    help='Automatically start tailing new execution.')
        # Flag to opt-in to functionality introduced in PR #3670. More robust parsing
        # of complex datatypes is planned for 2.6, so this flag will be deprecated soon
        detail_arg_grp.add_argument('--auto-dict', action='store_true', dest='auto_dict',
                                    default=False, help='Automatically convert list items to '
                                    'dictionaries when colons are detected. '
                                    '(NOTE - this parameter and its functionality will be '
                                    'deprecated in the next release in favor of a more '
                                    'robust conversion method)')
        return root_arg_grp
    def _print_execution_details(self, execution, args, **kwargs):
        """
        Print the execution detail to stdout.
        This method takes into account if an executed action was workflow or not
        and formats the output accordingly.

        :raises ValueError: when --show-tasks is used with a non-workflow action.
        """
        runner_type = execution.action.get('runner_type', 'unknown')
        is_workflow_action = runner_type in WORKFLOW_RUNNER_TYPES
        show_tasks = getattr(args, 'show_tasks', False)
        raw = getattr(args, 'raw', False)
        detail = getattr(args, 'detail', False)
        key = getattr(args, 'key', None)
        attr = getattr(args, 'attr', [])
        if show_tasks and not is_workflow_action:
            raise ValueError('--show-tasks option can only be used with workflow actions')
        # Workflows default to the child-task-list view unless the user
        # explicitly forced a raw or detailed rendering.
        if not raw and not detail and (show_tasks or is_workflow_action):
            self._run_and_print_child_task_list(execution=execution, args=args, **kwargs)
        else:
            instance = execution
            if detail:
                formatter = table.PropertyValueTable
            else:
                formatter = execution_formatter.ExecutionResult
            # Precedence: --detail > --key > --attr.
            if detail:
                options = {'attributes': copy.copy(self.display_attributes)}
            elif key:
                options = {'attributes': ['result.%s' % (key)], 'key': key}
            else:
                options = {'attributes': attr}
            options['json'] = args.json
            options['attribute_transform_functions'] = self.attribute_transform_functions
            self.print_output(instance, formatter, **options)
def _run_and_print_child_task_list(self, execution, args, **kwargs):
action_exec_mgr = self.app.client.managers['LiveAction']
instance = execution
options = {'attributes': ['id', 'action.ref', 'parameters', 'status', 'start_timestamp',
'end_timestamp']}
options['json'] = args.json
options['attribute_transform_functions'] = self.attribute_transform_functions
formatter = execution_formatter.ExecutionResult
kwargs['depth'] = args.depth
child_instances = action_exec_mgr.get_property(execution.id, 'children', **kwargs)
child_instances = self._format_child_instances(child_instances, execution.id)
child_instances = format_execution_statuses(child_instances)
if not child_instances:
# No child error, there might be a global error, include result in the output
options['attributes'].append('result')
status_index = options['attributes'].index('status')
if hasattr(instance, 'result') and isinstance(instance.result, dict):
tasks = instance.result.get('tasks', [])
else:
tasks = []
# On failure we also want to include error message and traceback at the top level
if instance.status == 'failed':
top_level_error, top_level_traceback = self._get_top_level_error(live_action=instance)
if len(tasks) >= 1:
task_error, task_traceback = self._get_task_error(task=tasks[-1])
else:
task_error, task_traceback = None, None
if top_level_error:
# Top-level error
instance.error = top_level_error
instance.traceback = top_level_traceback
instance.result = 'See error and traceback.'
options['attributes'].insert(status_index + 1, 'error')
options['attributes'].insert(status_index + 2, 'traceback')
elif task_error:
# Task error
instance.error = task_error
instance.traceback = task_traceback
instance.result = 'See error and traceback.'
instance.failed_on = tasks[-1].get('name', 'unknown')
options['attributes'].insert(status_index + 1, 'error')
options['attributes'].insert(status_index + 2, 'traceback')
options['attributes'].insert(status_index + 3, 'failed_on')
# Include result on the top-level object so user doesn't need to issue another command to
# see the result
if len(tasks) >= 1:
task_result = self._get_task_result(task=tasks[-1])
if task_result:
instance.result_task = tasks[-1].get('name', 'unknown')
| |
import numpy as np
import pyrealsense2 as rs
import cv2
from enum import Enum
from typing import List, Tuple
# ----------------------------- Helper functions ----------------------------- #
class Device:
    """Bundle of per-device state for one enabled Realsense camera."""
    def __init__(self, pipeline, pipeline_profile, align, product_line):
        self.pipeline = pipeline                  # rs.pipeline (started by enable_device)
        self.pipeline_profile = pipeline_profile  # profile returned by pipeline.start()
        self.align = align                        # rs.align targeting the color stream
        self.product_line = product_line          # product-line string from camera_info
def enumerate_connected_devices(context: rs.context) -> List[Tuple[str, str]]:
    """Enumerate the connected Intel Realsense devices
    Args:
        context (rs.context): The context created for using the realsense library
    Returns:
        List[Tuple[str, str]]: List of (serial-number, product-line) of devices which are connected to the PC
    """
    # "Platform camera" entries are generic UVC devices, not Realsense units,
    # so they are filtered out.
    return [
        (device.get_info(rs.camera_info.serial_number),
         device.get_info(rs.camera_info.product_line))
        for device in context.devices
        if device.get_info(rs.camera_info.name).lower() != 'platform camera'
    ]
# TODO: What are good values to filter?
def post_process_depth_frame(
        depth_frame, decimation_magnitude=1.0, spatial_magnitude=2.0,
        spatial_smooth_alpha=0.5, spatial_smooth_delta=20,
        temporal_smooth_alpha=0.4, temporal_smooth_delta=20):
    """
    Filter the depth frame acquired using the Intel RealSense device
    Parameters:
    -----------
    depth_frame : rs.frame()
        The depth frame to be post-processed
    decimation_magnitude : double
        The magnitude of the decimation filter
    spatial_magnitude : double
        The magnitude of the spatial filter
    spatial_smooth_alpha : double
        The alpha value for spatial filter based smoothening
    spatial_smooth_delta : double
        The delta value for spatial filter based smoothening
    temporal_smooth_alpha: double
        The alpha value for temporal filter based smoothening
    temporal_smooth_delta: double
        The delta value for temporal filter based smoothening
    Return:
    ----------
    filtered_frame : rs.frame()
        The post-processed depth frame
    """
    # Post processing possible only on the depth_frame
    assert (depth_frame.is_depth_frame())
    # Build each filter and apply its control options.
    decimation = rs.decimation_filter()
    decimation.set_option(rs.option.filter_magnitude, decimation_magnitude)
    spatial = rs.spatial_filter()
    spatial.set_option(rs.option.filter_magnitude, spatial_magnitude)
    spatial.set_option(rs.option.filter_smooth_alpha, spatial_smooth_alpha)
    spatial.set_option(rs.option.filter_smooth_delta, spatial_smooth_delta)
    temporal = rs.temporal_filter()
    temporal.set_option(rs.option.filter_smooth_alpha, temporal_smooth_alpha)
    temporal.set_option(rs.option.filter_smooth_delta, temporal_smooth_delta)
    # Apply the filters in order: decimation -> spatial -> temporal
    # (same order as the original implementation).
    filtered = depth_frame
    for depth_filter in (decimation, spatial, temporal):
        filtered = depth_filter.process(filtered)
    return filtered
class SingleInstanceMetaClass(type):
    """Metaclass that turns every class using it into a singleton.

    The first call to the class creates the instance and caches it on the
    class object; subsequent calls return the cached instance.
    """
    def __call__(cls, *args, **kwargs):
        try:
            inst = cls.__instance
        except AttributeError:
            # First instantiation: create and cache the instance.
            inst = cls.__instance = super().__call__(*args, **kwargs)
        return inst
def get_depth_at_pixel(
        depth_frame: rs.frame, pixel_x: int, pixel_y: int) -> float:
    """Get the depth value at the desired image point
    Args:
        depth_frame (rs.frame): The depth frame containing the depth information of the image coordinate
        pixel_x (int): The x value of the image coordinate
        pixel_y (int): The y value of the image coordinate
    Returns:
        float: Distance (in meters) at the desired pixel.
            Fixed: get_distance() returns meters as a float, so the previous
            ``int`` annotation and docstring were incorrect.
    """
    return depth_frame.as_depth_frame().get_distance(round(pixel_x), round(pixel_y))
def convert_depth_pixel_to_metric_coordinate(depth: float, pixel_x: float, pixel_y: float,
                                             camera_intrinsics: rs.intrinsics) -> Tuple[float, float, float]:
    """Back-project a single pixel with known depth to metric camera coordinates.

    Args:
        depth (float): The depth value of the image point
        pixel_x (float): The x value of the image coordinate
        pixel_y (float): The y value of the image coordinate
        camera_intrinsics (rs.intrinsics): The intrinsic values of the imager in whose coordinate system the depth_frame is computed
    Returns:
        Tuple[float, float, float]: (X, Y, Z) coordinate of the pixel in meters
    """
    # Pinhole model: normalized image coordinate (u - c) / f scaled by depth.
    metric_x = (pixel_x - camera_intrinsics.ppx) / camera_intrinsics.fx * depth
    metric_y = (pixel_y - camera_intrinsics.ppy) / camera_intrinsics.fy * depth
    return metric_x, metric_y, depth
def convert_depth_frame_to_points(depth_image: np.ndarray,
                                  camera_intrinsics: rs.intrinsics,
                                  depth_scale: float = 0.001) -> Tuple[np.ndarray]:
    """Convert depth frame to a 3D point cloud
    Args:
        depth_image (np.ndarray): Depth image
        camera_intrinsics (rs.intrinsics): Camera intrinsics
        depth_scale (float, optional): Scale factor of depth. Defaults to 0.001.
    Returns:
        (x, y, z) (Tuple[np.ndarray]): 3 lists of x, y and z coordinates in meters
    """
    height, width = depth_image.shape
    # Pixel-grid coordinates flattened in row-major order -- the same layout
    # produced by depth_image.flatten().
    cols = np.tile(np.arange(width, dtype=float), height)
    rows = np.repeat(np.arange(height, dtype=float), width)
    z = depth_image.flatten() * depth_scale
    # Back-project through the pinhole model and scale by depth.
    x = (cols - camera_intrinsics.ppx) / camera_intrinsics.fx * z
    y = (rows - camera_intrinsics.ppy) / camera_intrinsics.fy * z
    return x, y, z
def convert_pointcloud_to_depth(pointcloud, camera_intrinsics):
    """Project 3D camera-space points back to 2D image coordinates.

    :param pointcloud: numpy array with shape 3xN
    :type pointcloud: numpy array with shape 3xN
    :param camera_intrinsics: intrinsics providing fx, fy, ppx, ppy
    :type camera_intrinsics: rs.intrinsics-like object
    :return: (x, y) image coordinates of the points with non-zero depth
    :rtype: tuple(array, array)
    """
    assert (pointcloud.shape[0] == 3)
    # Keep only points with non-zero depth; projecting z == 0 would divide
    # by zero.
    valid = np.nonzero(pointcloud[2, :])
    z = pointcloud[2, :][valid]
    u_norm = pointcloud[0, :][valid] / z
    v_norm = pointcloud[1, :][valid] / z
    pixel_x = u_norm * camera_intrinsics.fx + camera_intrinsics.ppx
    pixel_y = v_norm * camera_intrinsics.fy + camera_intrinsics.ppy
    return pixel_x, pixel_y
def get_boundary_corners_2D(points):
    # TODO: unimplemented stub -- presumably intended to return the 2D
    # boundary corners of `points`; the exact contract is not defined
    # anywhere in this file.
    pass
def get_clipped_pointcloud(pointcloud, boundary):
    # TODO: unimplemented stub -- presumably intended to clip `pointcloud`
    # to `boundary`; the exact contract is not defined anywhere in this file.
    pass
# ---------------------------------------------------------------------------- #
# ------------------------------- Main content ------------------------------- #
class DataType(Enum):
    """Selector for the kind of data returned by get_data_according_type().

    NOTE(review): the FRAME vs IMAGE distinction (raw rs frame vs converted
    image) is implemented in get_data_according_type(), which is outside
    this view -- confirm there.
    """
    FRAMES = 1
    COLOR_FRAME = 2
    DEPTH_FRAME = 3
    COLOR_IMAGE = 4
    DEPTH_IMAGE = 5
    IMAGES = 6
class RealsenseCapture(metaclass=SingleInstanceMetaClass):
    """Class to manage the Intel Realsense capture.
    Args:
        id (int, optional): Id of connected Realsense device. Defaults to 0.
        color_size (Tuple, optional): Size of color frame. Defaults to (640, 480).
        depth_size (Tuple, optional): Size of depth frame. Defaults to (640, 480).
        fps (int, optional): FPS of capture. Defaults to 30.
        serial (str, optional): Serial-number of desired device. Defaults to None.
    """
    # NOTE(review): the metaclass makes this a singleton -- later constructor
    # arguments are silently ignored once the first instance exists.
    def __init__(
            self, id: int = 0,
            color_size: Tuple[int, int] = (640, 480),
            depth_size: Tuple[int, int] = (640, 480),
            fps: int = 30, serial: str = None) -> None:
        # NOTE(review): parameter name "id" shadows the builtin; kept as-is
        # for API compatibility.
        self._depth_size = depth_size
        self._color_size = color_size
        self._fps = fps
        self._context = rs.context()
        # Snapshot of (serial, product-line) pairs for all attached devices.
        self._available_devices = enumerate_connected_devices(self._context)
        self._device_id = id
        # An explicit serial overrides the positional device id.
        if serial is not None:
            self._serial = serial
            self._device_id = self.get_device_id_from_serial(self._serial)
        self._device_serial, self._product_line = self.get_device_info_from_id(
            self._device_id)
        self._enabled_device = None  # set by enable_device()
        color_width, color_height = self._color_size
        depth_width, depth_height = self._depth_size
        self._config = rs.config()
        self._config.enable_stream(
            rs.stream.color, color_width, color_height, rs.format.rgb8, fps)
        self._config.enable_stream(
            rs.stream.depth, depth_width, depth_height, rs.format.z16, fps)
        self._camera_is_open = False  # True after enable_device(); False after a failed read()
        self._frames = None  # last aligned frameset captured by read()
def enable_device(self, enable_ir_emitter: bool = False):
"""Enable an Intel Realsense device
Or providing exact device-serial, or providing device-id for convenience
Args:
enable_ir_emitter (bool, optional): Enable/Disable the IR-Emitter of the device. Defaults to False.
Examples:
realsense_capture.enable_device(0) # 1st method
realsense_capture.enable_device(device_serial='f12345') # 2nd method
"""
try:
pipeline = rs.pipeline()
self._config.enable_device(self._device_serial)
pipeline_profile = pipeline.start(self._config)
# Set the acquisition parameters
sensor = pipeline_profile.get_device().first_depth_sensor()
if sensor.supports(rs.option.emitter_enabled):
sensor.set_option(rs.option.emitter_enabled, 1
if enable_ir_emitter else 0)
# Create an align object
# rs.align allows us to perform alignment of depth frames to others frames
# The "align_to" is the stream type to which we plan to align depth frames.
align_to = rs.stream.color
align = rs.align(align_to)
self._enabled_device = Device(
pipeline, pipeline_profile, align, self._product_line)
self._camera_is_open = True
print(f'\n RealsenseCapture - initialized')
except:
print(f'\n RealsenseCapture - initialized not success')
def warm_up(self, dispose_frames_for_stablisation: int = 30) -> None:
"""Dispose some frames for camera-stablisation
Args:
dispose_frames_for_stablisation (int, optional): Number of disposing frames. Defaults to 30.
"""
for _ in range(dispose_frames_for_stablisation):
_ = self.read()
def read(self, return_depth=False, depth_filter=None):
"""Read data from camera
:param return_depth: Whether return depth image or not, defaults to False
:type return_depth: bool, optional
:param depth_filter: [description], defaults to None
:type depth_filter: [type], optional
:return: Whether having data, and data
:rtype: tuple(bool, array or list of array)
"""
try:
frames = self._enabled_device.pipeline.wait_for_frames()
# Align the depth frame to color frame
self._frames = self._enabled_device.align.process(frames)
if return_depth: # Return RGB image and Depth image
return True, self.get_data_according_type(
DataType.IMAGES, depth_filter)
else: # Return RGB image only
return True, self.get_data_according_type(DataType.COLOR_IMAGE)
except:
self._camera_is_open = False
print(f'\n RealsenseCapture - read: error')
return False, None
    def isOpened(self):
        """Check whether the camera is open(ready to use)
        :return: Is open or not
        :rtype: bool
        """
        # True after a successful enable_device(); reset by a failed read().
        return self._camera_is_open
    def release(self):
        """Release/Disable cameras
        """
        # Disables all configured streams. NOTE(review): the started pipeline
        # itself is not stopped here -- confirm whether that is intentional.
        print(f'\n RealsenseCapture - release')
        self._config.disable_all_streams()
def get_intrinsics(self, frame_type: DataType = DataType.COLOR_FRAME):
"""Get intrinsics of a frame(depth ? color)
:In this case, after alignment, intrinsics of depth and color frames
:are the same
:param frame_type: Type of frame, defaults to DataType.COLOR_FRAME
:type frame_type: DataType, optional
:return: intrinsics
:rtype: rs.intrinsics
"""
assert frame_type == DataType.COLOR_FRAME or frame_type == DataType.DEPTH_FRAME
if frame_type == DataType.COLOR_FRAME:
frame = self.get_data_according_type(DataType.COLOR_FRAME)
elif frame_type == DataType.DEPTH_FRAME:
frame = self.get_data_according_type(DataType.DEPTH_FRAME)
if frame is None:
return None
intrinsics = frame.get_profile().as_video_stream_profile().get_intrinsics()
return intrinsics
def get_depth_scale(self) -> float:
"""Get depth-scale of the connected device
Returns:
float: Depth-scale
"""
# Getting the depth sensor's depth scale (see | |
# sdk/python/pulumi_snowflake/stage.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['StageArgs', 'Stage']
@pulumi.input_type
class StageArgs:
    def __init__(__self__, *,
                 database: pulumi.Input[str],
                 schema: pulumi.Input[str],
                 aws_external_id: Optional[pulumi.Input[str]] = None,
                 comment: Optional[pulumi.Input[str]] = None,
                 copy_options: Optional[pulumi.Input[str]] = None,
                 credentials: Optional[pulumi.Input[str]] = None,
                 directory: Optional[pulumi.Input[str]] = None,
                 encryption: Optional[pulumi.Input[str]] = None,
                 file_format: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 snowflake_iam_user: Optional[pulumi.Input[str]] = None,
                 storage_integration: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input['StageTagArgs']]]] = None,
                 url: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Stage resource.
        :param pulumi.Input[str] database: The database in which to create the stage.
        :param pulumi.Input[str] schema: The schema in which to create the stage.
        :param pulumi.Input[str] aws_external_id: NOTE(review): undocumented in the upstream provider schema; the name suggests the AWS external ID used for the stage's IAM trust relationship -- confirm against the Snowflake provider documentation.
        :param pulumi.Input[str] comment: Specifies a comment for the stage.
        :param pulumi.Input[str] copy_options: Specifies the copy options for the stage.
        :param pulumi.Input[str] credentials: Specifies the credentials for the stage.
        :param pulumi.Input[str] directory: Specifies the directory settings for the stage.
        :param pulumi.Input[str] encryption: Specifies the encryption settings for the stage.
        :param pulumi.Input[str] file_format: Specifies the file format for the stage.
        :param pulumi.Input[str] name: Specifies the identifier for the stage; must be unique for the database and schema in which the stage is created.
        :param pulumi.Input[str] snowflake_iam_user: NOTE(review): undocumented in the upstream provider schema -- confirm its meaning before relying on it.
        :param pulumi.Input[str] storage_integration: Specifies the name of the storage integration used to delegate authentication responsibility for external cloud storage to a Snowflake identity and access management (IAM) entity.
        :param pulumi.Input[Sequence[pulumi.Input['StageTagArgs']]] tags: Definitions of a tag to associate with the resource.
        :param pulumi.Input[str] url: Specifies the URL for the stage.
        """
        pulumi.set(__self__, "database", database)
        pulumi.set(__self__, "schema", schema)
        if aws_external_id is not None:
            pulumi.set(__self__, "aws_external_id", aws_external_id)
        if comment is not None:
            pulumi.set(__self__, "comment", comment)
        if copy_options is not None:
            pulumi.set(__self__, "copy_options", copy_options)
        if credentials is not None:
            pulumi.set(__self__, "credentials", credentials)
        if directory is not None:
            pulumi.set(__self__, "directory", directory)
        if encryption is not None:
            pulumi.set(__self__, "encryption", encryption)
        if file_format is not None:
            pulumi.set(__self__, "file_format", file_format)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if snowflake_iam_user is not None:
            pulumi.set(__self__, "snowflake_iam_user", snowflake_iam_user)
        if storage_integration is not None:
            pulumi.set(__self__, "storage_integration", storage_integration)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if url is not None:
            pulumi.set(__self__, "url", url)
    @property
    @pulumi.getter
    def database(self) -> pulumi.Input[str]:
        """
        The database in which to create the stage.
        """
        return pulumi.get(self, "database")

    @database.setter
    def database(self, value: pulumi.Input[str]):
        # Replace the stored "database" input value.
        pulumi.set(self, "database", value)
    @property
    @pulumi.getter
    def schema(self) -> pulumi.Input[str]:
        """
        The schema in which to create the stage.
        """
        return pulumi.get(self, "schema")

    @schema.setter
    def schema(self, value: pulumi.Input[str]):
        # Replace the stored "schema" input value.
        pulumi.set(self, "schema", value)
    @property
    @pulumi.getter(name="awsExternalId")
    def aws_external_id(self) -> Optional[pulumi.Input[str]]:
        """
        AWS external ID for the stage (no upstream description in this
        provider — presumably the external ID on the IAM trust policy;
        confirm against the Snowflake provider docs).
        """
        return pulumi.get(self, "aws_external_id")

    @aws_external_id.setter
    def aws_external_id(self, value: Optional[pulumi.Input[str]]):
        # Replace the stored "aws_external_id" input value.
        pulumi.set(self, "aws_external_id", value)
    @property
    @pulumi.getter
    def comment(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies a comment for the stage.
        """
        return pulumi.get(self, "comment")

    @comment.setter
    def comment(self, value: Optional[pulumi.Input[str]]):
        # Replace the stored "comment" input value.
        pulumi.set(self, "comment", value)
    @property
    @pulumi.getter(name="copyOptions")
    def copy_options(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the copy options for the stage.
        """
        return pulumi.get(self, "copy_options")

    @copy_options.setter
    def copy_options(self, value: Optional[pulumi.Input[str]]):
        # Replace the stored "copy_options" input value.
        pulumi.set(self, "copy_options", value)
    @property
    @pulumi.getter
    def credentials(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the credentials for the stage.
        """
        return pulumi.get(self, "credentials")

    @credentials.setter
    def credentials(self, value: Optional[pulumi.Input[str]]):
        # Replace the stored "credentials" input value.
        pulumi.set(self, "credentials", value)
    @property
    @pulumi.getter
    def directory(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the directory settings for the stage.
        """
        return pulumi.get(self, "directory")

    @directory.setter
    def directory(self, value: Optional[pulumi.Input[str]]):
        # Replace the stored "directory" input value.
        pulumi.set(self, "directory", value)
    @property
    @pulumi.getter
    def encryption(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the encryption settings for the stage.
        """
        return pulumi.get(self, "encryption")

    @encryption.setter
    def encryption(self, value: Optional[pulumi.Input[str]]):
        # Replace the stored "encryption" input value.
        pulumi.set(self, "encryption", value)
    @property
    @pulumi.getter(name="fileFormat")
    def file_format(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the file format for the stage.
        """
        return pulumi.get(self, "file_format")

    @file_format.setter
    def file_format(self, value: Optional[pulumi.Input[str]]):
        # Replace the stored "file_format" input value.
        pulumi.set(self, "file_format", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the identifier for the stage; must be unique for the database and schema in which the stage is created.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        # Replace the stored "name" input value.
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="snowflakeIamUser")
    def snowflake_iam_user(self) -> Optional[pulumi.Input[str]]:
        """
        Snowflake IAM user for the stage (no upstream description in this
        provider — verify semantics against the Snowflake provider docs).
        """
        return pulumi.get(self, "snowflake_iam_user")

    @snowflake_iam_user.setter
    def snowflake_iam_user(self, value: Optional[pulumi.Input[str]]):
        # Replace the stored "snowflake_iam_user" input value.
        pulumi.set(self, "snowflake_iam_user", value)
    @property
    @pulumi.getter(name="storageIntegration")
    def storage_integration(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the storage integration used to delegate authentication responsibility for external cloud storage to a Snowflake identity and access management (IAM) entity.
        """
        return pulumi.get(self, "storage_integration")

    @storage_integration.setter
    def storage_integration(self, value: Optional[pulumi.Input[str]]):
        # Replace the stored "storage_integration" input value.
        pulumi.set(self, "storage_integration", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['StageTagArgs']]]]:
        """
        Definitions of a tag to associate with the resource.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['StageTagArgs']]]]):
        # Replace the stored "tags" input value.
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter
    def url(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the URL for the stage.
        """
        return pulumi.get(self, "url")

    @url.setter
    def url(self, value: Optional[pulumi.Input[str]]):
        # Replace the stored "url" input value.
        pulumi.set(self, "url", value)
@pulumi.input_type
class _StageState:
def __init__(__self__, *,
aws_external_id: Optional[pulumi.Input[str]] = None,
comment: Optional[pulumi.Input[str]] = None,
copy_options: Optional[pulumi.Input[str]] = None,
credentials: Optional[pulumi.Input[str]] = None,
database: Optional[pulumi.Input[str]] = None,
directory: Optional[pulumi.Input[str]] = None,
encryption: Optional[pulumi.Input[str]] = None,
file_format: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
schema: Optional[pulumi.Input[str]] = None,
snowflake_iam_user: Optional[pulumi.Input[str]] = None,
storage_integration: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input['StageTagArgs']]]] = None,
url: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Stage resources.
:param pulumi.Input[str] comment: Specifies a comment for the stage.
:param pulumi.Input[str] copy_options: Specifies the copy options for the stage.
:param pulumi.Input[str] credentials: Specifies the credentials for the stage.
:param pulumi.Input[str] database: The database in which to create the stage.
:param pulumi.Input[str] directory: Specifies the directory settings for the stage.
:param pulumi.Input[str] encryption: Specifies the encryption settings for the stage.
:param pulumi.Input[str] file_format: Specifies the file format for the stage.
:param pulumi.Input[str] name: Specifies the identifier for the stage; must be unique for the database and schema in which the stage is created.
:param pulumi.Input[str] schema: The schema in which to create the stage.
:param pulumi.Input[str] storage_integration: Specifies the name of the storage integration used to delegate authentication responsibility for external cloud storage to a Snowflake identity and access management (IAM) entity.
:param pulumi.Input[Sequence[pulumi.Input['StageTagArgs']]] tags: Definitions of a tag to associate with the resource.
:param pulumi.Input[str] url: Specifies the URL for the stage.
"""
if aws_external_id is not None:
pulumi.set(__self__, "aws_external_id", aws_external_id)
if comment is not None:
pulumi.set(__self__, "comment", comment)
if copy_options is not None:
pulumi.set(__self__, "copy_options", copy_options)
if credentials is not None:
pulumi.set(__self__, "credentials", credentials)
if database is not None:
pulumi.set(__self__, "database", database)
if directory is not None:
pulumi.set(__self__, "directory", directory)
if encryption is not None:
pulumi.set(__self__, "encryption", encryption)
if file_format is not None:
pulumi.set(__self__, "file_format", file_format)
if name is not None:
pulumi.set(__self__, "name", name)
if schema is not None:
pulumi.set(__self__, "schema", schema)
if snowflake_iam_user is not None:
pulumi.set(__self__, "snowflake_iam_user", snowflake_iam_user)
if storage_integration is not None:
pulumi.set(__self__, "storage_integration", storage_integration)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if url is not None:
pulumi.set(__self__, "url", url)
@property
@pulumi.getter(name="awsExternalId")
def aws_external_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "aws_external_id")
@aws_external_id.setter
def aws_external_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "aws_external_id", value)
@property
@pulumi.getter
def comment(self) -> Optional[pulumi.Input[str]]:
"""
Specifies a comment for the stage.
"""
return pulumi.get(self, "comment")
@comment.setter
def comment(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "comment", value)
@property
@pulumi.getter(name="copyOptions")
def copy_options(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the copy options for the stage.
"""
return pulumi.get(self, "copy_options")
@copy_options.setter
def copy_options(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "copy_options", value)
@property
@pulumi.getter
def credentials(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the credentials for the stage.
"""
return pulumi.get(self, "credentials")
@credentials.setter
def credentials(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "credentials", value)
@property
@pulumi.getter
def database(self) -> Optional[pulumi.Input[str]]:
"""
The database in which to create the stage.
"""
return pulumi.get(self, "database")
@database.setter
def database(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "database", value)
@property
@pulumi.getter
def directory(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the directory settings for the stage.
"""
return pulumi.get(self, "directory")
@directory.setter
def directory(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "directory", value)
@property
@pulumi.getter
def encryption(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the encryption settings for the stage.
"""
return pulumi.get(self, "encryption")
@encryption.setter
def encryption(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "encryption", value)
@property
@pulumi.getter(name="fileFormat")
def file_format(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the file format for the stage.
"""
return pulumi.get(self, "file_format")
| |
# <gh_stars>0  (scraper artifact — not valid Python; kept as a comment)
# from django import http
import json
import re
from django_redis import get_redis_connection
from django.conf import settings
from django.contrib.auth import logout
from django.http.response import HttpResponse, HttpResponseForbidden, JsonResponse
from django.shortcuts import render, redirect
from django.urls import reverse
from django.views import View
from apps.carts.utils import merge_cart_cookie_to_redis
from apps.goods.models import SKU
from apps.users import constants
from apps.users.models import User, Address
from meiduo_mall.settings.dev import logger
from utils.response_code import RETCODE
from django.contrib.auth.mixins import LoginRequiredMixin
from utils.secret import SecretOauth
# 15.记录用户浏览记录
class UseBrosweView(View):
    """Record and list the user's recently browsed SKUs (kept in Redis).

    GET returns up to the 5 most recent SKUs; POST pushes one SKU id onto
    the deduplicated, length-capped history list.
    """

    def get(self, request):
        """Return the browse history as ``{code, errmsg, skus:[...]}``."""
        history_redis_client = get_redis_connection('history')
        sku_ids = history_redis_client.lrange('history_%s' % request.user.id, 0, -1)
        skus = []
        for sku_id in sku_ids:
            try:
                sku = SKU.objects.get(id=sku_id)
            except SKU.DoesNotExist:
                # Robustness fix: a SKU deleted after being browsed used to
                # crash this view with an unhandled DoesNotExist; skip it.
                continue
            skus.append({
                'id': sku.id,
                'name': sku.name,
                'default_image_url': sku.default_image.url,
                'price': sku.price,
            })
        return JsonResponse({'code': 0, 'errmsg': "OK", "skus": skus})

    def post(self, request):
        """Record ``sku_id`` from the JSON body into the user's history."""
        sku_id = json.loads(request.body.decode()).get('sku_id')
        try:
            sku = SKU.objects.get(id=sku_id)
        except Exception:
            return HttpResponseForbidden('商品不存在!')
        history_redis_client = get_redis_connection('history')
        history_key = 'history_%s' % request.user.id
        # Pipeline: dedupe, push newest first, trim to 5, in one round trip.
        p1 = history_redis_client.pipeline()
        p1.lrem(history_key, 0, sku_id)
        p1.lpush(history_key, sku_id)
        p1.ltrim(history_key, 0, 4)
        p1.execute()
        return JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK'})
# 14.密码操作
class ChangePasswordView(LoginRequiredMixin, View):
    """Let a logged-in user change their password, then force re-login."""

    def get(self, request):
        return render(request, 'user_center_pass.html')

    def post(self, request):
        """Validate old/new passwords, update, log out, redirect to login.

        Bug fixes: the original referenced unresolved ``<PASSWORD>``
        placeholders (NameError at runtime) and called
        ``http.HttpResponseForbidden`` although ``http`` was never imported;
        the directly-imported ``HttpResponseForbidden`` is used instead.
        """
        old_password = request.POST.get('old_pwd')
        new_password = request.POST.get('new_pwd')
        new_password2 = request.POST.get('new_cpwd')
        if not all([old_password, new_password, new_password2]):
            return HttpResponseForbidden('缺少必传参数')
        if not request.user.check_password(old_password):
            return render(request, 'user_center_pass.html', {'origin_pwd_errmsg': '原始密码错误'})
        if not re.match(r'^[0-9A-Za-z]{8,20}$', new_password):
            return HttpResponseForbidden('密码最少8位,最长20位')
        if new_password != new_password2:
            return HttpResponseForbidden('两次输入的密码不一致')
        try:
            # set_password() stores the salted hash, never the raw value.
            request.user.set_password(new_password)
            request.user.save()
        except Exception as e:
            logger.error(e)
            return render(request, 'user_center_pass.html', {'change_pwd_errmsg': '修改密码失败'})
        # The old session is no longer valid after a password change.
        logout(request)
        response = redirect(reverse('users:login'))
        response.delete_cookie('username')
        return response
# 13. 修改标题
class UpdateTitleAddressView(View):
    """Rename the display title of one shipping address."""

    def put(self, request, address_id):
        """Read ``title`` from the JSON body and store it on the address."""
        new_title = json.loads(request.body.decode()).get('title')
        try:
            target = Address.objects.get(id=address_id)
            target.title = new_title
            target.save()
        except Exception as exc:
            logger.error(exc)
            return JsonResponse({'code': RETCODE.DBERR, 'errmsg': '设置地址标题失败'})
        return JsonResponse({'code': RETCODE.OK, 'errmsg': '设置地址标题成功'})
# 12 .设置默认地址
class DefaultAddressView(View):
    """Mark one address as the user's default shipping address."""

    def put(self, request, address_id):
        """Point ``user.default_address`` at the address with ``address_id``."""
        try:
            chosen = Address.objects.get(id=address_id)
            request.user.default_address = chosen
            request.user.save()
        except Exception as exc:
            logger.error(exc)
            return JsonResponse({'code': RETCODE.DBERR, 'errmsg': '设置默认地址失败'})
        return JsonResponse({'code': RETCODE.OK, 'errmsg': '设置默认地址成功'})
# 11.修改地址
class UpdateAddressView(View):
    """Update (PUT) or soft-delete (DELETE) one shipping address."""

    def put(self, request, address_id):
        """Validate the JSON body and overwrite the address fields.

        Bug fix: the original called ``http.HttpResponseForbidden`` /
        ``http.*`` but the ``http`` module was never imported (the top-of-file
        import is commented out), so every validation failure raised
        NameError; the directly-imported class is used instead.
        """
        json_dict = json.loads(request.body.decode())
        receiver = json_dict.get('receiver')
        province_id = json_dict.get('province_id')
        city_id = json_dict.get('city_id')
        district_id = json_dict.get('district_id')
        place = json_dict.get('place')
        mobile = json_dict.get('mobile')
        tel = json_dict.get('tel')
        email = json_dict.get('email')
        if not all([receiver, province_id, city_id, district_id, place, mobile]):
            return HttpResponseForbidden('缺少必传参数')
        if not re.match(r'^1[3-9]\d{9}$', mobile):
            return HttpResponseForbidden('参数mobile有误')
        if tel and not re.match(r'^(0[0-9]{2,3}-)?([2-9][0-9]{6,7})+(-[0-9]{1,4})?$', tel):
            return HttpResponseForbidden('参数tel有误')
        if email and not re.match(r'^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
            return HttpResponseForbidden('参数email有误')
        try:
            address = Address.objects.get(id=address_id)
            address.user = request.user
            address.title = receiver
            address.receiver = receiver
            address.province_id = province_id
            address.city_id = city_id
            address.district_id = district_id
            address.place = place
            address.mobile = mobile
            address.tel = tel
            address.email = email
            address.save()
        except Exception as e:
            logger.error(e)
            return JsonResponse({'code': RETCODE.DBERR, 'errmsg': '更新地址失败'})
        # Perf: reuse the saved instance instead of re-querying it (the
        # original issued a second identical SELECT here).
        address_dict = {
            "id": address.id,
            "title": address.title,
            "receiver": address.receiver,
            "province": address.province.name,
            "city": address.city.name,
            "district": address.district.name,
            "place": address.place,
            "mobile": address.mobile,
            "tel": address.tel,
            "email": address.email
        }
        return JsonResponse({'code': RETCODE.OK, 'errmsg': '更新地址成功', 'address': address_dict})

    def delete(self, request, address_id):
        """Soft-delete: flag the row instead of removing it."""
        try:
            address = Address.objects.get(id=address_id)
            address.is_deleted = True
            address.save()
        except Exception as e:
            logger.error(e)
            return JsonResponse({'code': RETCODE.DBERR, 'errmsg': '删除地址失败'})
        return JsonResponse({'code': RETCODE.OK, 'errmsg': '删除地址成功'})
# 10.新增地址
class CreateAddressView(LoginRequiredMixin, View):
    """Create a shipping address for the current user (hard cap of 20)."""

    def post(self, request):
        """Validate the JSON body, create the Address, return it as JSON."""
        # Fix: the original ran two identical COUNT queries back to back;
        # one is enough.
        count = request.user.addresses.filter(is_deleted=False).count()
        if count >= 20:
            return JsonResponse({'code': RETCODE.THROTTLINGERR, 'errmsg': '超过地址数量上限'})
        json_dict = json.loads(request.body.decode())
        receiver = json_dict.get('receiver')
        province_id = json_dict.get('province_id')
        city_id = json_dict.get('city_id')
        district_id = json_dict.get('district_id')
        place = json_dict.get('place')
        mobile = json_dict.get('mobile')
        tel = json_dict.get('tel')
        email = json_dict.get('email')
        # Fix: validation was promised in a comment but absent; mirror the
        # checks used by UpdateAddressView so both endpoints agree.
        if not all([receiver, province_id, city_id, district_id, place, mobile]):
            return HttpResponseForbidden('缺少必传参数')
        if not re.match(r'^1[3-9]\d{9}$', mobile):
            return HttpResponseForbidden('参数mobile有误')
        if tel and not re.match(r'^(0[0-9]{2,3}-)?([2-9][0-9]{6,7})+(-[0-9]{1,4})?$', tel):
            return HttpResponseForbidden('参数tel有误')
        if email and not re.match(r'^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
            return HttpResponseForbidden('参数email有误')
        try:
            address = Address.objects.create(
                user=request.user,
                title=receiver,
                receiver=receiver,
                province_id=province_id,
                city_id=city_id,
                district_id=district_id,
                place=place,
                mobile=mobile,
                tel=tel,
                email=email,
            )
            # First address ever becomes the default automatically.
            if not request.user.default_address:
                request.user.default_address = address
                request.user.save()
        except Exception as e:
            logger.error(e)
            return JsonResponse({'code': RETCODE.DBERR, 'errmsg': '新增地址失败'})
        address_dict = {
            "id": address.id,
            "title": address.receiver,
            "receiver": address.receiver,
            "province": address.province.name,
            "city": address.city.name,
            "district": address.district.name,
            "place": address.place,
            "mobile": address.mobile,
            "tel": address.tel,
            "email": address.email,
        }
        return JsonResponse({'code': RETCODE.OK, 'errmsg': '新增地址成功', 'address': address_dict})
# 9.收货地址
class AddressView(View):
    """Render the address-management page with the user's live addresses."""

    def get(self, request):
        """Collect all non-deleted addresses into the template context."""
        live_addresses = Address.objects.filter(user=request.user, is_deleted=False)
        address_list = [
            {
                "id": addr.id,
                "title": addr.title,
                "receiver": addr.receiver,
                "province": addr.province.name,
                "city": addr.city.name,
                "district": addr.district.name,
                "place": addr.place,
                "mobile": addr.mobile,
                "tel": addr.tel,
                "email": addr.email,
            }
            for addr in live_addresses
        ]
        context = {
            'default_address_id': request.user.default_address_id,
            "addresses": address_list,
        }
        return render(request, 'user_center_site.html', context)
# 8.激活邮箱
class VerifyEmailView(View):
    """Activate a user's email from the token in the verification link."""

    def get(self, request):
        """Decode ``?token=``, flag ``email_active``, redirect to the index.

        Bug fix: the original returned ``http.HttpResponseServerError`` but
        ``http`` was never imported (NameError); an explicit 500 response is
        returned instead. Token decoding is also guarded — a malformed or
        missing token now yields 403 rather than an unhandled exception.
        """
        token = request.GET.get('token')
        try:
            token_dict = SecretOauth().loads(token)
            user = User.objects.get(id=token_dict['user_id'], email=token_dict['email'])
        except Exception:
            return HttpResponseForbidden('无效的token')
        try:
            user.email_active = True
            user.save()
        except Exception as e:
            logger.error(e)
            return HttpResponse('激活邮件失败', status=500)
        return redirect(reverse('contents:index'))
# 7.增加邮箱
class EmailView(LoginRequiredMixin, View):
    """Attach an email address to the user and send a verification mail."""

    def put(self, request):
        """Store ``email`` from the JSON body and queue the verify email.

        Fix: validation was promised in the original comments but missing;
        the address is now checked against the same pattern used elsewhere
        in this module before being saved.
        """
        email = json.loads(request.body.decode()).get('email')
        if not email or not re.match(r'^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
            return HttpResponseForbidden('参数email有误')
        try:
            request.user.email = email
            request.user.save()
        except Exception as e:
            logger.error(e)
            return JsonResponse({'code': RETCODE.EMAILERR, 'errmsg': '添加邮箱失败'})
        # Local imports: presumably avoid a circular users<->celery import
        # at module load time — confirm before hoisting.
        from apps.users.utils import generate_verify_email_url
        verify_url = generate_verify_email_url(request.user)
        from celery_tasks.email.tasks import send_verify_email
        # Sending mail is slow; delegate it to a Celery worker.
        send_verify_email.delay(email, verify_url)
        return JsonResponse({'code': RETCODE.OK, 'errmsg': '添加邮箱成功'})
# 6.个人中心
class UserInfoView(LoginRequiredMixin, View):
    """Personal-center page for the authenticated user."""

    def get(self, request):
        """Render the info page from the logged-in user's profile fields."""
        current = request.user
        context = dict(
            username=current.username,
            mobile=current.mobile,
            email=current.email,
            email_active=current.email_active,
        )
        return render(request, 'user_center_info.html', context)
# 5.退出登录
class LogoutView(View):
    """Log the user out and send them to the login page."""

    def get(self, request):
        """Clear the session and the username cookie, redirect to login."""
        # Fix: the original re-imported `logout` locally on every request
        # although it is already imported at module level.
        logout(request)
        response = redirect(reverse('users:login'))
        # Drop the cookie so the navbar no longer shows the old username.
        response.delete_cookie('username')
        return response
# 4.登录页
class LoginView(View):
    """Username/password login with optional "remember me" and cart merge."""

    def get(self, request):
        return render(request, 'login.html')

    def post(self, request):
        """Authenticate, set session lifetime, merge the cookie cart."""
        username = request.POST.get('username')
        password = request.POST.get('password')
        remembered = request.POST.get('remembered')
        # Guard clauses: reject incomplete or malformed credentials early.
        if not all([username, password]):
            return HttpResponseForbidden('参数不齐全')
        if not re.match(r'^[a-zA-Z0-9_-]{5,20}$', username):
            return HttpResponseForbidden('请输入5-20个字符的用户名')
        if not re.match(r'^[0-9A-Za-z]{8,20}$', password):
            return HttpResponseForbidden('请输入8-20位的密码')
        from django.contrib.auth import authenticate, login
        user = authenticate(username=username, password=password)
        if user is None:
            return render(request, 'login.html', {'account_errmsg': '用户名或密码错误'})
        login(request, user)
        # Browser-session lifetime unless "remember me" was checked
        # (None = Django's default two-week expiry).
        request.session.set_expiry(None if remembered == 'on' else 0)
        next_url = request.GET.get('next')
        if next_url:
            response = redirect(next_url)
        else:
            response = redirect(reverse('contents:index'))
        # Fold the anonymous cookie cart into the user's Redis cart.
        response = merge_cart_cookie_to_redis(request=request, user=user, response=response)
        response.set_cookie('username', username, constants.USERNAME_EXPIRE_TIME)
        return response
# 3.判断手机号是否重
class MobileCountView(View):
    """Count users registered with a given mobile number (duplicate check)."""

    def get(self, request, mobile):
        """The URL path converter already validated the number format."""
        duplicate_count = User.objects.filter(mobile=mobile).count()
        # RETCODE is already imported at module level.
        return JsonResponse({'code': RETCODE.OK, 'errmsg': 'ok', 'count': duplicate_count})
# 2.判断用户名是否重复
class UsernameCountView(View):
    """Count users with a given username (duplicate check)."""

    def get(self, request, username):
        """Return how many existing users already hold ``username``.

        Fix: the original re-imported ``User`` and ``RETCODE`` locally on
        every request although both are imported at module level.
        """
        count = User.objects.filter(username=username).count()
        # NOTE(review): sibling views return RETCODE.OK here; the literal
        # '0' is kept to avoid changing the JSON payload — confirm that
        # RETCODE.OK == '0' before unifying.
        return JsonResponse({
            "code": '0',
            "errmsg": "ok",
            "count": count,
        })
# 1.定义注册类视图
class RegisterView(View):
    """User registration: render the form (GET) and create the account (POST)."""

    def get(self, request):
        return render(request, 'register.html')

    def post(self, request):
        """Validate the form, verify the SMS code, create and log in the user.

        Bug fixes: the password-confirmation check compared against an
        unresolved ``<PASSWORD>`` placeholder (NameError at runtime); the
        account-creation ``except`` was a bare clause that swallowed every
        error silently; ``get_redis_connection`` was re-imported locally
        although it is imported at module level.
        """
        username = request.POST.get('username')
        password = request.POST.get('password')
        password2 = request.POST.get('password2')
        mobile = request.POST.get('mobile')
        allow = request.POST.get('allow')
        if not all([username, password, password2, mobile, allow]):
            return HttpResponseForbidden('参数不全!')
        if not re.match(r'^[a-zA-Z0-9_-]{5,20}$', username):
            return HttpResponseForbidden('请输入5-20个字符的用户名')
        if not re.match(r'^[0-9A-Za-z]{8,20}$', password):
            return HttpResponseForbidden('请输入8-20位的密码')
        if password != password2:
            return HttpResponseForbidden('两次密码不一致!')
        if not re.match(r'^1[345789]\d{9}$', mobile):
            return HttpResponseForbidden('您输入的手机号格式不正确')
        if allow != 'on':
            return HttpResponseForbidden('请勾选用户同意!')
        # Compare the submitted SMS code with the one stored in Redis.
        sms_code = request.POST.get('msg_code')
        redis_code_client = get_redis_connection('sms_code')
        redis_code = redis_code_client.get('sms_%s' % mobile)
        if redis_code is None:
            return render(request, 'register.html', {'sms_code_errmsg': '无效的短信验证码'})
        # Redis returns bytes; decode before comparing with the form value.
        if redis_code.decode() != sms_code:
            return render(request, 'register.html', {'sms_code_errmsg': '不正确的短信验证码'})
        try:
            user = User.objects.create_user(
                username=username,
                password=password,
                mobile=mobile,
            )
        except Exception as e:
            logger.error(e)
            return render(request, 'register.html', {'register_errmsg': '注册失败'})
        # Log the new account in immediately (local import kept — `login`
        # is not imported at module level in this file).
        from django.contrib.auth import login
        login(request, user)
        response = redirect(reverse('contents:index'))
        response.set_cookie('username', username, constants.USERNAME_EXPIRE_TIME)
        return response
| |
2), # new bucket
(2, 2, 4, 4), # new bucket because of overlap with previous bucket
(0, 0, 3, 3), # new bucket because of size
(5, 5, 7, 7), # same as first
]
expected = [[0, 3], [1], [2]]
buckets = aggregate.bucketize(bboxes)
self.assertEqual([0, 1, 2, 3], sorted(i for b in buckets for i in b))
self.assertEqual(expected, sorted(buckets))
class TestSetGetSeries(unittest.TestCase):
    """Tests for GetSeriesBlock / SetSeriesBlock column access and assignment."""

    def setUp(self):
        # Two mock sources over the same N squares; source1 carries col_1,
        # source2 carries col_2/col_3, so set/get can be tested across them.
        self.N = 10
        self.properties = [{"id": i, "col_1": i * 2} for i in range(self.N)]
        self.source1 = MockGeometry(
            polygons=[((2.0, 2.0), (8.0, 2.0), (8.0, 8.0), (2.0, 8.0))] * self.N,
            properties=self.properties,
        )
        self.properties = [
            {"id": i, "col_2": i * 3, "col_3": i * 4} for i in range(self.N)
        ]
        self.source2 = MockGeometry(
            polygons=[((2.0, 2.0), (8.0, 2.0), (8.0, 8.0), (2.0, 8.0))] * self.N,
            properties=self.properties,
        )
        self.request = dict(
            mode="intersects", projection="EPSG:3857", geometry=box(0, 0, 10, 10)
        )

    def test_get_series(self):
        series = geometry.GetSeriesBlock(self.source1, "col_1")
        data = series.get_data(**self.request)
        assert_almost_equal(data.values, [i * 2 for i in range(self.N)])

    def test_get_not_available(self):
        # Requesting a column the source does not have must fail fast.
        self.assertRaises(
            KeyError, geometry.GetSeriesBlock, self.source1, "not_available"
        )

    def test_get_series_by_indexing(self):
        series = self.source1["col_1"]
        self.assertIsInstance(series, geometry.GetSeriesBlock)
        self.assertIs(series.args[0], self.source1)
        self.assertIs(series.args[1], "col_1")

    def test_set_series(self):
        source = geometry.SetSeriesBlock(self.source1, "added", self.source2["col_2"])
        data = source.get_data(**self.request)
        added_values = data["features"]["added"].values
        assert_almost_equal(added_values, [i * 3 for i in range(self.N)])
        self.assertSetEqual({"geometry", "col_1", "added"}, source.columns)

    def test_set_series_overwrite(self):
        source = geometry.SetSeriesBlock(self.source1, "col_1", self.source2["col_2"])
        data = source.get_data(**self.request)
        added_values = data["features"]["col_1"].values
        assert_almost_equal(added_values, [i * 3 for i in range(self.N)])
        self.assertSetEqual({"geometry", "col_1"}, source.columns)

    def test_set_series_multiple(self):
        source = geometry.SetSeriesBlock(
            self.source1,
            "added",
            self.source2["col_2"],
            "added2",
            self.source2["col_3"],
        )
        data = source.get_data(**self.request)
        added_values = data["features"]["added"].values
        assert_almost_equal(added_values, [i * 3 for i in range(self.N)])
        added_values = data["features"]["added2"].values
        assert_almost_equal(added_values, [i * 4 for i in range(self.N)])
        self.assertSetEqual({"geometry", "col_1", "added", "added2"}, source.columns)

    def test_set_series_by_set_method(self):
        args = ("a1", self.source2["col_2"], "a2", self.source2["col_3"])
        source = self.source1.set(*args)
        self.assertIsInstance(source, geometry.SetSeriesBlock)
        self.assertIs(source.args[0], self.source1)
        self.assertTupleEqual(source.args[1:], args)

    def test_set_series_float(self):
        source = geometry.SetSeriesBlock(self.source1, "constant", 2.1)
        data = source.get_data(**self.request)["features"]["constant"]
        self.assertTrue(np.issubdtype(data.dtype, np.floating))
        self.assertTrue((data == 2.1).all())

    def test_set_series_int(self):
        source = geometry.SetSeriesBlock(self.source1, "constant", 2)
        # Fix: the original fetched the data twice; one call is enough.
        data = source.get_data(**self.request)["features"]["constant"]
        self.assertTrue(np.issubdtype(data.dtype, np.integer))
        self.assertTrue((data == 2).all())

    def test_set_series_bool(self):
        source = geometry.SetSeriesBlock(self.source1, "constant", True)
        data = source.get_data(**self.request)["features"]["constant"]
        # Fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the documented replacement and compares the
        # same against a bool dtype.
        self.assertTrue(data.dtype == bool)
        self.assertTrue(data.all())

    def test_set_series_string(self):
        source = geometry.SetSeriesBlock(self.source1, "constant", "string")
        data = source.get_data(**self.request)["features"]["constant"]
        self.assertTrue((data == "string").all())
class TestWhere(unittest.TestCase):
    """Tests for the Where/Mask field operations against pandas semantics."""

    def setUp(self):
        # Values chosen to exercise infinities and NaN alongside finite data.
        values = [-float("inf"), -2, 1.2, 5.0, float("inf"), float("nan")]
        self.properties = [
            {
                "id": i,
                "col_1": x,
                # Idiom fix: `True if x >= 0 else False` is just `x >= 0`.
                # Note NaN >= 0 is False, same as the original expression.
                "bool_filter": x >= 0,
                "extra": x * 10,
            }
            for i, x in enumerate(values)
        ]
        self.source = MockGeometry(
            polygons=[((2.0, 2.0), (8.0, 2.0), (8.0, 8.0), (2.0, 8.0))] * len(values),
            properties=self.properties,
        )
        # Reference frame: expectations are computed with pandas directly.
        self.prop_df = pd.DataFrame(self.properties)
        self.request = dict(
            mode="intersects", projection="EPSG:3857", geometry=box(0, 0, 10, 10)
        )

    def test_where(self):
        series = field_operations.Where(
            self.source["col_1"], cond=self.source["bool_filter"], other="Hola!"
        )
        view = self.source.set("result", series)
        result = view.get_data(**self.request)
        expected = self.prop_df["col_1"].where(self.prop_df["bool_filter"], "Hola!")
        self.assertTrue(result["features"]["result"].equals(expected))

    def test_where_with_other_column(self):
        series = field_operations.Where(
            self.source["col_1"],
            cond=self.source["bool_filter"],
            other=self.source["extra"],
        )
        view = self.source.set("result", series)
        result = view.get_data(**self.request)
        expected = self.prop_df["col_1"].where(
            self.prop_df["bool_filter"], self.prop_df["extra"]
        )
        self.assertTrue(result["features"]["result"].equals(expected))

    def test_mask(self):
        series = field_operations.Mask(
            self.source["col_1"], cond=self.source["bool_filter"], other="Hola!"
        )
        view = self.source.set("result", series)
        result = view.get_data(**self.request)
        expected = self.prop_df["col_1"].mask(self.prop_df["bool_filter"], "Hola!")
        self.assertTrue(result["features"]["result"].equals(expected))

    def test_mask_with_other_column(self):
        series = field_operations.Mask(
            self.source["col_1"],
            cond=self.source["bool_filter"],
            other=self.source["extra"],
        )
        view = self.source.set("result", series)
        result = view.get_data(**self.request)
        expected = self.prop_df["col_1"].mask(
            self.prop_df["bool_filter"], self.prop_df["extra"]
        )
        self.assertTrue(result["features"]["result"].equals(expected))
class TestMerge(unittest.TestCase):
    """Tests for MergeGeometryBlocks joins between geometry sources."""

    @staticmethod
    def _unit_square(x0, y0):
        # One 1x1 square polygon with its lower-left corner at (x0, y0).
        return [((x0, y0), (x0 + 1.0, y0), (x0 + 1.0, y0 + 1.0), (x0, y0 + 1.0))]

    def setUp(self):
        # source_1 and source_2 touch at (3, 3); source_4 is disjoint from
        # source_1; source_3 has no features at all.
        self.source_1 = MockGeometry(polygons=self._unit_square(2.0, 2.0))
        self.source_2 = MockGeometry(polygons=self._unit_square(3.0, 3.0))
        self.source_3 = MockGeometry([])
        self.source_4 = MockGeometry(polygons=self._unit_square(5.0, 5.0))
        self.request = dict(
            geometry=box(0, 0, 10, 10), mode="intersects", projection="EPSG:3857"
        )

    def _merge_with(self, right, how, suffixes=None):
        # Build a merge view with source_1 on the left.
        kwargs = dict(left=self.source_1, right=right, how=how)
        if suffixes is not None:
            kwargs["suffixes"] = suffixes
        return merge.MergeGeometryBlocks(**kwargs)

    def test_merge_dask_geomodeling(self):
        view = self._merge_with(self.source_2, "inner", suffixes=("", "_right"))
        result = view.get_data(**self.request)
        expected_columns = {"geometry", "geometry_right"}
        self.assertSetEqual(set(result["features"].columns), expected_columns)
        self.assertSetEqual(view.columns, expected_columns)
        self.assertIsInstance(result["features"], gpd.GeoDataFrame)

    def test_merge_dask_geomodeling_empty_source(self):
        view = self._merge_with(self.source_3, "inner", suffixes=("", "_right"))
        result = view.get_data(**self.request)
        self.assertTrue(result["features"].empty)

    def test_merge_dask_geomodeling_extent_mode(self):
        self.request["mode"] = "extent"
        result = self._merge_with(self.source_2, "inner").get_data(**self.request)
        # Inner join of two squares touching at a corner: a point extent.
        self.assertTupleEqual(result["extent"], (3.0, 3.0, 3.0, 3.0))

    def test_merge_dask_geomodeling_extent_mode_no_intersect(self):
        self.request["mode"] = "extent"
        result = self._merge_with(self.source_4, "inner").get_data(**self.request)
        self.assertIsNone(result["extent"])

    def test_merge_dask_geomodeling_extent_mode_no_intersect_outer_join(self):
        self.request["mode"] = "extent"
        result = self._merge_with(self.source_4, "outer").get_data(**self.request)
        # Outer join keeps both disjoint squares: bounding box of the union.
        self.assertTupleEqual(result["extent"], (2.0, 2.0, 6.0, 6.0))

    def test_merge_dask_geomodeling_source_empty(self):
        self.request["mode"] = "extent"
        result = self._merge_with(self.source_3, "inner").get_data(**self.request)
        self.assertIsNone(result["extent"])

    def test_merge_dask_geomodeling_no_intersect_outer_join_source_empty(self):
        self.request["mode"] = "extent"
        result = self._merge_with(self.source_3, "outer").get_data(**self.request)
        self.assertTupleEqual(result["extent"], (2.0, 2.0, 3.0, 3.0))
class TestFieldOperations(unittest.TestCase):
def setUp(self):
values = [-float("inf"), -2, 1.2, 5.0, float("inf"), float("nan")]
self.properties = [
{
"id": i,
"id_value": float(i),
"col_1": x,
"col_2": 2 * x,
"bool_1": x > 0,
"bool_2": x > 2,
"col_source": float(i * 2 + 1),
"col_choice_1": chr(i + 65), # 'A'
"col_choice_2": chr(i + 70), # 'F'
}
for i, x in enumerate(values)
]
self.prop_df = pd.DataFrame(self.properties)
self.source = MockGeometry(
polygons=[((2.0, 2.0), (8.0, 2.0), (8.0, 8.0), (2.0, 8.0))] * len(values),
properties=self.properties,
)
self.request = dict(
mode="intersects", projection="EPSG:3857", geometry=box(0, 0, 10, 10)
)
def test_choose(self):
    """Choose picks from col_1/col_2/bool_1 per row, indexed by id_value."""
    chooser = field_operations.Choose(
        self.source["id_value"],
        self.source["col_1"],
        self.source["col_2"],
        self.source["bool_1"],
    )
    values = chooser.get_data(**self.request).values
    self.assertEqual(-float("inf"), values[0])  # -inf
    self.assertEqual(-4.0, values[1])  # -4.0
    self.assertEqual(1.0, values[2])  # 1.0
    for idx in (3, 4, 5):  # out-of-range / nan choosers give nan
        self.assertTrue(np.isnan(values[idx]))
def test_choose_values_neq_index(self):
    """Choose also works when the chooser column differs from the index."""
    chooser = field_operations.Choose(
        self.source["col_source"],
        self.source["col_1"],
        self.source["col_2"],
        self.source["bool_1"],
    )
    data = chooser.get_data(**self.request)
    self.assertEqual(-float("inf"), data.values[0])
def test_choice_dtype_str(self):
    """Choose supports string-typed choice columns."""
    chooser = field_operations.Choose(
        self.source["id_value"],
        self.source["col_choice_1"],
        self.source["col_choice_2"],
    )
    data = chooser.get_data(**self.request)
    first, second = data.values[0], data.values[1]
    self.assertEqual("A", first)
    self.assertEqual("G", second)
def test_choose_different_length(self):
    """The chooser source may have more rows than the choice columns."""
    vals = [-float("inf"), -2, 1.2, 5.0, float("inf"), float("nan"), 1]
    square = ((2.0, 2.0), (8.0, 2.0), (8.0, 8.0), (2.0, 8.0))
    other_source = MockGeometry(
        polygons=[square] * len(vals),
        properties=[{"id_value": float(i)} for i in range(len(vals))],
    )
    chooser = field_operations.Choose(
        other_source["id_value"],
        self.source["col_choice_1"],
        self.source["col_2"],
        self.source["bool_1"],
    )
    values = chooser.get_data(**self.request).values
    self.assertEqual("A", values[0])
    self.assertEqual(-4, values[1])
    self.assertTrue(values[2])
    self.assertEqual(len(values), len(vals))
def test_classify_field(self):
    """Classify bins col_1 into labels; values outside the bins become NaN."""
    classified = field_operations.Classify(
        self.source["col_1"], bins=[0, 1.2, 5.0], labels=["A", "B"]
    )
    values = classified.get_data(**self.request).values
    for idx in (0, 1, 4, 5):  # -inf, -2, inf, nan fall outside the bins
        self.assertTrue(np.isnan(values[idx]))
    self.assertEqual("A", values[2])  # 1.2
    self.assertEqual("B", values[3])  # 5.0
def test_classify_field_left(self):
    """With right=False the bin edges are left-inclusive."""
    classified = field_operations.Classify(
        self.source["col_1"], bins=[0, 1.2, 10.0], labels=["A", "B"], right=False
    )
    values = classified.get_data(**self.request).values
    for idx in (0, 1, 4, 5):  # -inf, -2, inf, nan
        self.assertTrue(np.isnan(values[idx]))
    self.assertEqual("B", values[2])  # 1.2
    self.assertEqual("B", values[3])  # 5.0
def test_classify_field_open_bounds(self):
    """One more label than bins makes the outer bins unbounded."""
    classified = field_operations.Classify(
        self.source["col_1"], bins=[1.2, 5], labels=["A", "B", "C"]
    )
    values = classified.get_data(**self.request).values
    # -inf, -2, 1.2 -> A; 5. -> B; inf -> C
    for idx, label in enumerate(["A", "A", "A", "B", "C"]):
        self.assertEqual(label, values[idx])
    self.assertTrue(np.isnan(values[5]))  # nan
def test_classify_field_open_bounds_left(self):
    """Open bounds combined with left-inclusive edges."""
    classified = field_operations.Classify(
        self.source["col_1"], bins=[1.2, 5], labels=["A", "B", "C"], right=False
    )
    values = classified.get_data(**self.request).values
    # -inf, -2 -> A; 1.2 -> B; 5., inf -> C
    for idx, label in enumerate(["A", "A", "B", "C", "C"]):
        self.assertEqual(label, values[idx])
    self.assertTrue(np.isnan(values[5]))  # nan
def test_classify_from_columns_empty(self):
    """An empty request geometry yields an empty result."""
    classified = field_operations.ClassifyFromColumns(
        self.source, "col_1", ["id_value"], labels=["A", "B"]
    )
    data = classified.get_data(
        mode="intersects", projection="EPSG:3857", geometry=box(0, 0, 0, 0)
    )
    self.assertEqual(0, len(data))
def test_classify_from_columns_varying_bin(self):
    """Bin edges may vary per feature when taken from a column."""
    classified = field_operations.ClassifyFromColumns(
        self.source,
        "col_1",
        ["id_value"],
        labels=["lower_than_id", "higher_than_id"],
    )
    values = classified.get_data(**self.request).values
    for idx in (0, 1, 2):  # -inf < 0, -2 < 1, 1.2 < 2
        self.assertEqual("lower_than_id", values[idx])
    for idx in (3, 4):  # 5. > 3, inf > 4
        self.assertEqual("higher_than_id", values[idx])
    self.assertTrue(np.isnan(values[5]))  # nan
def test_classify_from_columns(self):
    """Constant bin columns reproduce a plain Classify."""
    with_bins = self.source.set("bin_1", 0, "bin_2", 1.2, "bin_3", 5.0)
    actual = field_operations.ClassifyFromColumns(
        with_bins, "col_1", ["bin_1", "bin_2", "bin_3"], labels=["A", "B"]
    ).get_data(**self.request)
    reference = field_operations.Classify(
        self.source["col_1"], bins=[0, 1.2, 5.0], labels=["A", "B"]
    ).get_data(**self.request)
    assert_series_equal(actual, reference, check_names=False)
def test_classify_astype_category_int(self):
    """Integer labels do not produce a categorical dtype."""
    result = field_operations.Classify(
        self.source["col_source"], bins=[0, 0.5, 1.0], labels=[1, 2, 3, 4]
    ).get_data(**self.request)
    self.assertNotEqual(result.dtypes.name, "category")
def test_classify_astype_category_object(self):
    """String labels produce a categorical dtype."""
    result = field_operations.Classify(
        self.source["col_source"], bins=[0, 0.5, 1.0], labels=["A", "B", "C", "D"]
    ).get_data(**self.request)
    self.assertEqual(result.dtypes.name, "category")
def test_classify_from_columns_left(self):
    """Constant bin columns with right=False match a plain left Classify."""
    with_bins = self.source.set("bin_1", 0, "bin_2", 1.2, "bin_3", 5.0)
    actual = field_operations.ClassifyFromColumns(
        with_bins,
        "col_1",
        ["bin_1", "bin_2", "bin_3"],
        labels=["A", "B"],
        right=False,
    ).get_data(**self.request)
    reference = field_operations.Classify(
        self.source["col_1"], bins=[0, 1.2, 5.0], labels=["A", "B"], right=False
    ).get_data(**self.request)
    assert_series_equal(actual, reference, check_names=False)
def test_classify_from_columns_open_bounds(self):
    """Open-bounded classification matches between the two Classify flavors."""
    with_bins = self.source.set("bin_1", 1.2, "bin_2", 5)
    actual = field_operations.ClassifyFromColumns(
        with_bins, "col_1", ["bin_1", "bin_2"], labels=["A", "B", "C"]
    ).get_data(**self.request)
    reference = field_operations.Classify(
        self.source["col_1"], bins=[1.2, 5.0], labels=["A", "B", "C"]
    ).get_data(**self.request)
    assert_series_equal(actual, reference, check_names=False)
def test_classify_from_columns_open_bounds_left(self):
source_with_bins = self.source.set("bin_1", 1.2, "bin_2", 5)
series | |
#!/usr/bin/python3
# This code originated as a project for the COMP 520 class at McGill
# University in Winter 2015. Any subsequent COMP 520 student who is
# viewing this code must follow the course rules and report any viewing
# and/or use of the code.
import sys, os, logging, argparse, cmd, posixpath
from subprocess import Popen, PIPE, check_call, check_output, CalledProcessError
from collections import OrderedDict
from difflib import Differ
# --- Logging configuration ---
# Module-level logger named after this file; handlers are attached in
# initialize_config().
logger = logging.getLogger(__file__)
# Log test failures at a level above warnings but below errors
LOG_TEST_FAILURE = (logging.WARNING + logging.ERROR) // 2
logging.addLevelName(LOG_TEST_FAILURE, 'TEST FAILED')
# --- Constants ---
# Return codes
TESTS_GOOD = 0      # every test passed
TESTS_FAILED = 1    # at least one test failed its expectation
TEST_ERROR = 2      # a test produced an internal (uncontrolled) error
PROGRAM_ERROR = 3   # the test runner itself hit an error
USER_INTERRUPT = 4  # presumably set on Ctrl-C — confirm where it is raised
# Compiler stages
# Ordered list of compiler stages; the *_STAGE indices below must stay in
# sync with the positions in this list.
STAGES = [
    'lexer',
    'parser',
    'weeding',
    'type',
    'code_gen'
]
LEXER_STAGE = 0
PARSER_STAGE = 1
WEEDING_STAGE = 2
TYPE_STAGE = 3
CODE_GEN_STAGE = 4
# Sentinel index meaning the targeted stage could not be determined.
UNDETECTED_STAGE = len(STAGES)
# Alternate stage names mapped onto the canonical stage indices.
STAGE_ALIASES = {
    'scanner': LEXER_STAGE,
    'syntax': PARSER_STAGE,
    'types': TYPE_STAGE,
    'semantic': CODE_GEN_STAGE,
    'benchmark': CODE_GEN_STAGE
}
# --- Command-line interface ---
def initialize_config(args):
    """Parse CLI arguments, configure logging, and return the run settings.

    Returns the tuple (targets, exclude, input, interactive, run).
    """
    ns = get_cli_parser().parse_args(args)
    stream_handler = logging.StreamHandler(sys.stderr)
    stream_handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
    logger.addHandler(stream_handler)
    if not ns.warn:
        # Drop WARNING records while keeping every other level.
        class WarningFilter(logging.Filter):
            def filter(self, record):
                return record.levelno != logging.WARNING
        stream_handler.addFilter(WarningFilter())
    # -v enables INFO, -vv (or more) enables DEBUG.
    if ns.verbose >= 2:
        logger.setLevel(logging.DEBUG)
    elif ns.verbose == 1:
        logger.setLevel(logging.INFO)
    return ns.targets, ns.exclude, ns.input, ns.interactive, ns.run
def get_cli_parser():
    """Build and return the argparse parser for the test-runner CLI."""
    parser = argparse.ArgumentParser(
        description='Run test programs and validate errors')
    # Positional targets: files and/or directories to test.
    parser.add_argument('targets', nargs='*',
                        help='''\
Files or directories containing test programs. If target is
a directory, each file with a .go extension in it and its subdirectories
will be tested.
The expected result (whether it compiles successfully and what
error it gives) is determined automagically from the path.
''')
    parser.add_argument('--input',
                        help='The name of a file which contains the programs to run')
    parser.add_argument('-i', '--interactive', action='store_true',
                        help='Run an interactive prompt for failing cases')
    parser.add_argument('--no-warn', action='store_false', dest='warn',
                        help='Suppress warnings')
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help='Increase output verbosity')
    parser.add_argument('-x', '--exclude', action='append',
                        help='Exclude files found on this path')
    parser.add_argument('-r', '--run', action='store_true',
                        help='Run valid programs against the Go output')
    return parser
# --- Test runner ---
def main(targets, exclude, input_file=None, interactive=False, run_programs=False):
    """Collect test targets, run them all, and return the exit status.

    Targets are taken from the explicit ``targets`` list; ``input_file``
    (one target per line) is appended when given, and stdin is read only
    when neither explicit targets nor an input file are provided.
    """
    if targets is None:
        targets = []
    if input_file:
        logger.debug('Reading from %s', input_file)
        try:
            with open(input_file) as infile:
                targets.extend(read_test_targets(infile))
        except IOError as e:
            logger.error('could not read input file', exc_info=e)
    elif not targets:
        logger.debug('Reading from stdin')
        targets.extend(read_test_targets(sys.stdin))
    if interactive:
        runner = InteractiveTestRunner(targets, exclude, run_programs=run_programs)
    else:
        runner = TestRunner(targets, exclude, run_programs=run_programs)
    runner.test_all()
    runner.print_results()
    if runner.status == PROGRAM_ERROR:
        # Logger.warn is deprecated in Python 3; use warning() instead.
        logger.warning('test runner encountered errors')
    return runner.status
def read_test_targets(f):
    """Yield the stripped, non-blank lines of the open file *f*."""
    for raw_line in f:
        target = raw_line.strip()
        if target:
            yield target
class TestRunner:
    """Runs compiler test programs and validates their outcomes.

    Targets are files or directories; directories are walked recursively
    for ``.go`` files.  Whether a program is expected to compile, and at
    which stage it is expected to fail, is derived from its path (a path
    component named ``invalid`` marks an expected failure) or from a
    per-directory ``test_configuration.txt``.
    """

    def __init__(self, targets=None, exclude=None, cmd=None, run_programs=False):
        if exclude is None:
            self.exclude = []
        else:
            self.exclude = [os.path.normpath(x) for x in exclude]
        if targets is None:
            self.unprocessed = []
        else:
            self.unprocessed = list(targets)
        self.run_programs = run_programs
        self.status = TESTS_GOOD
        # Compiler entry point; rely on PATH lookup on Windows only.
        self.cmd = cmd or ('golite' if sys.platform == 'win32' else './golite')
        # Succeed, fail, error
        self.counts = [0, 0, 0]
        # Maps normalized file paths to their configured compiler stage.
        self.config = {}
        # Files waiting to be compiled and evaluated.
        self.queue = []
        self.captured_output = None

    def print_results(self):
        """Print a one-line summary of all test outcomes."""
        print('Runs: {}. Failed: {}. Raised error: {}.'.format(sum(self.counts), *self.counts[1:]))

    def succeed(self, filename, msg):
        """Record and log a passing test."""
        self._update(TESTS_GOOD)
        logger.info('%s\n Test passed: %s', filename, msg)

    def fail(self, filename, expected, actual_result, err_msg=None, status=TESTS_FAILED):
        """Record a failing test, formatting the expectation mismatch."""
        fail_msg = [filename, ' Expected ' + expected + ' but ' + actual_result]
        if err_msg:
            err_msg = err_msg.strip()
            if err_msg:
                fail_msg.extend((' > '+line) for line in err_msg.split('\n'))
        self.fail_with_message(filename, '\n'.join(fail_msg), status)

    def fail_with_message(self, filename, msg, status):
        """Log *msg* at the level matching *status* and record the outcome."""
        logger.log(LOG_TEST_FAILURE if status == TESTS_FAILED else logging.ERROR, msg)
        self._update(status)

    def _update(self, status):
        # PROGRAM_ERROR shares the error counter slot with TEST_ERROR
        # (self.counts only has three slots).
        self.counts[status if status != PROGRAM_ERROR else TEST_ERROR] += 1
        # Keep the worst status seen so far as the overall exit status.
        if status > self.status:
            self.status = status

    def test_all(self):
        """Process every pending target."""
        while self.unprocessed:
            self.test_target(self.unprocessed.pop(0))

    def test_target(self, target):
        """Expand *target* (file or directory) into the queue and run it."""
        if os.path.isdir(target):
            self.enqueue_dir(target)
        elif os.path.isfile(target):
            self.load_dir_config(os.path.dirname(target))
            self.queue.append(target)
        else:
            logger.error('%s is not a file or directory', target)
            self.status = PROGRAM_ERROR
            return
        while self.queue:
            self.test_file(self.queue.pop(0))

    def enqueue_dir(self, target):
        """Walk *target* and queue every non-excluded .go file."""
        for (directory, subdirs, files) in os.walk(target, topdown=True):
            directory = os.path.normpath(directory)
            if directory in self.exclude:
                logger.info('skipping directory %s', directory)
                # Jump over subdirectories
                del subdirs[:]
                continue
            self.load_dir_config(directory)
            for f in files:
                if not f.endswith('.go'):
                    continue
                full_path = os.path.join(directory, f)
                if full_path in self.exclude:
                    logger.info('skipping file %s', full_path)
                    continue
                self.queue.append(full_path)

    def load_dir_config(self, directory, accept_dups=False):
        """Merge the directory's test_configuration.txt into self.config.

        Conflicting entries for a key are rejected (logged as errors)
        unless *accept_dups* is set.
        """
        config_path = os.path.join(directory, 'test_configuration.txt')
        if not os.path.exists(config_path):
            return
        try:
            with open(config_path) as f:
                mapping = ConfigFile.load(f).get_map()
        except IOError as e:
            logger.error('Failed to read configuration at %s', config_path, exc_info=e)
            return
        for (key, stage) in mapping.items():
            key = os.path.normpath(os.path.join(directory, key))
            if key in self.config and self.config[key] != stage and not accept_dups:
                logger.error('test runner already has configuration for %s', key)
                continue
            self.config[key] = stage

    def test_file(self, target):
        """Compile *target* and evaluate the result against expectations."""
        target = os.path.normpath(target)
        dirs = list(all_directories(os.path.dirname(target)))
        # A path component named 'invalid' marks an expected failure.
        expect_success = 'invalid' not in dirs
        if target in self.config:
            test_stage = self.config[target]
        else:
            test_stage = autodetect_stage(dirs)
        # TODO: these shouldn't need to run sequentially
        args = [self.cmd, target]
        if sys.platform != 'win32':
            args = ' '.join(args)
        process = Popen(args, shell=True, stdout=PIPE, stderr=PIPE, universal_newlines=True)
        # communicate() drains both pipes while waiting for the child.
        # The previous wait() + stderr.read() pattern can deadlock when the
        # child fills a pipe buffer before exiting.
        _, err_msg = process.communicate()
        returncode = process.returncode
        return self.evaluate_test(target, expect_success, test_stage, returncode, err_msg)

    def evaluate_test(self, filename, expect_success, test_stage, returncode, err_msg):
        """Classify one compiler run as pass/fail/error.

        Return code 0 means the program compiled, 1 means a controlled
        compiler error (its stage is parsed from *err_msg*), and anything
        else is treated as an internal compiler error.
        """
        if returncode == 0:
            if expect_success:
                if test_stage == CODE_GEN_STAGE and self.run_programs:
                    self.evaluate_run(filename)
                else:
                    self.succeed(filename, 'no errors')
                return
            self.fail(filename, describe_for_stage('error', test_stage), 'the test passed all stages')
            return
        # Return code 1 means a controlled compiler error
        if returncode == 1:
            error_stage = parse_stage(err_msg)
            if error_stage is None:
                if expect_success:
                    expected = 'to pass ' + describe_for_stage('stage', test_stage, 'all stages')
                else:
                    expected = describe_for_stage('error', test_stage)
                self.fail(filename, expected, 'got error and could not identify type', err_msg)
                self.status = PROGRAM_ERROR
                return
            error_description = describe_for_stage('error', error_stage, 'unidentified error')
            # If the stage the test case targets could not be detected, give a
            # warning but succeed if the test case was expected to fail. If it
            # was expected to succeed, give a warning and also fail.
            if test_stage == UNDETECTED_STAGE:
                # Logger.warn is deprecated in Python 3; use warning().
                logger.warning('cannot validate error raised by %s\n'
                               ' The expected error type was not detected', filename)
                if not expect_success:
                    self.succeed(filename, error_description)
                    return
                self.fail(filename, 'test to pass', 'got '+error_description, err_msg)
                return
            if expect_success:
                # An error at or before the targeted stage is a failure;
                # a later-stage error means the targeted stage passed.
                if error_stage <= test_stage:
                    self.fail(filename, describe_for_stage('stage', test_stage, 'all stages') + ' to pass', 'got '+error_description, err_msg)
                    return
                self.succeed(filename, error_description)
                return
            if error_stage != test_stage:
                self.fail(filename, describe_for_stage('error', test_stage), 'got '+error_description, err_msg)
                return
            self.succeed(filename, error_description)
            return
        # Any other return code means an internal error
        expected = describe_for_stage('to pass' if expect_success else 'error', test_stage)
        self.fail(filename, expected, 'got internal error', err_msg, status=TEST_ERROR)

    def evaluate_run(self, filename):
        """Run the generated JS program and diff its output against Go's.

        The expected output is cached next to the test file and refreshed
        whenever the test file is newer than the cache.
        """
        expected_output_filename = get_expected_output_file(filename)
        if os.path.exists(expected_output_filename) and os.stat(expected_output_filename).st_mtime > os.stat(filename).st_mtime:
            with open(expected_output_filename) as expected_output_file:
                expected_output = expected_output_file.read()
        else:
            try:
                # NOTE(review): check_err_output is presumed to be a helper
                # defined elsewhere in this file — confirm it exists.
                expected_output = check_err_output('go run "'+filename+'"', universal_newlines=True)
            except CalledProcessError:
                self.fail_with_message(filename,
                    'could not run {} with Go'.format(expected_output_filename),
                    PROGRAM_ERROR)
                return
            with open(expected_output_filename, 'w') as output_file:
                output_file.write(expected_output)
        js_filename = os.path.splitext(filename)[0]+'.js'
        try:
            # NOTE(review): a single-string command without shell=True only
            # resolves on Windows; verify the intended platforms.
            actual_output = check_output('node "'+js_filename+'"', universal_newlines=True)
        except CalledProcessError:
            self.fail_with_message(filename,
                'could not run {} with Node.js'.format(js_filename),
                PROGRAM_ERROR)
            return
        if expected_output == actual_output:
            self.succeed(filename, 'output matches expectation')
            logger.debug('Expected output:\n%s', expected_output)
        else:
            msg = 'Output for {} was not what was expected:\n'.format(filename)
            msg += '\n'.join(Differ().compare(expected_output.split('\n'), actual_output.split('\n')))
            self.fail_with_message(filename, msg, TESTS_FAILED)
class InteractiveTestRunner (TestRunner):
    # Same behavior as TestRunner, but after reporting each failure it
    # drops into an interactive prompt for inspecting the failing test.
    def fail_with_message(self, filename, *args, **kwargs):
        # Report the failure as usual, then open the interactive prompt.
        super().fail_with_message(filename, *args, **kwargs)
        print()
        InteractiveCmd(self, filename).cmdloop()
class InteractiveCmd (cmd.Cmd):
intro = 'Response?\n c: continue\t\te: edit\t\tq: quit\n s: set expected\tr: rerun\t?: help'
prompt = '-> '
def __init__(self, runner, filename):
    """Remember the runner and the failing file; collect command names."""
    super().__init__()
    self.runner = runner
    self.filename = filename
    # All do_* handler names with the 'do_' prefix stripped.
    self.cmds = [name[len('do_'):] for name in dir(self) if name.startswith('do_')]
def precmd(self, line):
    """Expand an unambiguous command prefix to the full command name."""
    words = line.split()
    # Empty input and exact command names pass through unchanged.
    if not words or words[0] in self.cmds:
        return line
    matches = [c for c in self.cmds if c.startswith(words[0])]
    if not matches:
        return line
    if len(matches) > 1:
        print(' Multiple possibilities:', '\t'.join(matches))
        # An empty line leaves the prompt waiting for clarification.
        return ''
    words[0] = matches[0]
    return ' '.join(words)
def do_tag(self, arg):
    """Add this test to a file tracking test cases with a particular tag
    """
    normalized = os.path.normpath(self.filename)
    for tag in arg.split():
        if not tag:
            continue
        if len(tag) == 0 or not tag.isidentifier():
            print('bad tag:', tag)
            continue
        # One tag file per tag, appended to across sessions.
        with open('test_tag_'+tag+'.txt', 'a') as f:
            f.write('{}\n'.format(normalized))
def do_notes(self, arg):
    """Write a note about this test to test_notes.txt
    """
    normalized = os.path.normpath(self.filename)
    with open('test_notes.txt', 'a') as notes:
        notes.write('{}: {}\n'.format(normalized, arg))
def do_get_notes(self, arg):
    """Get stored notes about this test, if any
    """
    prefix = os.path.normpath(self.filename)+':'
    if not os.path.exists('test_notes.txt'):
        return
    with open('test_notes.txt') as notes:
        for line in notes:
            if line.startswith(prefix):
                print(line[len(prefix):].strip())
def do_continue(self, arg):
    """Run next test
    """
    # Returning a truthy value ends the cmd.Cmd loop.
    return True
def do_edit(self, arg):
"""Open the test program in a shell editor
"""
posix_name = self.filename
if os.path != posixpath:
posix_name = posixpath.join(*posix_name.split(os.path.sep))
check_call('vim | |
Blue":0x5b92e5,
"Unity":0x264d8e,
"Universal Green":0x006b38,
"Universal Khaki":0xb8a992,
"University of California Gold":0xb78727,
"University of Tennessee Orange":0xf77f00,
"Unloaded Texture Purple":0xc154c1,
"Unmarked Trail":0xd2cab7,
"Unmatched Beauty":0xb12d35,
"Unmellow Yellow":0xfefe66,
"Unplugged":0x4b4840,
"Unpredictable Hue":0x7b746b,
"Unreal Teal":0x5c6e70,
"Unripe Strawberry":0xf78fa7,
"Untamed Orange":0xde5730,
"Untamed Red":0xdd0022,
"Unusual Gray":0xa3a7a0,
"Unwind":0xf2f8ed,
"UP Forest Green":0x014431,
"Up in Smoke":0x6e706d,
"UP Maroon":0x7b1113,
"Up North":0x6f9587,
"Upbeat":0xf1d9a5,
"Upper Crust":0xa3758b,
"Upper East Side":0x8d6051,
"Uproar Red":0xee1100,
"Upscale":0xa8adc2,
"Upsdell Red":0xae2029,
"Upsed Tomato":0xb52923,
"Upstream Salmon":0xf99a7a,
"Uptown Girl":0xa791a8,
"Uptown Taupe":0xf1e4d7,
"Upward":0xbdc9d2,
"Urahayanagi Green":0xbcb58c,
"Uran Mica":0x93b778,
"Uranus":0xace5ee,
"Urban Bird":0xddd4c5,
"Urban Charm":0xaea28c,
"Urban Chic":0x464e4d,
"Urban Exploration":0x89776e,
"Urban Garden":0x7c7466,
"Urban Green":0x005042,
"Urban Grey":0xcacacc,
"Urban Jungle":0xa4947e,
"Urban Legend":0x67585f,
"Urban Mist":0xd3dbd9,
"Urban Pigeon":0x9dacb7,
"Urban Putty":0xb0b1a9,
"Urban Raincoat":0xbdcbca,
"Urban Safari":0x978b6e,
"Urban Taupe":0xc9bdb6,
"Urban Vibes":0x8899aa,
"Urbane Bronze":0x54504a,
"Urbanite":0x4d5659,
"Uri Yellow":0xffd72e,
"Urnebes Beige":0xffecc2,
"Urobilin":0xe1ad21,
"US Air Force Blue":0x00308f,
"US Field Drab":0x716140,
"USAFA Blue":0x004f98,
"USC Cardinal":0x990010,
"USC Gold":0xffcc00,
"Used Oil":0x231712,
"Useful Gray":0xcfcabd,
"Ushabti Bone":0xbbbb7f,
"USMC Green":0x373d31,
"Usu Koubai Blossom":0xe597b2,
"Usu Pink":0xa87ca0,
"Usuao Blue":0x8c9c76,
"Usubeni Red":0xf2666c,
"Usugaki Persimmon":0xfca474,
"Usukō":0xfea464,
"Usumoegi Green":0x8db255,
"Utah Crimson":0xd3003f,
"Utah Sky":0xaed1e8,
"Utaupeia":0xa58f7b,
"Utepils":0xfafad2,
"Utterly Beige":0xb5a597,
"UV Light":0x0098c8,
"Va Va Bloom":0xefd5cf,
"Va Va Voom":0xe3b34c,
"Vacation Island":0xd1d991,
"Vacherin Cheese":0xfde882,
"Vagabond":0xaa8877,
"Vaguely Mauve":0xd1c5c4,
"Vaguely Violet":0xdbe1ef,
"Valencia":0xd4574e,
"Valentine":0xa53a4e,
"Valentine Heart":0xba789e,
"Valentine Lava":0xba0728,
"Valentine Red":0x9b233b,
"Valentine's Day":0xa63864,
"Valentino":0xb64476,
"Valentino Nero":0x382c38,
"Valerian":0x9f7a93,
"Valerie":0xfde6e7,
"Valhalla":0x2a2b41,
"Valhallan Blizzard":0xf2ede7,
"Valiant Poppy":0xbc322c,
"Valiant Violet":0x3e4371,
"Valkyrie":0xeecc22,
"Vallarta Blue":0x30658e,
"Valley Flower":0xffdd9d,
"Valley Hills":0x848a83,
"Valley Mist":0xc9d5cb,
"Valley of Fire":0xff8a4a,
"Valley of Glaciers":0x2d7e96,
"Valley Vineyards":0x8a763d,
"Valleyview":0xc2ccac,
"Valonia":0x79c9d1,
"Valor":0xa3bcdb,
"Vampire Bite":0xc40233,
"Vampire Fangs":0xcc2255,
"Vampire Fiction":0x9b0f11,
"Vampire Love Story":0xdd0077,
"Vampire Red":0xdd4132,
"Vampire State Building":0xcc1100,
"Vampiric Bloodlust":0xcc0066,
"Vampiric Shadow":0xbfb6aa,
"Van Cleef":0x523936,
"Van de Cane":0xfaf7eb,
"Van Dyke Brown":0x664228,
"Van Gogh Blue":0xabddf1,
"Van Gogh Green":0x65ce95,
"Van Gogh Olives":0x759465,
"Vanadyl Blue":0x00a3e0,
"Vandermint":0xabdee4,
"Vandyck Brown":0x7b5349,
"Vanilla":0xf3e5ab,
"Vanilla Bean Brown":0x362c1d,
"Vanilla Blush":0xfcede4,
"Vanilla Cream":0xf4d8c6,
"Vanilla Custard":0xf3e0be,
"Vanilla Delight":0xf5e8d5,
"Vanilla Doe":0xd1bea8,
"Vanilla Flower":0xe9dfcf,
"Vanilla Frost":0xfde9c5,
"Vanilla Ice":0xfdf2d1,
"Vanilla Ice Cream":0xffe6b3,
"Vanilla Ice Smoke":0xc9dae2,
"Vanilla Love":0xe6e0cc,
"Vanilla Milkshake":0xf1ece2,
"Vanilla Mocha":0xebdbc8,
"Vanilla Paste":0xf3e7d3,
"Vanilla Powder":0xfaf3dd,
"Vanilla Pudding":0xf7e26b,
"Vanilla Quake":0xcbc8c2,
"Vanilla Seed":0xccb69b,
"Vanilla Shake":0xfffbf0,
"Vanilla Tan":0xf1e9dd,
"Vanilla Wafer":0xf3ead2,
"Vanilla White":0xf6eee5,
"Vanillin":0xf2e3ca,
"Vanishing":0x331155,
"Vanishing Blue":0xcfdfef,
"Vanishing Night":0x990088,
"Vanishing Point":0xddeedd,
"Vanity":0x5692b2,
"Vanity Pink":0xe6ccdd,
"Vantablack":0x000100,
"Vape Smoke":0xe8e8d7,
"Vapor":0xf0ffff,
"Vapor Blue":0xbebdbd,
"Vapor Trail":0xf5eedf,
"Vaporous Grey":0xdfddd7,
"Vaporwave":0xff66ee,
"Vaporwave Pool":0x99eebb,
"Vaquero Boots":0x855f43,
"Varden":0xfdefd3,
"Variegated Frond":0x747d5a,
"Varnished Ivory":0xe6dccc,
"Vast":0xc9bdb8,
"Vast Desert":0xc2b197,
"Vast Escape":0xd2c595,
"Vast Sky":0xa9c9d7,
"Vega Violet":0xaa55ff,
"Vegan":0x22bb88,
"Vegan Green":0x006c47,
"Vegan Mastermind":0x22bb55,
"Vegan Villain":0xaa9911,
"Vegas Gold":0xc5b358,
"Vegeta Blue":0x26538d,
"Vegetable Garden":0x8b8c40,
"Vegetarian":0x22aa00,
"Vegetarian Veteran":0x78945a,
"Vegetarian Vulture":0xcccc99,
"Vegetation":0x5ccd97,
"Vehicle Body Grey":0x4c433d,
"Veil of Dusk":0xdad8c9,
"Veiled Chameleon":0x80b690,
"Veiled Delight":0xb2b0bd,
"Veiled Rose":0xf8cdc9,
"Veiled Spotlight":0xcfd5d7,
"Veiled Violet":0xb19bb0,
"Velddrif":0xa17d61,
"Vellum Parchment":0xefe4d9,
"Velour":0xbaa7bf,
"Veltliner White":0xd7d8c3,
"Velum Smoke":0xd6ceb9,
"Velvet":0x750851,
"Velvet Beige":0xd0c5b1,
"Velvet Black":0x241f20,
"Velvet Blush":0xe3d5d8,
"Velvet Cake":0x9d253d,
"Velvet Cape":0x623941,
"Velvet Clover":0x656d63,
"Velvet Cosmos":0x441144,
"Velvet Crest":0x9291bc,
"Velvet Cupcake":0xaa0066,
"Velvet Curtain":0x7e85a3,
"Velvet Dawn":0xbdb0bc,
"Velvet Ears":0xc5adb4,
"Velvet Evening":0x33505e,
"Velvet Green":0x2f5d50,
"Velvet Green Grey":0x737866,
"Velvet Grey":0xacaab3,
"Velvet Leaf":0x96c193,
"Velvet Magic":0xbb1155,
"Velvet Mauve":0x692b57,
"Velvet Morning":0x60688d,
"Velvet Robe":0x939dcc,
"Velvet Rope":0x36526a,
"Velvet Rose":0x7e374c,
"Velvet Scarf":0xe3dfec,
"Velvet Sky":0xc5d3dd,
"Velvet Slipper":0x846c76,
"Velvet Touch":0x523544,
"Velvet Umber":0x6b605a,
"Velvet Violet":0x43354f,
"Velvet Wine":0x9a435d,
"Velveteen Crush":0x936064,
"Velvety Chestnut":0xa2877d,
"Velvety Merlot":0x794143,
"Venetian":0x928083,
"Venetian Glass":0x9cb08a,
"Venetian Gold":0xb39142,
"Venetian Lace":0xf7edda,
"Venetian Mask":0xe7ceb6,
"Venetian Nights":0x7755ff,
"Venetian Pearl":0xd2ead5,
"Venetian Pink":0xbb8e84,
"Venetian Red":0xc80815,
"Venetian Rose":0xefc6e1,
"Venetian Wall":0x949486,
"Venetian Yellow":0xf6e3a1,
"Venice Blue":0x2c5778,
"Venice Square":0xe6c591,
"Venom":0xa9a52a,
"Venom Dart":0x01ff01,
"Venom Wyrm":0x607038,
"Venomous Green":0x66ff22,
"Venous Blood Red":0x3f3033,
"Ventilated":0xcde6e8,
"Venture Violet":0x7381b3,
"Venus":0xeed053,
"Venus Deathtrap":0xfed8b1,
"Venus Deva":0x8f7974,
"Venus Flower":0x9ea6cf,
"Venus Flytrap":0x94b44c,
"Venus Mist":0x5f606e,
"Venus Pink":0xf0e5e5,
"Venus Slipper Orchid":0xdf73ff,
"Venus Teal":0x85a4a2,
"Venusian":0x71384c,
"Veranda":0x61a9a5,
"Veranda Blue":0x66b6b0,
"Veranda Charm":0x9eb1af,
"Veranda Gold":0xaf9968,
"Veranda Green":0x8e977e,
"Veranda Hills":0xccb994,
"Ver<NAME>":0x9da4be,
"Verbena":0xf1dfdf,
"Verdant":0x847e35,
"Verdant Fields":0x5ad33e,
"Verdant Forest":0x28615d,
"Verdant Green":0x12674a,
"Verdant Views":0x75794a,
"Verde":0x7fb383,
"Ver<NAME>":0x355e3b,
"Ver<NAME>":0x877459,
"Ver<NAME>":0xedf5e7,
"Ver<NAME>":0xa7ad8d,
"Verde Tropa":0x758000,
"Verdigreen":0x81a595,
"Verdigris":0x43b3ae,
"Verdigris Coloured":0x62be77,
"<NAME>":0x62603e,
"Verdigris Green":0x61ac86,
"Verdigris Roundhead":0x558367,
"Verditer":0x00bbaa,
"Verditer Blue":0x55aabb,
"Verdun Green":0x48531a,
"Veri Berri":0x937496,
"Veritably Verdant":0x00844b,
"Vermeer Blue":0x2b7caf,
"Vermicelle":0xdabe82,
"Vermicelles":0xbb835f,
"Vermicelli":0xd1b791,
"Vermilion":0xf4320c,
"Vermilion Bird":0xf24433,
"Vermilion Cinnabar":0xe34244,
"Vermilion Green":0x474230,
"Vermilion Red":0xb5493a,
"Vermillion":0xda3b1f,
"Vermillion Orange":0xf9633b,
"Vermillion Seabass":0x973a36,
"Vermin Brown":0x8f7303,
"Verminal":0x55cc11,
"Verminlord Hide":0xa16954,
"Vermont Cream":0xf8f5e8,
"Vermont Slate":0x48535a,
"Verona Beach":0xe9d3ba,
"Veronese Peach":0xecbfa8,
"Veronica":0xa020ff,
"Vers de Terre":0xacdfad,
"Versailles Rose":0xc4b0ad,
"Versatile Gray":0xc1b6ab,
"Verse Green":0x18880d,
"Vert Pierre":0x4a615c,
"Vertigo Cherry":0x990055,
"Verve":0xfcedd8,
"Verve Violet":0x944f80,
"Very Berry":0xb73275,
"Very Coffee":0x664411,
"Very Grape":0x927288,
"Very Light Blue":0xd5ffff,
"Very Navy":0x3a4859,
"Very Pale Blue":0xd6fffe,
"Vespa Yellow":0xf3d19f,
"Vesper":0x0011cc,
"Vesper Violet":0x99a0b2,
"Vessel":0xcdc8bf,
"Vestige":0x937899,
"Vesuvian Green":0x879860,
"Vesuvian Violet":0xa28a9f,
"Vesuvius":0xa85533,
"Vetiver":0x807d6f,
"Viaduct":0xc1bbb0,
"Viameter":0xd9d140,
"Vibrant":0xffd44d,
"Vibrant Amber":0xd1902e,
"Vibrant Blue":0x0339f8,
"Vibrant Green":0x0add08,
"Vibrant Honey":0xffbd31,
"Vibrant Hue":0x544563,
"Vibrant Orange":0xff7420,
"Vibrant Orchid":0x804b81,
"Vibrant Purple":0xad03de,
"Vibrant Red":0xc24c6a,
"Vibrant Soft Blue":0x88d6dc,
"Vibrant Velvet":0xbb0088,
"Vibrant Vine":0x4b373a,
"Vibrant Vision":0x6c6068,
"Vibrant White":0xeaedeb,
"Vibrant Yellow":0xffda29,
"VIC 20 Blue":0xc7ffff,
"VIC 20 Creme":0xffffb2,
"VIC 20 Green":0x94e089,
"VIC 20 Pink":0xea9ff6,
"VIC 20 Sky":0x87d6dd,
"Vicarious Violet":0x5f4d50,
"Vice City":0xee00dd,
"Vicious Violet":0x8f509d,
"Victoria":0x564985,
"Victoria Blue":0x08589d,
"Victoria Green":0x006a4d,
"Victoria Peak":0x007755,
"Victoria Red":0x6a3c3a,
"Victorian":0x988f97,
"<NAME>ottage":0xd4c5ca,
"<NAME>rown":0xc38b36,
"<NAME>":0xa2783b,
"Victorian Greenhouse":0x00b191,
"<NAME>":0x6d657e,
"<NAME>":0xefe1cd,
"<NAME>":0xb68b88,
"<NAME>":0x104a65,
"<NAME>":0xf1e3d8,
"<NAME>":0x828388,
"<NAME>":0x8e6278,
"<NAME>":0xd28085,
"<NAME>":0xae6aa1,
"Victorian Violet":0xb079a7,
"Victoriana":0xd6b2ad,
"Victory Blue":0x3a405a,
"Victory Lake":0x92abd8,
"Vida Loca":0x549019,
"Vidalia":0xa1ddd4,
"<NAME>":0xf7efef,
"<NAME>ace":0xe9d9d4,
"Vienna Roast":0x330022,
"<NAME>":0xfed1bd,
"Viennese":0x8c8185,
"Viennese Blue":0x4278af,
"Vietnamese Lantern":0xeec172,
"Vigilant":0x81796f,
"Vigorous Violet":0x645681,
"Viking":0x4db1c8,
"Viking Castle":0x757266,
"Viking Diva":0xcabae0,
"Vile Green":0x8fcdb0,
"Villa White":0xefeae1,
"Village Crier":0xab9769,
"Village Green":0x7e867c,
"Village Square":0x7b6f60,
"Villandry":0x728f66,
"Vin Cuit":0xb47463,
"Vin Rouge":0x955264,
"Vinaigrette":0xefdaae,
"Vinalhaven":0xacb3ae,
"Vinca":0x5778a7,
"Vincotto":0x483743,
"Vindaloo":0xae7579,
"Vine Leaf":0x4d5f4f,
"Vine Leaf Green":0x6e5e2c,
"Vineyard":0x819e84,
"Vineyard Autumn":0xee4455,
"Vineyard Green":0x5f7355,
"Vineyard Wine":0x58363d,
"Vinho do Porto":0xb31a38,
"Vining Ivy":0x4b7378,
"Vino Tinto":0x4c1c24,
"Vintage":0x847592,
"Vintage Beige":0xdfe1cc,
"Vintage Blue":0x87b8b5,
"Vintage Charm":0xc7b0a7,
"Vintage Copper":0x9d5f46,
"Vintage Coral":0xd68c76,
"Vintage Ephemera":0xd8ceb9,
"Vintage Gold":0xb79e78,
"Vintage Grape":0x6f636d,
"Vintage Indigo":0x4a556b,
"Vintage Khaki":0x9a9186,
"Vintage Lace":0xf1e7d2,
"Vintage Linen":0xe3dcca,
"Vintage Mauve":0xbaafac,
"Vintage Merlot":0x763d4b,
"Vintage Orange":0xffb05f,
"Vintage Plum":0x675d62,
"Vintage Porcelain":0xf2edec,
"Vintage Pottery":0xa66c47,
"Vintage Red":0x9e3641,
"Vintage Ribbon":0x9097b4,
"Vintage Taupe":0xcdbfb9,
"Vintage Tea Rose":0xcbb0a8,
"Vintage Teal":0x669699,
"Vintage Velvet":0x485169,
"Vintage Vessel":0x94b2a6,
"Vintage Vibe":0x888f4f,
"Vintage Victorian":0xe59dac,
"Vintage Violet":0x634f62,
"Vintage White":0xf4efe4,
"Vintage Wine":0x65344e,
"Vintner":0x68546a,
"Viola":0x966ebd,
"Viola Black":0x2f2a41,
"Viola Grey":0x8c6897,
"Viola Ice Grey":0xc6c8d0,
"Viola Sororia":0xb9a5bd,
"Violaceous":0xbf8fc4,
"Violaceous Greti":0x881188,
"Violent Violet":0x7f00ff,
"Violet":0x9a0eea,
"Violet Aura":0x838ba4,
"Violet Beauty":0xbab3cb,
"Violet Black":0x49434a,
"Violet Blue":0x510ac9,
"Violet Bouquet":0xb9b1c8,
"Violet Clues":0xefecef,
"Violet Crush":0xd8d3e6,
"Violet Dawn":0xa89b9c,
"Violet Echo":0xdfdee5,
"Violet Eclipse":0xa387ac,
"Violet Eggplant":0x991199,
"Violet Essence":0xe6e5e6,
"Violet Evening":0x65677a,
"Violet Extract":0xdee2ec,
"Violet Fields":0xb8a4c8,
"Violet Frog":0x926eae,
"Violet Gems":0xc4c0e9,
"Violet Glow":0x4422ee,
"Violet Haze":0x675b72,
"Violet Heaven":0xcdb7fa,
"Violet Hickey":0x330099,
"Violet Hush":0xe5e2e7,
"Violet Ice":0xc2acb1,
"Violet Indigo":0x3e285c,
"Violet Ink":0x9400d3,
"Violet Intense":0x4d4456,
"Violet Kiss":0xf0a0d1,
"Violet Majesty":0x644982,
"Violet Mist":0xdaccde,
"Violet Mix":0xaca8cd,
"Violet Orchid":0xca7988,
"Violet Persuasion":0x927b97,
"Violet Pink":0xfb5ffc,
"Violet Poison":0x8601bf,
"Violet Posy":0x60394d,
"Violet Powder":0xc7ccd8,
"Violet Purple":0x3a2f52,
"Violet Quartz":0x8b4963,
"Violet Red":0xa50055,
"Violet Scent Soft Blue":0xbcc6df,
"Violet Shadow":0x4d4860,
"Violet Storm":0x5c619d,
"Violet Sweet Pea":0xc7c5dc,
"Violet Tulip":0x9e91c3,
"Violet Tulle":0xc193c0,
"Violet Vapor":0xe5dae1,
"Violet Velvet":0xb19cd9,
"Violet Verbena":0x898ca3,
"Violet Vibes":0x898098,
"Violet Vignette":0xd8e0ea,
"Violet Vision":0xb7bdd1,
"Violet Vista":0xb09f9e,
"Violet Vixen":0x883377,
"Violet Vogue":0xe9e1e8,
"Violet Water":0xd2d6e6,
"Violet Webcap":0x833e82,
"Violet Whimsey":0xdad6df,
"Violet White":0xe2e3e9,
"Violeta Silvestre":0xaca7cb,
"Violets Are Blue":0x7487c6,
"Violin Brown":0x674403,
"Virgin Olive Oil":0xe2dcab,
"Virgin Peach":0xecbdb0,
"Virginia Blue":0xb7c3d7,
"Viric Green":0x99cc00,
"Viridian":0x1e9167,
"Viridian Green":0xbcd7d4,
"Viridine Green":0xc8e0ab,
"Viridis":0x00846b,
"Virtual Boy":0xfe0215,
"Virtual Forest":0x8aa56e,
"Virtual Pink":0xc6174e,
"Virtual Taupe":0x8a7a6a,
"Virtual Violet":0x66537f,
"Virtuoso":0x5d5558,
"Virtuous":0x9f7ba9,
"Virtuous Violet":0xb7b0bf,
"Vis Vis":0xf9e496,
"Vision":0xd2cce5,
"Vision of Light":0xdfd3cb,
"Vision Quest":0x9b94c2,
"Visiona Red":0x83477d,
"Visionary":0xf6e0a9,
"Vista Blue":0x97d5b3,
"Vista White":0xe3dfd9,
"Vital Green":0x138859,
"Vital Yellow":0xede0c5,
"Vitalize":0x2aaa45,
"Vitamin C":0xff9900,
"Viva Gold":0xe3ac72,
"Viva La Bleu":0x97bee2,
"Viva Las Vegas":0xb39953,
"Vivacious":0xa32857,
"Vivacious Pink":0xdc89a8,
"Vivacious Violet":0x804665,
"Vivaldi Red":0xef3939,
"Vivid Amber":0xcc9900,
"Vivid Auburn":0x922724,
"Vivid Blue":0x152eff,
"Vivid Burgundy":0x9f1d35,
"Vivid Cerise":0xda1d81,
"Vivid Cerulean":0x00aaee,
"Vivid Crimson":0xcc0033,
"Vivid Green":0x2fef10,
"Vivid Imagination":0x5c9f59,
"Vivid Lime Green":0xa6d608,
"Vivid Malachite":0x00cc33,
"Vivid Mulberry":0xb80ce3,
"Vivid Orange":0xff5f00,
"Vivid Orange Peel":0xffa102,
"Vivid Orchid":0xcc00ff,
"Vivid Purple":0x9900fa,
"Vivid Raspberry":0xff006c,
"Vivid Red":0xf70d1a,
"Vivid Red Tangelo":0xdf6124,
"Vivid Sky Blue":0x00ccff,
"Vivid Tangelo":0xf07427,
"Vivid Tangerine":0xff9980,
"Vivid Vermilion":0xe56024,
"Vivid Viola":0x993c7c,
"Vivid Violet":0x9f00ff,
"Vivid Vision":0x5e4b62,
"Vivid Yellow":0xffe302,
"Vixen":0x573d37,
"Vizcaya":0x7b9e98,
"Vizcaya Palm":0x47644b,
"Vodka":0xbfc0ee,
"Void":0x050d25,
"Voila!":0xaf8ba8,
"Volcanic":0xa55749,
"Volcanic Ash":0x6f7678,
"Volcanic Blast":0xe15835,
"Volcanic Brick":0x72453a,
"Volcanic Glass":0x615c60,
"Volcanic Island":0x605244,
"Volcanic Rock":0x6b6965,
"Volcanic Sand":0x404048,
"Volcanic Stone Green":0x45433b,
"Volcano":0x4e2728,
"Voldemort":0x2d135f,
"Volt":0xceff00,
"Voltage":0x3b4956,
"Voluptuous Violet":0x7711dd,
"Volute":0x445a5e,
"Voodoo":0x443240,
"Voxatron Purple":0x83769c,
"Voyage":0x719ca4,
"Voyager":0x4d5062,
"Voysey Grey":0x9a937f,
"Vulcan":0x36383c,
"Vulcan Burgundy":0x5f3e42,
"Vulcan Mud":0x897f79,
"Waaagh! Flesh":0x1f5429,
"Wabi-Sabi":0xc8c8b5,
"Waddles Pink":0xeeaacc,
"Wafer":0xd4bbb1,
"Waffle Cone":0xe2c779,
"Wafting Grey":0xcdbdba,
"Wageningen Green":0x34b233,
"Wagon Wheel":0xc2b79e,
"Wahoo":0x272d4e,
"Waikawa Grey":0x5b6e91,
"Waikiki":0x218ba0,
"Wailing Woods":0x004411,
"Wainscot Green":0x9c9d85,
"Waiouru":0x4c4e31,
"Waiporoporo Purple":0x654bc9,
"Waiting":0x9d9d9d,
"Wakame Green":0x00656e,
"Wakatake Green":0x6b9362,
"Wake Me Up":0xf6d559,
"Wakefield":0x295468,
"Walden Pond":0x789bb6,
"Walk in the Park":0x88bb11,
"Walk in the Woods":0x3bb08f,
"Walk Me Home":0x496568,
"Walker Lake":0x3d87bb,
"Wall Green":0xabae86,
"Wall Street":0x656d73,
"Walled Garden":0x11cc44,
"Walleye":0x9b5953,
"Wallflower":0xa0848a,
"Wallflower White":0xe4e3e6,
"Wallis":0xc6bdbf,
"Walls of Santorini":0xe9edf1,
"Walnut":0x773f1a,
"Walnut Grove":0x5c5644,
"Walnut Hull":0x5d5242,
"Walnut Oil":0xeecb88,
"Walnut Shell":0xaa8344,
"Walnut Shell Brown":0xa68b6e,
"Walrus":0x999b9b,
"Wan Blue":0xcbdcdf,
"Wan White":0xe4e2dc,
"Wanderer":0x5e5648,
"Wandering River":0x73a4c6,
"Wandering Road":0x876d5e,
"Wandering Willow":0xa6a897,
"Wanderlust":0x426267,
"War God":0x643530,
"Warlock Red":0xb50038,
"Warlord":0xba0033,
"Warm and Toasty":0xcbb68f,
"Warm Apricot":0xffb865,
"Warm Ash":0xcfc9c7,
"Warm Asphalt":0x9c9395,
"Warm Biscuits":0xe3cdac,
"Warm Black":0x004242,
"Warm Blue":0x4b57db,
"Warm Bread":0xf9e6d3,
"Warm Brown":0x964e02,
"Warm Brownie":0x604840,
"Warm Buttercream":0xe6d5ba,
"Warm Butterscotch":0xd0b082,
"Warm Cider":0xbf6a52,
"Warm Cocoon":0xf9d09c,
"Warm Cognac":0xa88168,
"Warm Comfort":0xb38a82,
"Warm Croissant":0xe4ceb5,
"Warm Earth":0x927558,
"Warm Embrace":0x93817e,
"Warm Fuzzies":0xf6e2ce,
"Warm Glow":0xf1cf8a,
"Warm Granite":0xa49e97,
"Warm Grey":0x978a84,
"Warm Grey Flannel":0xaca49a,
"Warm Haze":0x736967,
"Warm Hearth":0xbe9677,
"Warm Leather":0xc89f59,
"Warm Light":0xfff9d8,
"Warm Mahogany":0x6d4741,
"Warm Muffin":0xe1be8b,
"Warm Neutral":0xc1b19d,
"Warm Nutmeg":0x8f6a50,
"Warm Oats":0xd8cfba,
"Warm Olive":0xc7b63c,
"Warm Onyx":0x4c4845,
"Warm Pewter":0xb4ada6,
"Warm Pink":0xfb5581,
"Warm Port":0x513938,
"Warm Pumpernickel":0x5c4e44,
"Warm Purple":0x952e8f,
"Warm Sand":0xc5ae91,
"Warm Shell":0xddc9b1,
"Warm Spice":0x987744,
"Warm Spring":0x4286bc,
"Warm Stone":0xa79a8a,
"Warm Sun":0xfaf6db,
"Warm Taupe":0xaf9483,
"Warm Terra Cotta":0xc1775e,
"Warm Turbulence":0xf3f5dc,
"Warm Up":0x9e6654,
"Warm Wassail":0xa66e68,
"Warm Waters":0x7ebbc2,
"Warm Welcome":0xea9073,
"Warm Wetlands":0x8d894a,
"Warm White":0xefebd8,
"Warm Winter":0xd4ede3,
"Warm Woolen":0xd0b55a,
"Warmed Wine":0x5c3839,
"Warming Peach":0xe4b9a2,
"Warmstone":0xe6d7cc,
"Warmth":0x9f552d,
"Warp Drive":0xeaf2f1,
"Warpfiend Grey":0x6b6a74,
"Warplock Bronze":0x515131,
"Warplock Bronze Metal":0x927d7b,
"Warpstone Glow":0x168340,
"Warrant":0xb8966e,
"Warren Tavern":0x6b654e,
"Warrior":0x7d685b,
"Wasabi":0xafd77f,
"Wasabi Green":0xa9ad74,
"Wasabi Nori":0x333300,
"Wasabi Nuts":0x849137,
"Wasabi Paste":0xcae277,
"Wasabi Peanut":0xb4c79c,
"Wasabi Powder":0xbdb38f,
"Wasabi Zing":0xd2cca0,
"Wash Me":0xfafbfd,
"Washed Black":0x1f262a,
"Washed Blue":0x94d1df,
"Washed Denim":0x819dbe,
"Washed Dollar":0xe1e3d7,
"Washed Green":0xccd1c8,
"Washed in Light":0xfae8c8,
"Washed Khaki":0xcac2af,
"Washed Olive":0xc5c0a3,
"Washed Out Green":0xbcf5a6,
"Washed-Out Crimson":0xffb3a7,
"Washing Powder Soft Blue":0xc3d8e4,
"Washing Powder White":0xc2dce3,
"Wasteland":0x9c8855,
"Wasurenagusa Blue":0x89c3eb,
"Watchet":0x8fbabc,
"Water":0xd4f1f9,
"Water Baby":0x5ab5cb,
"Water Baptism":0xcfdfdd,
"Water Blue":0x0e87cc,
"Water Carrier":0x4999a1,
"Water Chestnut":0xede4cf,
"Water Chi":0x355873,
"Water Cooler":0x75a7ad,
"Water Droplet":0xe1e5dc,
"Water Fern":0x75b790,
"Water Flow":0x7ac6d9,
"Water Glitter":0x76afb6,
"Water Green":0x81b89a,
"Water Hyacinth":0xa0a3d2,
"Water Iris":0xe2e3eb,
"Water Leaf":0xb6ecde,
"Water Lily":0xdde3d5,
"Water Lily White":0xe6d6c4,
"Water Mist":0xc7d8e3,
"Water Music":0x6fb0be,
"Water Ouzel":0x4f5156,
"Water Park":0x54af9c,
"Water Persimmon":0xb56c60,
"Water Raceway":0x0083c8,
"Water Reed":0xb0ab80,
"Water Scrub":0x949381,
"Water Slide":0xa2cdd2,
"Water Spirit":0x65a5d5,
"Water Sports":0x44bbcc,
"Water Sprout":0xe5eecc,
"Water Squirt":0xd8ebea,
"Water Surface":0xa9bdb8,
"Water Tower":0x958f88,
"Water Wash":0xacc7e5,
"Water Welt":0x3994af,
"Water Wheel":0xa28566,
"Water Wonder":0x80d4d0,
"Watercolour Blue":0x084d58,
"Watercolour Green":0x96b47e,
"Watercolour White":0xdbe5db,
"Watercourse":0x006e4e,
"Watercress":0x6e9377,
"Watercress Pesto":0xc7c7a1,
"Watercress Spice":0x748c69,
"Waterfall":0x3ab0a2,
"Waterfall Mist":0xe4eeea,
"Waterhen Back":0x2f3f53,
"Waterline Blue":0x436bad,
"Waterloo":0x7b7c94,
"Watermark":0xdee9df,
"Watermelon":0xfd4659,
"Watermelon Candy":0xfd5b78,
"Watermelon Juice":0xf05c85,
"Watermelon Milk":0xdfcfca,
"Watermelon Pink":0xc77690,
"Watermelon Punch":0xe08880,
"Watermelon Red":0xbf4147,
"Watermelon Slice":0xe77b68,
"Watermelonade":0xeb4652,
"Watermill Wood":0xd3cccd,
"Waterpark":0xc9e3e5,
"Waterscape":0xdcece7,
"Watershed":0xb0cec2,
"Waterslide":0xd2f3eb,
"Waterspout":0xa4f4f9,
"Waterway":0x7eb7bf,
"Waterwings":0xafebde,
"Waterworld":0x00718a,
"Watery":0xaebdbb,
"Watery Sea":0x88bfe7,
"Watson Lake":0x74aeba,
"Wattle":0xd6ca3d,
"Watusi":0xf2cdbb,
"Wave":0xa5ced5,
"Wave Crest":0xdce9ea,
"Wave Jumper":0x6c919f,
"Wave of Grain":0xa0764a,
"Wave Splash":0xcbe4e7,
"Wave Top":0xafd9d3,
"Wavecrest":0xd6e1e4,
"Wavelet":0x7dc4cd,
"Waves of Grain":0xc7aa7c,
"Waves Queen":0xd2eaea,
"Wax":0xddbb33,
"Wax Crayon Blue":0x00a4a6,
"Wax Flower":0xeeb39e,
"Wax Green":0xd8db8b,
"Wax Poetic":0xf1e6cc,
"Wax Sculpture":0xe2d5bd,
"Wax Way":0xd3b667,
"Wax Wing":0xf6ecd6,
"Wax Yellow":0xede9ad,
"Waxen Moon":0xb38241,
"Waxy Corn":0xf8b500,
"Way Beyond the Blue":0x1188cc,
"Waystone Green":0x00c000,
"Wayward Willow":0xd9dcd1,
"Wayward Wind":0xdedfe2,
"Waywatcher Green":0x99cc04,
"Waza Bear":0x5e5a59,
"Wazdakka Red":0xb21b00,
"We Peep":0xfdd7d8,
"Weak Blue":0xcfe2ef,
"Weak Green":0xe1f2df,
"Weak Mauve":0xeadee4,
"Weak Mint":0xe0f0e5,
"Weak Orange":0xfaede3,
"Weak Pink":0xecdee5,
"Weak Yellow":0xf1f5db,
"Weapon Bronze":0xb47b27,
"Weather Board":0x9f947d,
"Weathered Bamboo":0x593a27,
"Weathered Blue":0xd2e2f2,
"Weathered Brown":0x59504c,
"Weathered Coral":0xead0a9,
"Weathered Fossil":0x988a72,
"Weathered Hide":0xd5c6c2,
"Weathered Leather":0x90614a,
"Weathered Mint":0xe4f5e1,
"Weathered Moss":0xbabbb3,
"Weathered Pebble":0x7b9093,
"Weathered Pink":0xeadfe8,
"Weathered Plastic":0xf9f4d9,
"Weathered Saddle":0xb5745c,
"Weathered Sandstone":0xdfc0a6,
"Weathered Shingle":0x937f68,
"Weathered Stone":0xc4c4c4,
"Weathered White":0xe6e3d9,
"Weathered Wicker":0x97774d,
"Weathered Wood":0xb19c86,
"Weathervane":0x2c201a,
"Weaver's Spool":0xbfb18a,
"Weaver's Tool":0x9d7f62,
"Web Gray":0x616669,
"Webcap Brown":0x8f684b,
"Wedded Bliss":0xedeadc,
"Wedding Cake":0xeee2c9,
"Wedding Cake White":0xe7e8e1,
"Wedding Dress":0xfefee7,
"Wedding Flowers":0xbcb6cb,
"Wedding in White":0xfffee5,
"Wedding Pink":0xf6dfd8,
"Wedge of Lime":0xe1eca5,
"Wedgewood":0x4c6b88,
"Weekend Gardener":0x9fe4aa,
"Weekend Retreat":0xe9c2ad,
"Weeping Willow":0xb3b17b,
"Weeping Wisteria":0xd7ddec,
"Wèi Lán Azure":0x5a06ef,
"Weird Green":0x3ae57f,
"Weissbier":0xb3833b,
"Weisswurst White":0xe4e1d6,
"Welcome Home":0xc09c6a,
"Welcome Walkway":0xd4c6a7,
"Welcome White":0xf3e3ca,
"Welcoming Wasp":0xeeaa00,
"Welded Iron":0x6f6f6d,
"Weldon Blue":0x7c98ab,
"Well Blue":0x00888b,
"Well Read":0x8e3537,
"Well-Bred Brown":0x564537,
"Wellington":0x4f6364,
"Wells Grey":0xb9b5a4,
"Welsh Onion":0x22bb66,
"Wenge":0x645452,
"Wenge Black":0x3e2a2c,
"Wentworth":0x345362,
"West Coast":0x5c512f,
"West Side":0xe5823a,
"Westar":0xd4cfc5,
"Westcar Papyrus":0xa49d70,
"Westchester Gray":0x797978,
"Western Red":0x9b6959,
"Western Reserve":0x8d876d,
"Western Sky":0xfadca7,
"Western Sunrise":0xdaa36f,
"Westfall Yellow":0xfcd450,
"Westhaven":0x2a4442,
"Westhighland White":0xf3eee3,
"Westminster":0x9c7c5b,
"Wet Adobe":0xa3623b,
"Wet Aloeswood":0x5a6457,
"Wet Ash":0xb2beb5,
"Wet Asphalt":0x989cab,
"Wet Cement":0x89877f,
"Wet Clay":0xa49690,
"Wet Concrete":0x353838,
"Wet Coral":0xd1584c,
"Wet Crow's Wing":0x000b00,
"Wet Latex":0x001144,
"Wet Leaf":0xb9a023,
"Wet Pottery Clay":0xe0816f,
"Wet River Rock":0x897870,
"Wet Sand":0xae8f60,
"Wet Sandstone":0x786d5f,
"Wet Suit":0x50493c,
"Wet Weather":0x929090,
"Wethersfield Moss":0x859488,
"Wetland Stone":0xa49f80,
"Wetlands Swamp":0x372418,
"Wewak":0xf1919a,
"Whale Bone":0xe5e7e5,
"Whale Grey":0x59676b,
"Whale Shark":0x607c8e,
"Whale Skin":0x505a92,
"Whale Watching":0xa5a495,
"Whale's Mouth":0xc7d3d5,
"Whale's Tale":0x115a82,
"Whaling Waters":0x2e7176,
"Wharf View":0x65737e,
"What Inheritance?":0xe8d7bc,
"What We Do in the Shadows":0x441122,
"What's Left":0xfff4e8,
"Wheat":0xfbdd7e,
"Wheat Beer":0xbf923b,
"Wheat Bread":0xdfbb7e,
"Wheat Flour White":0xddd6ca,
"Wheat Grass":0xc7c088,
"Wheat Penny":0x976b53,
"Wheat Seed":0xe3d1c8,
"Wheat Sheaf":0xdfd4c4,
"Wheat Tortilla":0xa49a79,
"Wheatacre":0xad935b,
"Wheaten White":0xfbebbb,
"Wheatfield":0xdfd7bd,
"Wheatmeal":0x9e8451,
"When Blue Met Red":0x584165,
"When Red Met Blue":0x564375,
"Where Buffalo Roam":0xc19851,
"Whero Red":0xdd262b,
"Whetstone Brown":0x9f6f55,
"Whimsical White":0xece4e2,
"Whimsy":0xed9987,
"Whimsy Blue":0xb0dced,
"Whipcord":0xa09176,
"Whiplash":0xc74547,
"Whipped Citron":0xf0edd2,
"Whipped Cream":0xf2f0e7,
"Whipped Mint":0xc7ddd6,
"Whipped Violet":0xa1a8d5,
"Whippet":0xcec1b5,
"Whipping Cream":0xfaf5e7,
"Whirligig":0xe6cdca,
"Whirligig Geyser":0xdfd4c0,
"Whirlpool":0xa5d8cd,
"Whirlpool Green":0xa7d0c5,
"Whirlwind":0xe2d5d3,
"Whiskers":0xf6f1e2,
"Whiskey":0xd29062,
"Whiskey and Wine":0x49463f,
"Whiskey Barrel":0x85705f,
"Whiskey Sour":0xd4915d,
"Whisky":0xc2877b,
"Whisky Barrel":0x96745b,
"Whisky Cola":0x772233,
"Whisky Sour":0xeeaa33,
"Whisper":0xefe6e6,
"Whisper Blue":0xe5e8f2,
"Whisper Green":0xe0e6d7,
"Whisper Grey":0xe9e5da,
"Whisper of Grass":0xcbede5,
"Whisper of Rose":0xcda2ac,
"Whisper Pink":0xdacbbe,
"Whisper Ridge":0xc9c3b5,
"Whisper White":0xede6db,
"Whisper Yellow":0xffe5b9,
"Whispered Secret":0x3f4855,
"Whispering Blue":0xc9dcdc,
"Whispering Grasslands":0xac9d64,
"Whispering Oaks":0x536151,
"Whispering Peach":0xfedcc3,
"Whispering Pine":0xc8cab5,
"Whispering Rain":0xececda,
"Whispering Waterfall":0xe3e6db,
"Whispering Willow":0x919c81,
"Whispering Winds":0xb7c3bf,
"Whistler Rose":0xc49e8f,
"White":0xffffff,
"White Acorn":0xd7a98c,
"White Alyssum":0xefebe7,
"White Asparagus":0xeceabe,
"White Bass":0xe8efec,
"White Beach":0xf5efe5,
"White Bean Hummus":0xe8d0b2,
"White Beet":0xebdfdd,
"White Blaze":0xe3e7e1,
"White Blossom":0xf4ecdb,
"White Blue":0xcdd6db,
"White Blush":0xfbecd8,
"White Box":0xbfd0cb,
"White Bud":0xe3e0e8,
"White Bullet":0xdfdfda,
"White Cabbage":0xb0b49b,
"White Canvas":0xfaece1,
"White Castle":0xdbd5d1,
"White Chalk":0xf6f4f1,
"White Cherry":0xe7dbdd,
"White Chocolate":0xf0e3c7,
"White City":0xd6d0cc,
"White Clay":0xe8e1d3,
"White Cliffs":0xe8e3c9,
"White Coffee":0xe6e0d4,
"White Convolvulus":0xf4f2f4,
"White Crest":0xf9f8ef,
"White Currant":0xf9ebc5,
"White Desert":0xfdfaf1,
"White Dogwood":0xefeae6,
"White Duck":0xcecaba,
"White Edgar":0xededed,
"White Elephant":0xdedee5,
"White Fence":0xf2e9d3,
"White Fever":0xfbf4e8,
"White Flag":0xc8c2c0,
"White Flour":0xf5ede0,
"White Fur":0xf1efe7,
"White Geranium":0xf1f1e1,
"White Glaze":0xddeeee,
"White Gloss":0xffeeee,
"White Glove":0xf0efed,
"White Granite":0xc8d1c4,
"White Grapefruit":0xfcf0de,
"White Grapes":0xbbcc99,
"White Green":0xd6e9ca,
"White Hamburg Grapes":0xe2e6d7,
"White Heat":0xfdf9ef,
"White Heron":0xe7e1d7,
"White Hot Chocolate":0xead8bb,
"White Hyacinth":0xf3e5d1,
"White Hydrangea":0xf9f6dd,
"White Ice":0xd7eee4,
"White Iris":0xdfe2e7,
"White Jade":0xd4dbb2,
"White Jasmine":0xf7f4df,
"White Kitten":0xdfdfdb,
"White Lake":0xe2e7e7,
"White Lavender":0xe1e2eb,
"White Lie":0xdededc,
"White Lightning":0xf9f3db,
"White Lilac":0xe7e5e8,
"White Lily":0xfaf0db,
"White Linen":0xeee7dd,
"White Luxury":0xf7f0e5,
"White Meadow":0xf2e6df,
"White Mecca":0xecf3e1,
"White Metal":0xd1d1cf,
"White Mink":0xefeee9,
"White Mint":0xe0e7da,
"White Mocha":0xe7dccc,
"White Moderne":0xebeae2,
"White Mountain":0xf6eddb,
"White Mouse":0xb9a193,
"White Nectar":0xf8f6d8,
"White Oak":0xce9f6f,
"White Opal":0xe7e2dd,
"White Owl":0xf5f3f5,
"White Peach":0xf9e6da,
"White Pearl":0xede1d1,
"White Pepper":0xb6a893,
"White Picket Fence":0xf0efeb,
"White Pointer":0xdad6cc,
"White Porcelain":0xf8fbf8,
"White Primer":0xc3bdab,
"White Pudding":0xf6e8df,
"White Rabbit":0xf8eee7,
"White Radish":0xe2e8cf,
"White Raisin":0xe5c28b,
"White Rock":0xd4cfb4,
"White Russian":0xf0e0dc,
"White Sage":0xd2d4c3,
"White Sail":0xebebe7,
"White Sand":0xf5ebd8,
"White Sapphire":0xe4eeeb,
"White Scar":0x8c9fa1,
"White Sea":0xd7e5ea,
"White Sesame":0xe4dbce,
"White Shadow":0xd1d3e0,
"White Shoulders":0xf1f0ec,
"White Smoke":0xf5f5f5,
"White Solid":0xf4f5fa,
"White Spruce":0x9fbdad,
"White Sulfur":0xf1faea,
"White Swan":0xe4d7c5,
"White Tiger":0xc5b8a8,
"White Truffle":0xefdbcd,
"White Ultramarine":0x83ccd2,
"White Veil":0xf7f1e3,
"White Vienna":0xc5dcb3,
"White Whale":0xedeeef,
"White Willow":0xecf4dd,
"White Wool":0xf2efde,
"White Zin":0xf8eee3,
"White-Red":0xf3e8ea,
"Whitecap Foam":0xdee3de,
"Whitecap Grey":0xe0d5c6,
"Whitened Sage":0xdee0d2,
"Whitest White":0xf8f9f5,
"Whitetail":0xf4eee5,
"Whitewash":0xfefffc,
"Whitewash Oak":0xcac9c0,
"Whitewashed Fence":0xfaf2e3,
"Whitewater Bay":0xbac4ad,
"Whitney Oaks":0xb2a188,
"Who-Dun-It":0x8b7181,
"Whole Nine Yards":0x03c03c,
"Whole Wheat":0xa48b73,
"Wholemeal Cookie":0xaaa662,
"Wh<NAME>":0xbec1cf,
"Wicked Green":0x9bca47,
"Wicker Basket":0x847567,
"Wickerware":0xfce4af,
"Wickerwork":0xc19e80,
"Wickford Bay":0x4f6c8f,
"Wide Sky":0x416faf,
"Widowmaker":0x99aaff,
"Wiener Dog":0x874e3c,
"Wiener Schnitzel":0xee9900,
"Wiggle":0xc9c844,
"Wild Apple":0xfef9d7,
"Wild Aster":0x92316f,
"Wild Axolotl":0x63775a,
"Wild Bamboo":0xeac37e,
"Wild Beet Leaf":0x6b8372,
"Wild Berry":0x7e3a3c,
"Wild Bill Brown":0x795745,
"Wild Blue Yonder":0x7a89b8,
"Wild Boar":0x553322,
"Wild Boysenberry":0x5a4747,
"Wild Brown":0x47241a,
"Wild Caribbean Green":0x1cd3a2,
"Wild Cattail":0x916d5d,
"Wild Chestnut":0xbc5d58,
"Wild Chocolate":0x665134,
"Wild Clary":0x93a3c1,
"Wild Cranberry":0x6e3c42,
"Wild Currant":0x7c3239,
"Wild Dove":0x8b8c89,
"Wild Elderberry":0x545989,
"Wild Forest":0x38914a,
"Wild Geranium":0x986a79,
"Wild Ginger":0x7c4c53,
"Wild Ginseng":0x80805d,
"Wild Grapes":0x5e496c,
"Wild Grass":0x998643,
"Wild Hemp":0x9d7b74,
"Wild Honey":0xeecc00,
"Wild Horse":0x634a40,
"Wild Horses":0x8d6747,
"Wild Iris":0x2f2f4a,
"Wild Lilac":0xbeb8cd,
"Wild Lime":0xc3d363,
"Wild Manzanita":0x684944,
"Wild Maple":0xffe2c7,
"Wild Mulberry":0xa96388,
"Wild Mushroom":0x84704b,
"Wild Mustang":0x695649,
"Wild Nude":0xbeae8a,
"Wild Oats":0xecdbc3,
"Wild Orchid":0xd979a2,
"Wild Orchid Blue":0xb4b6da,
"Wild Pansy":0x6373b4,
"Wild Party":0xb97a77,
"Wild Phlox":0x9ea5c3,
"Wild Pigeon":0x767c6b,
"Wild Plum":0x83455d,
"Wild Poppy":0xb85b57,
"Wild Porcini":0xd6c0aa,
"Wild Primrose":0xebdd99,
"Wild Raisin":0x614746,
"Wild Rice":0xd5bfb4,
"Wild Rider Red":0xdc143c,
"Wild Rose":0xce8498,
"Wild Rye":0xb5a38c,
"Wild Sage":0x7e877d,
"Wild Sand":0xe7e4de,
"Wild Seaweed":0x8a6f45,
"Wild Stallion":0x7c5644,
"Wild Strawberry":0xff3399,
"Wild Thing":0x654243,
"Wild Thistle":0x9e9fb6,
"Wild Thyme":0x7e9c6f,
"Wild Truffle":0x463f3c,
"Wild Watermelon":0xfc6d84,
"Wild West":0x7e5c52,
"Wild Wheat":0xe0e1d1,
"Wild Wilderness":0x91857c,
"Wild Willow":0xbeca60,
"Wild Wisteria":0x686b93,
"Wildcat Grey":0xf5eec0,
"Wilderness":0x8f886c,
"Wilderness Grey":0xc2baa8,
"Wildfire":0xff8833,
"Wildflower":0x927d9b,
"Wildflower Bouquet":0xffb3b1,
"Wildflower Honey":0xc69c5d,
"Wildflower Prairie":0xcccfe2,
"Wildness Mint":0x5d9865,
"Wildwood":0xcdb99b,
"Wilhelminian Pink":0xaa83a4,
"Will":0x179fa6,
"Will O the Wisp":0xd7d8dd,
"William":0x53736f,
"Williams Pear Yellow":0xddc765,
"Willow":0x9a8b4f,
"Willow Blue":0x293648,
"Willow Bough":0x59754d,
"Willow Brook":0xdfe6cf,
"Willow Dyed":0x93b881,
"Willow Green":0xc3cabf,
"Willow Grey":0x817b69,
"Willow Grove":0x69755c,
"Willow Hedge":0x84c299,
"Willow Herb":0xe6dab6,
"Willow Leaf":0xa1a46d,
"Willow Sooty Bamboo":0x5b6356,
"Willow Springs":0xe7e6e0,
"Willow Tree":0x9e8f66,
"Willow Tree Mouse":0xc8d5bb,
"Willow Wood":0x58504d,
"Willow-Flower Yellow":0xf0d29d,
"Willowherb":0x8e4483,
"Willowleaf":0x85877b,
"Willowside":0xf3f2e8,
"Willpower Orange":0xfd5800,
"Wilmington Tan":0xbd9872,
"Wilted Brown":0xab4c3d,
"Wilted Leaf":0xeedac9,
"Wimbledon":0x626d5b,
"Wind Blown":0xdde3e7,
"Wind Blue":0xb1c9df,
"Wind Cave":0x686c7b,
"Wind Chill":0xeff3f0,
"Wind Chimes":0xcac5c2,
"Wind Force":0xd5e2ee,
"Wind Fresh White":0xd0d8cf,
"Wind of Change":0xc8deea,
"Wind Rose":0xe8babd,
"Wind Speed":0xbfd6d9,
"Wind Star":0x6875b7,
"Wind Tunnel":0xc7dfe6,
"Wind Weaver":0xc5d1d8,
"Windchill":0xd5d8d7,
"Windfall":0x84a7ce,
"Windflower":0xbc9ca2,
"Windfresh White":0xded8cf,
"Windgate Hill":0x5b584c,
"Windham Cream":0xf5e6c9,
"Winding Path":0xc6bba2,
"Windjammer":0x62a5df,
"Windmill":0xf5ece7,
"Windmill Park":0xa79b83,
"Windmill Wings":0xf0f1ec,
"Window Box":0xbcafbb,
"Window Grey":0x989ea1,
"Window Pane":0xe4ecdf,
"Windows 95 Desktop":0x018281,
"Windows Blue":0x3778bf,
"Windrift Beige":0xcebcae,
"Windrock":0x5e6c62,
"Windrush":0xdbd3c6,
"Winds Breath":0xe0e1da,
"Windsong":0xf4e4af,
"Windsor":0x462c77,
"Windsor Brown":0xa75502,
"Windsor Greige":0xc4b49c,
"Windsor Haze":0xa697a7,
"Windsor Moss":0x545c4a,
"Windsor Purple":0xc9afd0,
"Windsor Tan":0xcabba1,
"Windsor Toffee":0xccb490,
"Windsor Way":0x9fc9e4,
"Windsor Wine":0x582b36,
"Windstorm":0x6d98c4,
"Windsurf":0x91aab8,
"Windsurf Blue":0x718bae,
"Windsurfer":0xd7e2de,
"Windswept":0xd1f1f5,
"Windswept Beach":0xe3e4e5,
"Windswept Canyon":0xdba480,
"Windswept Leaves":0xb7926b,
"Windwood Spring":0xc2e5e0,
"Windy Blue":0xaabac6,
"Windy City":0x88a3c2,
"Windy Day":0x8cb0cb,
"Windy Meadow":0xb0a676,
"Windy Pine":0x3d604a,
"Windy Seas":0x667f8b,
"Windy Sky":0xe8ebe7,
"Wine":0x80013f,
"Wine Barrel":0xaa5522,
"Wine Bottle":0xd3d6c4,
"Wine Bottle Green":0x254636,
"Wine Brown":0x5f3e3e,
"Wine Cellar":0x70403d,
"Wine Cork":0x866d4c,
"Wine Country":0x602234,
"Wine Crush":0x96837d,
"Wine Dregs":0x673145,
"Wine Frost":0xe5d8e1,
"Wine Goblet":0x643b46,
"Wine Grape":0x941751,
"Wine Gummy Red":0x67334c,
"Wine Leaf":0x355e4b,
"Wine Not":0x864c58,
"Wine Red":0x7b0323,
"Wine Stain":0x69444f,
"Wine Stroll":0x8f7191,
"Wine Tasting":0x492a34,
"Wine Tour":0x653b66,
"Wine Yellow":0xd7c485,
"Wineberry":0x663366,
"Wineshade":0x433748,
"Wing Commander":0x0065ac,
"Wing Man":0x5a6868,
"Winged Victory":0xebe4e2,
"Wingsuit Wind":0xbad5d4,
"Wink":0x7792af,
"Wink Pink":0xede3e7,
"Winner's Circle":0x365771,
"Winning Red":0x894144,
"Winning Ticket":0x636653,
"Winsome Beige":0xe0cfc2,
"Winsome Grey":0xe7e9e4,
"Winsome Hue":0xa7d8e1,
"Winsome Orchid":0xd4b9cb,
"Winsome Rose":0xc28ba1,
"Winter Amethyst":0xb0a6c2,
"Winter Balsam":0x314747,
"Winter Blizzard":0xb8c8d3,
"Winter Bloom":0x47243b,
"Winter Breath":0xdeeced,
"Winter Chill":0x8eced8,
"Winter Chime":0x83c7df,
"Winter Coat":0x45494c,
"Winter Cocoa":0xbaaaa7,
"Winter Could Grey":0x6e7a7c,
"Winter Day":0xe3e7e9,
"Winter Dusk":0xb8b8cb,
"Winter Duvet":0xffffe0,
"Winter | |
<filename>viscid/dipole.py
"""A collection of tools for dipoles"""
# pylint: disable=bad-whitespace
from __future__ import print_function, division
import sys
import numpy as np
import viscid
from viscid import field
from viscid import seed
from viscid.calculator import interp_trilin
# from viscid import vutil
try:
import numexpr as ne # pylint: disable=wrong-import-order
_HAS_NUMEXPR = True
except ImportError:
_HAS_NUMEXPR = False
__all__ = ['guess_dipole_moment', 'make_dipole', 'fill_dipole', 'calc_dip',
'set_in_region', 'make_spherical_mask', 'xyz2lsrlp', 'dipole_map',
'dipole_map_value']
# note that this global is used immutably (ie, not rc file configurable)
DEFAULT_STRENGTH = 1.0 / 3.0574e-5
def guess_dipole_moment(b, r=2.0, strength=DEFAULT_STRENGTH, cap_angle=40,
                        cap_ntheta=121, cap_nphi=121, plot=False):
    """Guess a dipole moment from a B field.

    Samples ``b`` on a spherical cap of radius ``r`` and takes the
    location of the maximum \\|B\\| as the pole direction, rescaled so the
    returned moment has magnitude ``strength``.

    Args:
        b (Field): vector magnetic field to inspect
        r (float): radius of the sampling cap
        strength (float): magnitude of the returned moment
        cap_angle (float): angular extent of the cap in degrees
        cap_ntheta (int): number of theta samples on the cap
        cap_nphi (int): number of phi samples on the cap
        plot (bool): if True, plot \\|B\\| on the cap (imports matplotlib)

    Returns:
        ndarray: dipole moment [mx, my, mz] with norm ``strength``
    """
    viscid.warn("guess_dipole_moment doesn't seem to do better than 1.6 "
                "degrees, you may want to use cotr instead.")
    cap = seed.SphericalCap(r=r, angle=cap_angle, ntheta=cap_ntheta,
                            nphi=cap_nphi)
    # interpolate the field onto the cap's sample points
    b_cap = interp_trilin(b, cap)
    # FIXME: this doesn't get closer than 1.6 deg @ (theta, mu) = (0, 7.5)
    # so maybe the index is incorrect somehow?
    idx = np.argmax(viscid.magnitude(b_cap).data)
    # the sample point with the largest |B| points toward the pole
    pole = cap.points()[:, idx]
    # FIXME: it should be achievable to get strength from the maximum
    # magnitude, up to the direction
    pole = strength * pole / np.linalg.norm(pole)
    # # not sure where 0.133 comes from, this is probably not correct
    # pole *= 0.133 * np.dot(pole, b_cap.data.reshape(-1, 3)[idx, :]) * r**3
    if plot:
        from matplotlib import pyplot as plt
        from viscid.plot import vpyplot as vlt
        vlt.plot(viscid.magnitude(b_cap))
        vlt.plot(viscid.magnitude(b_cap), style='contour', levels=10,
                 colors='k', colorbar=False, ax=plt.gca())
        vlt.show()
    return pole
def make_dipole(m=(0, 0, -DEFAULT_STRENGTH), strength=None, l=None, h=None,
                n=None, twod=False, dtype='f8', nonuniform=False,
                crd_system='gse', name='b'):
    """Generate a dipole field with magnetic moment m [x, y, z]

    Creates a cell-centered, interlaced 3-component field on a regular
    grid spanning ``l``..``h`` with ``n`` points per axis, then fills it
    via :py:func:`fill_dipole`.
    """
    l = [-5] * 3 if l is None else l
    h = [5] * 3 if h is None else h
    n = [256] * 3 if n is None else n
    # one coordinate array per axis, cast to the requested dtype
    x, y, z = (np.linspace(lo, hi, num).astype(dtype)
               for lo, hi, num in zip(l, h, n))
    if twod:
        # collapse y down to two planes for a pseudo-2d grid
        y = np.linspace(-0.1, 0.1, 2).astype(dtype)
    if nonuniform:
        # perturb z slightly so spacing is no longer uniform
        z = z + 0.01 * ((h[2] - l[2]) / n[2]) * np.sin(np.linspace(0, np.pi, n[2]))
    B = field.empty([x, y, z], nr_comps=3, name=name, center='cell',
                    layout='interlaced', dtype=dtype)
    B.set_info('crd_system', viscid.as_crd_system(crd_system))
    B.set_info('cotr', viscid.dipole_moment2cotr(m, crd_system=crd_system))
    return fill_dipole(B, m=m, strength=strength)
def fill_dipole(B, m=(0, 0, -DEFAULT_STRENGTH), strength=None, mask=None):
    """Set B to a dipole with magnetic moment m.

    Args:
        B (Field): Field to fill with a dipole
        m (ndarray, or datetime64-like): dipole moment [mx, my, mz]; a
            datetime-like value is converted to the moment at that time
            in B's coordinate system
        strength (float): if given, rescale the dipole moment
            even if it was given explicitly
        mask (Field): boolean field as mask, B will be filled where
            the mask is True

    Returns:
        Field: B
    """
    # FIXME: should really be taking the curl of a vector field
    # NOTE(review): this relies on the truthiness of a Field; if mask can
    # ever be a raw ndarray this is ambiguous -- confirm
    if mask:
        # write into scratch storage, then blend into B below
        Bdip = field.empty_like(B)
    else:
        # no mask: write the dipole directly into B
        Bdip = B
    # Xcc, Ycc, Zcc = B.get_crds_cc(shaped=True)  # pylint: disable=W0612
    Xv, Yv, Zv = B.get_crds_vector(shaped=True)  # pylint: disable=W0612
    # one [x, y, z] coordinate triple per vector component
    _crd_lst = [[_x, _y, _z] for _x, _y, _z in zip(Xv, Yv, Zv)]
    dtype = B.dtype
    # scalar constants cast to B's dtype so numexpr doesn't upcast
    one = np.array([1.0], dtype=dtype)  # pylint: disable=W0612
    three = np.array([3.0], dtype=dtype)  # pylint: disable=W0612
    if viscid.is_datetime_like(m):
        m = viscid.get_dipole_moment(m, crd_system=B)
    else:
        m = np.asarray(m, dtype=dtype)
    if strength is not None:
        # rescale m to the requested magnitude, keeping its direction
        m = (strength / np.linalg.norm(m)) * m
    mx, my, mz = m  # pylint: disable=W0612
    # generate a dipole field for the entire grid
    # Note: this is almost the exact same as calc_dip, but since components
    # are done one-at-a-time, it requires less memory since it copies the
    # result of each component into Bdip separately
    if _HAS_NUMEXPR:
        for i, cn in enumerate("xyz"):
            _X, _Y, _Z = _crd_lst[i]
            _XI = _crd_lst[i][i]
            _mi = m[i]
            rsq = ne.evaluate("_X**2 + _Y**2 + _Z**2")  # pylint: disable=W0612
            mdotr = ne.evaluate("mx * _X + my * _Y + mz * _Z")  # pylint: disable=W0612
            Bdip[cn] = ne.evaluate("((three * _XI * mdotr / rsq) - _mi) / rsq**1.5")
    else:
        for i, cn in enumerate("xyz"):
            _X, _Y, _Z = _crd_lst[i]
            _XI = _crd_lst[i][i]
            _mi = m[i]
            rsq = _X**2 + _Y**2 + _Z**2
            mdotr = mx * _X + my * _Y + mz * _Z
            Bdip[cn] = ((three * _XI * mdotr / rsq) - _mi) / rsq**1.5
    if mask:
        # where mask is True take the dipole values, elsewhere keep B
        B.data[...] = np.choose(mask.astype('i'), [B, Bdip])
    return B
def calc_dip(pts, m=(0, 0, -DEFAULT_STRENGTH), strength=None, crd_system='gse',
             dtype=None):
    """Calculate a dipole field at various points

    Args:
        pts (ndarray): Nx3 array of points at which to calculate the
            dipole. Should use the same crd system as `m`
        m (sequence, datetime): dipole moment
        strength (None, float): If given, rescale m to this magnitude
        crd_system (str): Something from which cotr can divine the
            coordinate system for both `pts` and `m`. This is only used
            if m is given as a datetime and we need to figure out the
            dipole moment at a given time in a given crd system
        dtype (str, np.dtype): dtype of the result, defaults to
            the same datatype as `pts`

    Returns:
        ndarray: Nx3 dipole field vectors for N points
    """
    pts = np.asarray(pts, dtype=dtype)
    if len(pts.shape) == 1:
        # promote a bare [x, y, z] to a 1x3 array; remember to squeeze later
        pts = pts.reshape(1, 3)
        single_pt = True
    else:
        single_pt = False
    if dtype is None:
        dtype = pts.dtype
    # scalar constants cast to the working dtype so numexpr doesn't upcast
    one = np.array([1.0], dtype=dtype)  # pylint: disable=W0612
    three = np.array([3.0], dtype=dtype)  # pylint: disable=W0612
    if viscid.is_datetime_like(m):
        m = viscid.get_dipole_moment(m, crd_system=crd_system)
    else:
        m = np.asarray(m, dtype=dtype)
    if strength is not None:
        # rescale m to the requested magnitude, keeping its direction
        m = (strength / np.linalg.norm(m)) * m
    mx, my, mz = m  # pylint: disable=W0612
    m = m.reshape(1, 3)
    # generate a dipole field for the entire grid
    # Note: this is almost the same as fill_dipole, but all components
    # are calculated simultaneously, and so this uses more memory
    if _HAS_NUMEXPR:
        _X, _Y, _Z = pts.T
        rsq = ne.evaluate("_X**2 + _Y**2 + _Z**2")
        mdotr = ne.evaluate("mx * _X + my * _Y + mz * _Z")
        # BUGFIX: rsq / mdotr are (N,) while pts is (N, 3); without the
        # column reshape, broadcasting raises for general N (and silently
        # mixed up points/components when N happened to equal 3)
        rsq = rsq.reshape(-1, 1)  # pylint: disable=W0612
        mdotr = mdotr.reshape(-1, 1)  # pylint: disable=W0612
        Bdip = ne.evaluate("((three * pts * mdotr / rsq) - m) / rsq**1.5")
    else:
        _X, _Y, _Z = pts.T
        rsq = (_X**2 + _Y**2 + _Z**2).reshape(-1, 1)
        mdotr = (mx * _X + my * _Y + mz * _Z).reshape(-1, 1)
        Bdip = ((three * pts * mdotr / rsq) - m) / rsq**1.5
    if single_pt:
        # return a flat [bx, by, bz] to mirror the flat input
        Bdip = Bdip[0, :]
    return Bdip
def set_in_region(a, b, alpha=1.0, beta=1.0, mask=None, out=None):
    """Set ``out = alpha * a + beta * b`` where ``mask`` is True.

    ``a`` and ``b`` may be Fields or plain arrays; the result is written
    into ``out`` (a new field like ``a`` if not given) and returned.
    """
    alpha = np.asarray(alpha, dtype=a.dtype)
    beta = np.asarray(beta, dtype=a.dtype)
    # pull raw arrays out of Fields; plain ndarrays pass straight through
    a_dat = a.data if isinstance(a, viscid.field.Field) else a
    b_dat = b.data if isinstance(b, viscid.field.Field) else b
    b = None  # only the raw data is needed from here on
    if _HAS_NUMEXPR:
        vals = ne.evaluate("alpha * a_dat + beta * b_dat")
    else:
        vals = alpha * a_dat + beta * b_dat
    a_dat = b_dat = None  # let the big intermediates be collected
    if out is None:
        out = field.empty_like(a)
    if mask is None:
        # unconditional: overwrite everything
        out.data[...] = vals
        return out
    if hasattr(mask, "nr_comps") and mask.nr_comps:
        # vector masks must match a's centering / memory layout first
        mask = mask.as_centered(a.center).as_layout(a.layout)
    try:
        out.data[...] = np.choose(mask, [out.data, vals])
    except ValueError:
        # scalar mask over a vector field: broadcast across components
        shaped_mask = mask.data.reshape(list(mask.sshape) + [1])
        out.data[...] = np.choose(shaped_mask, [out.data, vals])
    return out
def make_spherical_mask(fld, rmin=0.0, rmax=None, rsq=None):
    """Make a boolean mask that is True where rmin <= r < rmax.

    Args:
        fld (Field): field whose coordinates define r; may be scalar or
            vector (edge/face centered vector fields get a per-component
            mask since each component lives on its own grid)
        rmin (float): inner radius (inclusive)
        rmax (float): outer radius (exclusive); defaults to "infinity"
        rsq: unused legacy parameter, kept for backward compatibility

    Returns:
        Field: boolean field, True between rmin and rmax
    """
    if rmax is None:
        # effectively +inf while staying finite in double precision
        rmax = np.sqrt(0.9 * np.finfo('f8').max)
    if fld.nr_comps and fld.center.lower() in ('edge', 'face'):
        # staggered vector field: each component has its own coordinates,
        # so compute r**2 separately per component
        mask = np.empty(fld.shape, dtype='bool')
        Xv, Yv, Zv = fld.get_crds_vector(shaped=True)  # pylint: disable=W0612
        _crd_lst = [[_x, _y, _z] for _x, _y, _z in zip(Xv, Yv, Zv)]
        for i in range(3):
            rsq = np.sum([c**2 for c in _crd_lst[i]], axis=0)
            _slc = [slice(None)] * len(fld.shape)
            _slc[fld.nr_comp] = i
            # BUGFIX: index with a tuple -- list-based multidimensional
            # indexing is an error on modern numpy
            mask[tuple(_slc)] = np.bitwise_and(rsq >= rmin**2, rsq < rmax**2)
        return fld.wrap_field(mask, dtype='bool')
    else:
        rsq = np.sum([c**2 for c in fld.get_crds(shaped=True)], axis=0)
        mask = np.bitwise_and(rsq >= rmin**2, rsq < rmax**2)
        if fld.nr_comps:
            # all components share coordinates; wrap a scalar-shaped mask
            fld = fld['x']
        return fld.wrap_field(mask, dtype='bool')
def _precondition_pts(pts):
"""Make sure pts are a 2d ndarray with length 3 in 1st dim"""
pts = np.asarray(pts)
if len(pts.shape) == 1:
pts = pts.reshape((3, 1))
return pts
def xyz2lsrlp(pts, cotr=None, crd_system='gse'):
"""Ceovert x, y, z -> l-shell, r, lambda, phi [sm coords]
- r, theta, phi = viscid.cart2sph(pts in x, y, z)
- lambda = 90deg - theta
- r = L cos^2(lambda)
Args:
pts (ndarray): 3xN for N (x, y, z) points
cotr (None): if given, use cotr to perform mapping to / from sm
| |
"""
run the below command under 'activity_recognition' folder:
PYTHONPATH=../:./ python3 models/classical/detector_feature_A_mirror-2.py
"""
import os
import shutil
from collections import Counter
from shutil import copyfile
import cv2
import numpy as np
import sklearn
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KernelDensity
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
from ar.features import feature
from ar.features.feature import extract_feature_average, generate_data, extract_video_feature, \
extract_feature_sliding_window, extract_feature_sampling, load_data, dump_data
from ar.features.cnn.model_tf import CNN_tf
from ar.features.cnn.utils import load_video
from ar.features.cnn.video import trim
def _extract_video_feature(model, in_file, out_dir):
# in_file = 'data/data-clean/refrigerator/open_close_fridge/1/open_close_fridge_3_1615392727_2.mkv'
video_name = os.path.splitext(os.path.basename(in_file))[0]
out_file = os.path.join(out_dir, '{}_{}.npy'.format(video_name, model.net_name))
if os.path.exists(out_file):
return out_file
if not os.path.exists(out_dir):
os.makedirs(out_dir)
batch_sz = 32
# sampling: only extract the first frame in each second from the video.
video_tensor = load_video(in_file, model.desired_size)
# extract features
features = model.extract(video_tensor, batch_sz)
# save features
np.save(os.path.splitext(out_file)[0], features)
return out_file
def _mirror_video(in_file, out_dir):
    """Write a horizontally mirrored copy of a video next to ``out_dir``.

    https://stackoverflow.com/questions/29317262/opencv-video-saving-in-python
    https://docs.opencv.org/4.5.2/dd/d43/tutorial_py_video_display.html
    https://stackoverflow.com/questions/61659346/how-to-get-4-character-codec-code-for-videocapture-object-in-opencv

    Parameters
    ----------
    in_file : path of the source video
        (e.g. 'data/data-clean/refrigerator/.../open_close_fridge_3_1615392727_2.mkv')
    out_dir : directory for the '<stem>-mirrored.<ext>' output file

    Returns
    -------
    Path of the mirrored video (returned immediately if it already exists).
    """
    _, file_name = os.path.split(in_file)
    # Fix: the original used file_name.split('.'), which raises ValueError
    # for names containing more than one dot; splitext is robust.
    stem, ext = os.path.splitext(file_name)
    out_file = os.path.join(out_dir, stem + '-mirrored' + ext)
    if os.path.exists(out_file):
        return out_file
    # capture video
    cap = cv2.VideoCapture(in_file)
    # Recover the source codec: the FOURCC property is four ASCII bytes
    # packed little-endian into an int.
    fourcc_int = int(cap.get(cv2.CAP_PROP_FOURCC))
    fourcc = cv2.VideoWriter_fourcc(*(chr(fourcc_int & 0xff) + chr((fourcc_int >> 8) & 0xff)
                                      + chr((fourcc_int >> 16) & 0xff) + chr((fourcc_int >> 24) & 0xff)))
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))
    fps = cap.get(cv2.CAP_PROP_FPS)
    try:
        out = cv2.VideoWriter(out_file, fourcc, fps, (frame_width, frame_height), isColor=True)
    except Exception as e:
        # Fix: the original printed the exception and fell through, which
        # crashed later with NameError on the undefined 'out'. Release the
        # capture and propagate the real error instead.
        print(e)
        cap.release()
        raise
    while True:
        ret, img = cap.read()
        if not ret:
            break
        # flip code 1 == horizontal mirror
        out.write(cv2.flip(img, 1))
    cap.release()
    out.release()
    cv2.destroyAllWindows()
    print(f'_mirror_video: {out_file}')
    return out_file
class AnomalyDetector:
    """Thin wrapper around several sklearn estimators.

    Density models ('KDE') support thresholded anomaly scoring via
    get_threshold/predict; the remaining names build plain supervised
    classifiers fitted in :meth:`fit`.
    """

    def __init__(self, model_name='GMM', model_parameters=None, random_state=42):
        """
        Args:
            model_name: one of 'KDE', 'GMM', 'DT', 'RF', 'SVM', 'OvRLogReg'.
            model_parameters: per-model hyper-parameters (e.g. 'n_estimators',
                'kernel', 'C'); defaults to an empty dict.
            random_state: seed forwarded to the sklearn estimators.
        """
        self.model_name = model_name
        self.random_state = random_state
        # Fix: the original used a mutable default argument ({}) which is
        # shared across every instance constructed without parameters.
        self.model_parameters = {} if model_parameters is None else model_parameters

    def fit(self, X_train, y_train=None):
        """Build and fit the configured model; y_train is ignored for KDE/GMM."""
        if self.model_name == 'KDE':
            self.model = KernelDensity(kernel='gaussian', bandwidth=0.5)
            self.model.fit(X_train)
        elif self.model_name == 'GMM':
            # Not implemented in the original either; kept as a no-op.
            pass
        elif self.model_name == 'DT':
            self.model = DecisionTreeClassifier(random_state=self.random_state)
            self.model.fit(X_train, y_train)
        elif self.model_name == 'RF':
            n_estimators = self.model_parameters['n_estimators']
            self.model = RandomForestClassifier(n_estimators, random_state=self.random_state)
            self.model.fit(X_train, y_train)
        elif self.model_name == 'SVM':
            kernel = self.model_parameters['kernel']
            # NOTE(review): relies on ``sklearn.svm`` being reachable from the
            # bare ``import sklearn`` at the top of the file — confirm, else
            # add ``from sklearn import svm``.
            self.model = sklearn.svm.SVC(kernel=kernel, random_state=self.random_state)
            self.model.fit(X_train, y_train)
        elif self.model_name == 'OvRLogReg':
            C = self.model_parameters['C']
            self.model = OneVsRestClassifier(
                LogisticRegression(C=C, random_state=self.random_state, solver='liblinear'))
            self.model.fit(X_train, y_train)

    def get_threshold(self, X_train, q=0.95):
        """Set the anomaly threshold to the q-quantile of training densities."""
        log_dens = self.model.score_samples(X_train)
        self.thres = np.quantile(np.exp(log_dens), q=q)

    def predict_prob(self, X):
        """Return the (non-log) density of each sample under the model."""
        log_dens = self.model.score_samples(X)
        return np.exp(log_dens)

    def predict(self, X):
        """Label samples: 1.0 = anomaly (density < thres), 0.0 = normal.

        Fix: the original relabelled ``dens`` in place in two steps
        (dens<thres -> 1, then dens>=thres -> 0); whenever thres <= 1 the
        freshly-written anomaly labels satisfied the second condition and
        were clobbered back to 0. Compute the mask once instead.
        """
        dens = self.predict_prob(X)
        return np.where(dens < self.thres, 1.0, 0.0)
def get_X_y(Xs, ys):
    """Build a feature matrix and label vector from per-video .npy files.

    Args:
        Xs: iterable of .npy feature-file paths.
        ys: iterable of labels, parallel to Xs.

    Returns:
        (np.ndarray, np.ndarray): averaged features and their labels.

    Fix: the original returned ``(Xs, X, Y)`` — a 3-tuple — while both call
    sites in this file unpack exactly two values, which raised ValueError
    at runtime. The stray ``Xs`` has been dropped from the return.
    """
    X = []
    Y = []
    for f, y in zip(Xs, ys):
        # NOTE(review): X.extend vs Y.append assumes extract_feature_average
        # yields exactly one row per file — confirm.
        x = extract_feature_average(f)
        X.extend(x)
        Y.append(y)
    return np.asarray(X), np.asarray(Y)
def split_train_test_npy(meta, test_size=0.3, is_mirror_test_set=False, random_state=42):
    """Split feature files into train/test sets, folding mirrored clips into train.

    Args:
        meta: dict with parallel lists 'X' (npy paths) and 'y' (labels).
        test_size: fraction held out for testing.
        is_mirror_test_set: when True, mirrored samples are appended to the
            test set as well (they always augment the training set).
        random_state: seed shared by both splits so each original video and
            its mirrored twin land on the same side of the split.

    Returns:
        X_train, X_test, y_train, y_test (np.ndarray each).
    """
    X = []  # doesn't include 'mirrored' npy
    y = []  # doesn't include 'mirrored' npy
    X_mirrored = []
    y_mirrored = []
    for x_, y_ in zip(meta['X'], meta['y']):
        if 'mirrored_vgg.npy' not in x_:
            X.append(x_)
            y.append(y_)
            # Derive the mirrored twin's path so X and X_mirrored keep the
            # same ordering (required for the paired split below).
            ent = '_vgg.npy'
            new_x_ = x_[:-len(ent)] + '-mirrored' + ent
            X_mirrored.append(new_x_)
            y_mirrored.append(y_)
    # NOTE(review): this 2-value unpack requires get_X_y to return exactly
    # (features, labels) — verify its return signature.
    X, y = get_X_y(X, y)  # extract features from 'npy' files
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
    X_mirrored, y_mirrored = get_X_y(X_mirrored, y_mirrored)  # extract features from 'npy' files
    # Same random_state + same length => identical permutation as above.
    X_mirrored_train, X_mirrored_test, y_mirrored_train, y_mirrored_test = \
        train_test_split(X_mirrored, y_mirrored, test_size=test_size, random_state=random_state)
    X_train = np.concatenate([X_train, X_mirrored_train], axis=0)
    y_train = np.concatenate([y_train, y_mirrored_train], axis=0)
    if is_mirror_test_set:
        X_test = np.concatenate([X_test, X_mirrored_test], axis=0)
        y_test = np.concatenate([y_test, y_mirrored_test], axis=0)
    print(f'X_train: {X_train.shape}\nX_test: {X_test.shape}')
    print(f'X_train: {X_train.shape}, y_train: {sorted(Counter(y_train).items(), key=lambda x: x[0])}')
    # Fix: this line previously printed the label 'X_train:' for the test set.
    print(f'X_test: {X_test.shape}, y_test: {sorted(Counter(y_test).items(), key=lambda x: x[0])}')
    return X_train, X_test, y_train, y_test
def augment_train(train_meta, augment_type='camera_1+camera_2+camera_3', is_mirror=False):
    """Flatten per-camera training features into one (meta, X, Y) triple.

    Args:
        train_meta: dict mapping camera name -> list of
            (video_path, cnn_feature, y_label, y_idx, x) tuples, where ``x``
            is the list of feature rows for that video.
        augment_type: which cameras to combine — 'camera_1+camera_2+camera_3'
            (everything), 'camera_1+camera_2' (all but camera_3) or a single
            'camera_i'.
        is_mirror: when False, '-mirrored' videos are skipped.

    Returns:
        (list, np.ndarray, np.ndarray): one video path per row, the stacked
        feature matrix, and the label vector.

    Raises:
        ValueError: for an unknown ``augment_type``.
    """
    # One inclusion predicate per mode replaces the three near-identical
    # copy-pasted loops of the original (same behavior, single loop body).
    if augment_type == 'camera_1+camera_2+camera_3':
        include = lambda name: True
    elif augment_type == 'camera_1+camera_2':
        include = lambda name: name != 'camera_3'
    elif augment_type in ('camera_1', 'camera_2', 'camera_3'):
        include = lambda name: name == augment_type
    else:
        raise ValueError(augment_type)
    # The original printed per-camera counts only in the combined modes.
    log_counts = '+' in augment_type
    X_meta = []
    X = []
    Y = []
    for name, train in train_meta.items():
        if not include(name):
            continue
        cnt = 0
        for video_path, cnn_feature, y_label, y_idx, x in train:
            if len(x) == 0:
                continue
            if not is_mirror and '-mirrored' in video_path:
                continue
            X.extend(x)
            Y.extend(len(x) * [y_idx])
            X_meta.extend(len(x) * [video_path])
            cnt += len(x)
        if log_counts:
            print(f'{name}_train: {cnt}')
    return X_meta, np.asarray(X), np.asarray(Y)
def augment_test(test_meta, augment_type='camera_1+camera_2+camera_3', is_mirror=False):
    """Flatten per-camera test features into one (meta, X, Y) triple.

    Mirrors :func:`augment_train` but never prints per-camera counts and,
    like the original, does not support the 'camera_1+camera_2' mode.

    Args:
        test_meta: dict mapping camera name -> list of
            (video_path, cnn_feature, y_label, y_idx, x) tuples.
        augment_type: 'camera_1+camera_2+camera_3' or a single 'camera_i'.
        is_mirror: when False, '-mirrored' videos are skipped.

    Returns:
        (list, np.ndarray, np.ndarray): video paths, features, labels.

    Raises:
        ValueError: for an unknown ``augment_type``.
    """
    if augment_type == 'camera_1+camera_2+camera_3':
        include = lambda name: True
    elif augment_type in ('camera_1', 'camera_2', 'camera_3'):
        include = lambda name: name == augment_type
    else:
        raise ValueError(augment_type)
    X_meta, X, Y = [], [], []
    for name, test in test_meta.items():
        if not include(name):
            continue
        for video_path, cnn_feature, y_label, y_idx, x in test:
            if len(x) == 0:
                continue
            if not is_mirror and '-mirrored' in video_path:
                continue
            X.extend(x)
            Y.extend(len(x) * [y_idx])
            X_meta.extend(len(x) * [video_path])
    return X_meta, np.asarray(X), np.asarray(Y)
def tsne_plot(X, y, y_label, random_state=42):
"""
X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
tsne_plot(X[:, :2], X[:, 2])
Parameters
----------
X
y
Returns
-------
"""
import numpy as np
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import ListedColormap
X_embedded = TSNE(n_components=2, random_state=random_state).fit_transform(X)
# df = pd.DataFrame(np.concatenate([X_embedded, np.reshape(y, (-1, 1))], axis=1), columns=['x1', 'x2', 'y'])
# print(df.head(5))
# g = sns.scatterplot(data=df, x="x1", y="x2", hue="y", palette="deep")
# # g.set(xticklabels=[])
# # g.set(yticklabels=[])
# plt.show()
df = pd.DataFrame(np.concatenate([X_embedded, np.reshape(y, (-1, 1)), np.reshape(y_label, (-1, 1))], axis=1),
columns=['x1', 'x2', 'y', 'y_label'])
df = df.astype({"x1": float, "x2": float, 'y': int, 'y_label': str})
print(df.info())
print(df.head(5))
print(df.describe())
g = sns.scatterplot(data=df, x="x1", y="x2", hue="y_label", palette='deep', s=50, alpha=0.3)
g.set_title('Refrigerator')
# Put the legend out of the figure
# Note that the (1.05, 1) coordinates correspond to the (x, y) coordinates where the legend should be placed
# and the borderaxespad specifies the padding between the axes and the border legend.
# bbox (x, y, width, height)
g.legend(loc='upper left', bbox_to_anchor=(1.05, 1.0), ncol=1, borderaxespad=0,
fancybox=False, shadow=False, fontsize=8, title='classes')
# g.legend(loc='center left', bbox_to_anchor=(1.25, 1), ncol=1, borderaxespad=0.)
plt.tight_layout()
plt.show()
### FacetGrid
grid = sns.FacetGrid(df, col="y_label", hue="y_label", hue_order=list(sorted(set(y_label))), col_wrap=3)
grid.map(sns.scatterplot, "x1", "x2", s=100, alpha=0.3)
grid.add_legend()
plt.show()
### 3D
X_embedded = TSNE(n_components=3, random_state=random_state).fit_transform(X)
df = pd.DataFrame(np.concatenate([X_embedded, np.reshape(y, (-1, 1)), np.reshape(y_label, (-1, 1))], axis=1),
columns=['x1', 'x2', 'x3', 'y', 'y_label'])
df = df.astype({"x1": float, "x2": float, "x3": float, 'y': int, 'y_label': str})
sns.set(style="white")
# fig = plt.figure()
# ax = fig.add_subplot(111, projection = '3d')
# ax.scatter(df['x1'], df['x2'], df['x3'])
# plt.show()
# axes instance
fig = plt.figure(figsize=(5, 5))
ax = Axes3D(fig, auto_add_to_figure=False)
fig.add_axes(ax)
# plot
# get colormap from seaborn
cmap = ListedColormap(sns.color_palette("deep", 5).as_hex())
sc = ax.scatter(df['x1'], df['x2'], df['x3'], s=40, c=df['y'].values, marker='o', cmap=cmap, alpha=1)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
# legend
plt.legend(*sc.legend_elements(), bbox_to_anchor=(1.05, 1), loc='upper left')
# plt.legend(*sc.legend_elements())
# plt.tight_layout(pad = | |
"""
Defines the project API
"""
import endpoints, re
from protorpc import message_types
from django.contrib.auth import get_user_model
from django.db.models import Q, Count
from django.conf import settings
from django.utils import timezone
from greenday_core import eventbus
from greenday_core.memoize_cache import cache_manager
from greenday_core.api_exceptions import (
BadRequestException, ForbiddenException, NotFoundException)
from greenday_core.constants import EventKind
from greenday_core.models import (
Project,
ProjectUser,
Video,
VideoTagInstance,
PendingUser,
YouTubeVideo
)
from greenday_core.email_templates import (
NEW_USER_INVITED,
EXISTING_USER_INVITED
)
from greenday_core.utils import (
get_gcs_image_serving_url,
send_email
)
from ..api import (
BaseAPI,
greenday_api,
greenday_method,
auth_required
)
from ..utils import (
get_obj_or_api_404,
update_object_from_request,
patch_object_from_request,
api_appevent
)
from ..common.containers import IDContainer
from .caching import remove_project_list_user_cache
from .messages import (
ProjectRequestMessage,
ProjectResponseMessage,
ProjectResponseMessageSlim,
ProjectListResponse,
ProjectCollaboratorsResponseMessage,
ProjectUserListResponse,
ProjectUpdatesMessage,
ProjectUpdateCountsMessage,
ProjectStatsMessage,
TagCountMessage,
DistinctChannelListResponse,
DistinctChannelMessage
)
from .mixins import ProjectAPIMixin
from .mappers import (
ProjectMapper, GenericProjectUpdateMapper, ProjectCollaboratorMapper)
# Custom request containers for all API methods
from .containers import (
ProjectListRequest,
ProjectUpdateEntityContainer,
ProjectUserEntityContainer,
ProjectUserIDEntityContainer,
ProjectIDContainer,
)
"""
Helper to extract every email address from a string, including cases where several addresses were mistakenly pasted into one field.
"""
def get_emails(s):
regex = re.compile(("([a-z0-9!#$%&'*+\/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+\/=?^_`"
"{|}~-]+)*(@|\sat\s)(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?(\.|"
"\sdot\s))+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?)"))
"""Returns an iterator of matched emails found in string s."""
# Removing lines that start with '//' because the regular expression
# mistakenly matches patterns like 'http://<EMAIL>' as '//<EMAIL>'.
return (email[0] for email in re.findall(regex, s) if not email[0].startswith('//'))
# PROJECT
@greenday_api.api_class(
resource_name='project', auth_level=endpoints.AUTH_LEVEL.REQUIRED)
class ProjectAPI(BaseAPI, ProjectAPIMixin):
    """
    API for Projects

    Object disposed after request is completed.
    """
    # Base queryset for project listings: prefetches the assigned users so
    # per-project mapping avoids N+1 queries. with_videos() presumably
    # attaches video information — see the Project queryset definition.
    PROJECT_LIST_QUERYSET = (
        Project.objects
        .prefetch_related("projectusers__user")
        .with_videos()
    )
def __init__(self, *args, **kwargs):
    """
    Creates the project API

    Builds the request/response mappers once per API instance:
    - ``mapper`` / ``slim_mapper``: Project rows -> full / slim messages
    - ``user_mapper``: ProjectUser rows -> collaborator messages
    - ``project_update_mapper``: renders generic "project update" entries
    """
    super(ProjectAPI, self).__init__(*args, **kwargs)
    self.mapper = ProjectMapper(Project, ProjectResponseMessage)
    self.slim_mapper = ProjectMapper(Project, ProjectResponseMessageSlim)
    self.user_mapper = ProjectCollaboratorMapper(
        ProjectUser, ProjectCollaboratorsResponseMessage)
    self.project_update_mapper = GenericProjectUpdateMapper()
@greenday_method(ProjectListRequest, ProjectListResponse,
                 path='project', http_method='GET', name='myprojects',
                 pre_middlewares=[auth_required])
def project_list_user(self, request):
    """
    Lists all projects that the current user is assigned to

    The response is cached per (user, pending) pair via cache_manager.
    """
    def _project_list_user(user, pending):
        # Fix: use the ``user`` argument (part of the cache key) instead of
        # ``self.current_user``. They are the same object today, but a
        # cached function's result must be determined by its arguments
        # alone, or the cache can serve one user's list to another.
        qry = self.PROJECT_LIST_QUERYSET.get_projects_for(user)
        if pending is not None:
            qry = qry.filter(
                Q(projectusers__user=user) &
                Q(projectusers__is_pending=pending)
            )
        items = [
            self.slim_mapper.map(p, current_user=user)
            for p in qry
        ]
        return ProjectListResponse(items=items, is_list=True)
    return cache_manager.get_or_set(
        _project_list_user,
        self.current_user,
        request.pending,
        message_type=ProjectListResponse)
@greenday_method(IDContainer, ProjectResponseMessage,
                 path='project/{id}', http_method='GET', name='get',
                 pre_middlewares=[auth_required])
def project_get(self, request):
    """
    Gets a single project

    assigned_only=True restricts access to users assigned to the project
    (enforcement lives in ProjectAPIMixin.get_project).
    """
    project = self.get_project(
        request.id,
        assigned_only=True
    )
    # Pre-populate the prefetch caches so the mapper can walk collections,
    # users and tags without issuing an extra query per field.
    project.fill_prefetch_cache("collections", project.collections.all())
    project.fill_prefetch_cache(
        "projectusers", project.projectusers.select_related("user"))
    project.fill_prefetch_cache(
        "projecttags",
        (
            project.projecttags
            .select_related("global_tag")
            .with_taginstance_sum()
        )
    )
    return self.mapper.map(project, current_user=self.current_user)
@greenday_method(ProjectRequestMessage, ProjectResponseMessage,
                 path='project', http_method='POST', name='insert',
                 pre_middlewares=[auth_required])
@api_appevent(
    EventKind.PROJECTCREATED,
    id_getter_post=lambda s, req, res: res.id,
    project_id_getter_post=lambda s, req, res: res.id,)
def project_insert(self, request):
    """
    Creates a new project

    Only whitelisted users may create projects; the creator becomes the
    owner. A PROJECTCREATED app event is published keyed on the new id.
    """
    if not self.current_user.is_whitelisted:
        raise ForbiddenException(
            "User must be whitelisted to create projects")
    # NOTE(review): ``or`` falls back to the default whenever the request
    # value is falsy — confirm 0 is never a legitimate privacy setting.
    project = Project.objects.create(
        name=request.name,
        description=request.description,
        image_url=request.image_url,
        image_gcs_filename=request.image_gcs_filename,
        privacy_project=request.privacy_project or Project.PRIVATE,
        privacy_tags=request.privacy_tags or Project.PUBLIC
    )
    project.set_owner(self.current_user)
    # cheeky hack to prevent further DB queries: a brand-new project has no
    # related rows, so seed the prefetch caches with empty querysets.
    project.fill_prefetch_cache("collections", project.collections.none())
    project.fill_prefetch_cache("projecttags", project.projecttags.none())
    project.fill_prefetch_cache("videotags", project.videotags.none())
    project.fill_prefetch_cache("projectusers", project.projectusers.all())
    remove_project_list_user_cache(self.current_user)
    return self.mapper.map(project, current_user=self.current_user)
@greenday_method(ProjectUpdateEntityContainer, ProjectResponseMessage,
                 path='project/{id}', http_method='PUT', name='update',
                 pre_middlewares=[auth_required])
@api_appevent(
    EventKind.PROJECTUPDATED,
    id_getter=lambda s, req: req.id,
    project_id_getter=lambda s, req: req.id)
def project_update(self, request):
    """
    Updates a project

    Full update (PUT): field copying is delegated to
    update_object_from_request. Restricted to the owner or an admin.
    """
    project = self.get_project(
        request.id,
        check_fn=lambda p: p.is_owner_or_admin(self.current_user)
    )
    update_object_from_request(request, project)
    # Pre-populate prefetch caches so mapping avoids extra queries.
    project.fill_prefetch_cache("collections", project.collections.all())
    project.fill_prefetch_cache(
        "projectusers", project.projectusers.select_related("user"))
    project.fill_prefetch_cache(
        "projecttags",
        (
            project.projecttags
            .select_related("global_tag")
            .with_taginstance_sum()
        )
    )
    remove_project_list_user_cache(self.current_user)
    return self.mapper.map(project, current_user=self.current_user)
@greenday_method(ProjectUpdateEntityContainer, ProjectResponseMessage,
                 path='project/{id}', http_method='PATCH', name='patch',
                 pre_middlewares=[auth_required])
@api_appevent(
    EventKind.PROJECTUPDATED,
    id_getter=lambda s, req: req.id,
    project_id_getter=lambda s, req: req.id)
def project_patch(self, request):
    """
    Patches a project

    Partial update (PATCH): only fields present on the request are applied
    via patch_object_from_request. Restricted to the owner or an admin.
    """
    project = self.get_project(
        request.id,
        check_fn=lambda p: p.is_owner_or_admin(self.current_user)
    )
    patch_object_from_request(request, project)
    # Pre-populate prefetch caches so mapping avoids extra queries.
    project.fill_prefetch_cache("collections", project.collections.all())
    project.fill_prefetch_cache(
        "projectusers", project.projectusers.select_related("user"))
    project.fill_prefetch_cache(
        "projecttags",
        (
            project.projecttags
            .select_related("global_tag")
            .with_taginstance_sum()
        )
    )
    remove_project_list_user_cache(self.current_user)
    return self.mapper.map(project, current_user=self.current_user)
@greenday_method(IDContainer, message_types.VoidMessage,
                 path='project/{id}', http_method='DELETE', name='delete',
                 pre_middlewares=[auth_required])
@api_appevent(
    EventKind.PROJECTDELETED,
    id_getter=lambda s, req: req.id,
    project_id_getter=lambda s, req: req.id)
def project_delete(self, request):
    """
    Deletes a project

    Hard delete (trash=False); only the project owner may do this.
    """
    doomed = self.get_project(
        request.id, check_fn=lambda p: p.owner == self.current_user)
    doomed.delete(trash=False)
    remove_project_list_user_cache(self.current_user)
    return message_types.VoidMessage()
@greenday_method(IDContainer, ProjectResponseMessage,
                 path='project/{id}/restore', http_method='POST',
                 name='restore', pre_middlewares=[auth_required])
@api_appevent(
    EventKind.PROJECTRESTORED,
    id_getter=lambda s, req: req.id,
    project_id_getter=lambda s, req: req.id)
def project_restore(self, request):
    """
    Restores a deleted project

    Looks the project up through the trash manager; only a superuser or
    the project owner may restore it.

    Raises:
        NotFoundException: if no trashed project has the given id.
        ForbiddenException: otherwise, when the caller lacks permission.
    """
    try:
        project = Project.trash.get(pk=request.id)
    except Project.DoesNotExist:
        raise NotFoundException(
            "{0} instance with {1} not found".format(
                Project.__name__, 'pk=%s' % request.id))
    if (not self.current_user.is_superuser and
            self.current_user != project.owner):
        raise ForbiddenException
    project.restore()
    # Seed prefetch caches so response mapping avoids extra queries.
    project.fill_prefetch_cache("collections", project.collections.all())
    project.fill_prefetch_cache(
        "projectusers", project.projectusers.select_related("user"))
    remove_project_list_user_cache(self.current_user)
    return self.mapper.map(project, current_user=self.current_user)
@greenday_method(ProjectIDContainer, ProjectUserListResponse,
                 path='project/{project_id}/users', http_method='GET',
                 name='users', pre_middlewares=[auth_required])
def project_users(self, request):
    """
    Gets all users assigned to a project

    Pending invitations are visible only to superusers and to the
    project's owner/admins.
    """
    project = self.get_project(
        request.project_id,
        assigned_only=True
    )
    qs = project.projectusers.select_related('user').all()
    can_see_pending = (
        self.current_user.is_superuser or
        project.is_owner_or_admin(self.current_user)
    )
    if not can_see_pending:
        qs = qs.filter(is_pending=False)
    return ProjectUserListResponse(
        items=map(self.user_mapper.map, qs),
        is_list=True)
@greenday_method(ProjectUserEntityContainer, ProjectUserListResponse,
                 path='project/{project_id}/users', http_method='POST',
                 name='add_user', pre_middlewares=[auth_required])
def project_add_user(self, request):
    """
    Adds one or more users to the project

    ``request.email`` may contain several addresses; each extracted email
    is invited in turn. Existing users are attached directly; unknown
    addresses get a placeholder account and a sign-up invitation email.
    """
    # Fix: the method documentation was previously a bare string inside the
    # loop, and the project lookup (including the owner/admin permission
    # check) was repeated for every email address. Both are hoisted here;
    # note the permission check now also runs when no email parses.
    project = self.get_project(
        request.project_id,
        check_fn=lambda p: p.is_owner_or_admin(self.current_user)
    )
    project_users = []
    for email in get_emails(request.email):
        def send_invite_email():
            send_email(
                "You've been invited to collaborate on a Montage project",
                EXISTING_USER_INVITED.format(
                    project_name=project.name,
                    home_link='https://montage.meedan.com'),
                email
            )
        User = get_user_model()
        project_user = None
        try:
            user = User.objects.get(email=email)
            project_user = (project.add_admin
                            if request.as_admin
                            else project.add_assigned)(user)
            eventbus.publish_appevent(
                kind=EventKind.USERINVITEDASPROJECTADMIN
                if request.as_admin else EventKind.USERINVITEDASPROJECTUSER,
                object_id=user.pk,
                project_id=project.pk,
                user=self.current_user
            )
            send_invite_email()
        except User.DoesNotExist:
            # NOTE(review): this creates a regular User record for the
            # unknown address; given the PendingUser import at the top of
            # the module, confirm a PendingUser was not intended here.
            pending_user, created = User.objects.get_or_create(
                email=email,
                username=email)
            project_user = (project.add_admin
                            if request.as_admin
                            else project.add_assigned)(pending_user)
            eventbus.publish_appevent(
                kind=EventKind.PENDINGUSERINVITEDASPROJECTADMIN
                if request.as_admin
                else EventKind.PENDINGUSERINVITEDASPROJECTUSER,
                object_id=pending_user.pk,
                project_id=project.pk,
                meta=pending_user.email,
                user=self.current_user
            )
            if created:
                send_email(
                    "You've been invited to join Montage",
                    NEW_USER_INVITED.format(
                        project_name=project.name,
                        home_link='https://montage.meedan.com'),
                    pending_user.email
                )
            else:
                send_invite_email()
        project_users.append(project_user)
    return ProjectUserListResponse(
        items=map(self.user_mapper.map, project_users),
        is_list=True)
@greenday_method(ProjectUserIDEntityContainer, message_types.VoidMessage,
                 path='project/{project_id}/users/{id}',
                 http_method='DELETE',
                 name='remove_user',
                 pre_middlewares=[auth_required])
def project_remove_user(self, request):
    """
    Removes a user from the project

    id: The ProjectUser ID

    Restricted to the project owner or an admin. Publishes USERREMOVED or
    PENDINGUSERREMOVED depending on which id the membership row carries.
    """
    project = self.get_project(
        request.project_id,
        check_fn=lambda p: p.is_owner_or_admin(self.current_user)
    )
    # NOTE(review): the ProjectUser is fetched by pk only — it is not
    # verified to belong to request.project_id. Confirm an admin of one
    # project cannot delete membership rows of another.
    projectuser = get_obj_or_api_404(ProjectUser, pk=request.id)
    projectuser.delete()
    # Reading user_id / pending_user_id after delete() is fine: the
    # in-memory instance keeps its field values after the row is removed.
    if projectuser.user_id:
        eventbus.publish_appevent(
            kind=EventKind.USERREMOVED,
            object_id=projectuser.user_id,
            project_id=project.pk,
            user=self.current_user
        )
    elif projectuser.pending_user_id:
        eventbus.publish_appevent(
            kind=EventKind.PENDINGUSERREMOVED,
            object_id=projectuser.pending_user_id,
            project_id=project.pk,
            user=self.current_user
        )
    return message_types.VoidMessage()
@greenday_method(ProjectIDContainer, ProjectUpdatesMessage,
                 path='project/my/{project_id}/updates',
                 http_method='GET', name='my_project_updates',
                 pre_middlewares=[auth_required])
def project_updates_user(self, request):
    """
    Gets a list of all project updates that the user has not yet seen

    "Updates" are VIDEOCREATED / USERACCEPTEDPROJECTINVITE events that
    occurred after the user's last_updates_viewed timestamp.
    """
    project = self.get_project(
        request.project_id, assigned_only=True
    )
    user_relation = project.get_user_relation(self.current_user)
    if not user_relation:
        raise ForbiddenException(
            "User has no relationship with this project")
    events, objects = eventbus.get_events_with_objects(
        user_relation.last_updates_viewed,
        kind=(
            EventKind.VIDEOCREATED,
            EventKind.USERACCEPTEDPROJECTINVITE,
        ),
        project_id=request.project_id
    )
    items = {
        "created_videos": [],
        "users_joined": []
    }
    # Response buckets keyed by the event object's model type.
    model_type_to_event_type = {
        "video": "created_videos",
        "user": "users_joined"
    }
    for model_type, models in objects.items():
        for model in models:
            # assuming that there should only be one event per object
            event = next(
                (e for e in events if e.object_id == model.id), None)
            update_message = self.project_update_mapper.map(model, event)
            items[model_type_to_event_type[model_type]].append(
                update_message)
    return ProjectUpdatesMessage(**items)
@greenday_method(ProjectIDContainer, ProjectUpdateCountsMessage,
                 path='project/my/{project_id}/updates/counts',
                 http_method='GET', name='my_project_update_counts',
                 pre_middlewares=[auth_required])
def project_update_counts_user(self, request):
    """
    Gets a count of all project updates that the user has not seen
    """
    project = self.get_project(
        request.project_id, assigned_only=True
    )
    user_relation = project.get_user_relation(self.current_user)
    if not user_relation:
        raise ForbiddenException(
            "User has no relationship with this project")
    counts = eventbus.get_event_counts(
        user_relation.last_updates_viewed,
        kind=(
            EventKind.VIDEOCREATED,
            EventKind.USERACCEPTEDPROJECTINVITE,
        ),
        project_id=request.project_id)
    # Translate the event-bus model buckets straight into message fields.
    return ProjectUpdateCountsMessage(
        created_videos=counts.get("video", 0),
        users_joined=counts.get("user", 0))
@greenday_method(
    ProjectIDContainer,
    message_types.VoidMessage,
    path='project/my/{project_id}/update-last-viewed',
    http_method='POST',
    name='update_last_viewed',
    pre_middlewares=[auth_required])
def update_last_viewed(self, request):
    """
    Updates the timestamp at which the user last viewed project updates
    """
    # Only users assigned to the project may mark its updates as seen.
    self.get_project(
        request.project_id, assigned_only=True
    ).set_updates_viewed(self.current_user)
    return message_types.VoidMessage()
@greenday_method(
    ProjectIDContainer,
    ProjectResponseMessage,
    path='project/my/{project_id}/accept',
    http_method='POST',
    name='accept_project_invitation',
    pre_middlewares=[auth_required])
@api_appevent(
    EventKind.USERACCEPTEDPROJECTINVITE,
    id_getter=lambda s, req: s.current_user.id,
    project_id_getter=lambda s, req: req.project_id)
def accept_project_invitation(self, request):
    """
    Accepts an invitation to be assigned to a project

    Raises:
        ForbiddenException: if the user has no relation to the project.
        BadRequestException: if the invitation is not pending.
    """
    project = self.get_project(request.project_id)
    projectuser = project.get_user_relation(self.current_user)
    if not projectuser:
        raise ForbiddenException
    if not projectuser.is_pending:
        raise BadRequestException("User is not pending")
    projectuser.is_pending = False
    # Start the "unseen updates" clock from the moment of acceptance.
    projectuser.last_updates_viewed = timezone.now()
    projectuser.save()
    project.fill_prefetch_cache(
        "projectusers", project.projectusers.select_related("user").all())
    remove_project_list_user_cache(self.current_user)
    return self.mapper.map(project, current_user=self.current_user)
@greenday_method(
    ProjectIDContainer,
    message_types.VoidMessage,
    path='project/my/{project_id}/reject',
    http_method='POST',
    name='reject_project_invitation',
    pre_middlewares=[auth_required])
@api_appevent(
    EventKind.USERREJECTEDPROJECTINVITE,
    id_getter=lambda s, req: s.current_user.id,
    project_id_getter=lambda s, req: req.project_id)
def reject_project_invitation(self, request):
    """
    Rejects an invitation to be assigned to a project

    The pending membership row is deleted outright.

    Raises:
        ForbiddenException: if the user has no relation to the project.
        BadRequestException: if the invitation is not pending.
    """
    project = self.get_project(request.project_id)
    projectuser = project.get_user_relation(self.current_user)
    if not projectuser:
        raise ForbiddenException
    if not projectuser.is_pending:
        raise BadRequestException("User is not pending")
    projectuser.delete()
    remove_project_list_user_cache(self.current_user)
    # TODO: email?
    return message_types.VoidMessage()
@greenday_method(
    ProjectIDContainer,
    ProjectStatsMessage,
    path='project/{project_id}/stats',
    http_method='GET',
    name='get_project_stats',
    pre_middlewares=[auth_required])
def get_project_stats(self, request):
    """
    Gets stats on videos and tag counts for the project

    Returns totals for videos (live / archived / favourited / watched),
    tag instance counts and the project's 20 most-used tags. The result
    is cached per project via cache_manager.
    """
    project = self.get_project(request.project_id)

    def _get_project_stats(project):
        total_tags = VideoTagInstance.objects.filter(
            video_tag__project=project).count()
        tag_name_path = "video_tag__project_tag__global_tag__name"
        tag_name_count_path = "{0}__count".format(tag_name_path)
        # Top 20 tag names by number of instances within this project.
        tags = (
            VideoTagInstance.objects
            .filter(video_tag__project=project)
            .values(tag_name_path)
            .annotate(Count(tag_name_path))
            .order_by("-" + tag_name_count_path)
        )[:20]
        return {
            "total_videos": (
                Video.non_trashed_objects
                .filter(project=project)
                .count()
            ),
            "archived_videos": Video.trash.filter(project=project).count(),
            "favourited_videos": (
                Video.objects
                .filter(project=project)
                .filter(favourited=True)
                .count()
            ),
            "video_tags": total_tags,
            # Fix: scope the watched-video count to this project — every
            # sibling stat filters on project, but the original watched
            # query had no project filter and counted system-wide.
            "watched_videos": (
                Video.objects
                .filter(project=project)
                .values("related_users")
                .filter(related_users__watched=True)
                .annotate(Count('related_users'))
                .filter(related_users__count__gt=0)
                .count()
            ),
            "total_tags": total_tags,
            "top_tags": [
                TagCountMessage(
                    name=tag_obj[tag_name_path],
                    count=tag_obj[tag_name_count_path]
                ) for tag_obj in tags]
        }
    items = cache_manager.get_or_set(
        _get_project_stats,
        project)
    return ProjectStatsMessage(**items)
@greenday_method(
ProjectIDContainer,
DistinctChannelListResponse,
path='project/{project_id}/distinct_channels',
http_method='GET',
name='distinct_channels',
pre_middlewares=[auth_required])
def get_distinct_channels(self, request):
"""
Gets a distinct list of channels from videos in this project
"""
project | |
the beginning -
# not after creating interaction terms and weeding out collinear variables?
# =============================================================================
data = data1_cl
oversampling_data = data.copy(deep=True)
for i in range(0,1):
oversampling_data = pd.concat([oversampling_data,oversampling_data[oversampling_data["poi"] == 1]],axis=0)
oversampling_data["poi"].value_counts()
oversampling_data.info()
# now we have a rather balanced dataset, lets look at the impact of using the same data multiple times...
# results for different numbers of POIs below
#%%
data1_cl.index
#%%
from imblearn import over_sampling
#data = scaled_data
data = data1_cl
predictors = data.drop(target,axis=1).columns
smote_item = over_sampling.BorderlineSMOTE()
X_re, y_re = smote_item.fit_resample(data[predictors],data[target])
X_re = pd.DataFrame(X_re)
y_re = pd.DataFrame(y_re)
X_re.columns=predictors
y_re.columns=[target]
oversampling_blsmote = pd.concat([X_re, y_re],axis=1)
oversampling_blsmote.sample(5)
#%%
oversampling_blsmote.info()
#%%
oversampling_blsmote["poi"].value_counts()
"""
#%%
# Scale every predictor into (0.0001, 1] BEFORE automated feature creation
# so the engineered ratio/product features are comparable across predictors
# and strictly positive (avoids zero denominators in the division step).
data = data1_cl
#data1 = train
#data = oversampling_blsmote
predictors = data.drop(target,axis=1).columns
index = data.index # need to conserve index - dont need index for smote
scaler = MinMaxScaler(feature_range=(0.0001,1))
scaler.fit(data[predictors])
scaled_data = scaler.transform(data[predictors])
scaled_data = pd.DataFrame(scaled_data, columns = predictors, index = index) # no need for smote
#scaled_data = pd.DataFrame(scaled_data, columns = predictors)
# Re-attach the (unscaled) target column alongside the scaled predictors.
scaled_data = pd.concat([scaled_data, data[target]], axis=1)
#%%
scaled_data.sample(10) # works
# now we can apply some automated feature engineering
#%%
scaled_data.info()
#%%
### Task 3: Create new feature(s) # apply this on scaled features and do it automatically.
# Why? We do dozens of combinations and we want the importance of each feature to be comparable to all of the other features individually.
# For example, we think about combining features like "Number of total emails written by person" and "Different unique persons our person wrote to",
# the former feature value will be much higher than number of different persons that person wrote to,
# so we need to scale or otherwise lose the informative value of number of different persons when combining those two features
# we will use automatic feature creation since we are lazy and remove redundant and correlated features with feature selection mechanics.
# create interaction terms for classification
def create_interaction_variables(data, columns):
    """Append pairwise interaction features built from the given columns.

    For each ordered pair (i, j) of the numeric columns:
      * i <= j : product column  "a*b" (squares allowed)
      * i <  j : sum column      "a+b"
      * i != j : quotient "a/b" and difference "a-b" (both directions)

    Fix: the original iterated ``range(0, size - 1)`` in both loops, which
    silently excluded the last column from every combination (it never even
    got squared); both loops now cover all columns.

    Args:
        data (pd.DataFrame): frame to extend (a new frame is returned).
        columns: the numeric predictor columns to combine (exclude target).

    Returns:
        pd.DataFrame: ``data`` with the interaction columns appended.
    """
    numerics = data.loc[:, columns]  # apply this only on numeric columns without target column
    n_cols = numerics.columns.size
    for i in range(n_cols):
        for j in range(n_cols):
            col1 = str(numerics.columns.values[i])
            col2 = str(numerics.columns.values[j])
            # multiply fields together (we allow values to be squared)
            if i <= j:
                name = col1 + "*" + col2
                data = pd.concat([data, pd.Series(numerics.iloc[:, i] * numerics.iloc[:, j], name=name)], axis=1)
            # add fields together
            if i < j:
                name = col1 + "+" + col2
                data = pd.concat([data, pd.Series(numerics.iloc[:, i] + numerics.iloc[:, j], name=name)], axis=1)
            # divide and subtract fields from each other
            if not i == j:
                name = col1 + "/" + col2
                data = pd.concat([data, pd.Series(numerics.iloc[:, i] / numerics.iloc[:, j], name=name)], axis=1)
                name = col1 + "-" + col2
                data = pd.concat([data, pd.Series(numerics.iloc[:, i] - numerics.iloc[:, j], name=name)], axis=1)
        print("Column {} done, moving on to next column.".format(col1))
    return data
# Build all interaction features over the scaled predictors (target excluded).
predictors = scaled_data.drop(target,axis=1).columns
scaled_data = create_interaction_variables(scaled_data, predictors)
#%%
scaled_data.info()
#1066 columns, so created roughly 1045
#%%
scaled_data.sample(10)
#%%
# opt to drop the columns with nan or inf values
def del_nan_or_inf_cols(data):
    """Drop, in place, every column of *data* containing a NaN or +/-inf value.

    Interaction terms built via division can produce non-finite entries;
    such columns are removed outright rather than imputed.
    """
    for col in list(data.columns):
        # np.isfinite is False for NaN as well as +/-inf, so one check covers both.
        if np.isfinite(data[col]).all():
            continue
        data.drop([col], axis=1, inplace=True)
        print("dropped col: ", col)
    # NOTE: mutates *data*; nothing is returned.
# Remove, in place, every interaction column that picked up NaN/inf values.
del_nan_or_inf_cols(scaled_data)
#%%
def remove_correlated_variables(data, columns, target, inplace = False):
    """Drop predictors that are highly (Spearman) correlated with another predictor.

    Parameters
    ----------
    data : pd.DataFrame
        Frame holding the predictors and the target column.
    columns : sequence
        Unused; kept for backward compatibility with existing callers.
    target : str
        Name of the target column; it is never considered for dropping.
    inplace : bool
        Passed through to ``DataFrame.drop``.

    Returns
    -------
    pd.DataFrame or None
        The reduced frame (None when ``inplace=True``, per pandas convention).
    """
    # Correlation matrix over the predictors only. BUGFIX: excluding the target
    # column here guarantees the target can never end up in the drop list (the
    # previous version could drop the target when it correlated with a predictor).
    df_corr = data.drop(target, axis=1).corr(method='spearman')
    # Zero out the diagonal so self-correlation is ignored.
    mask = np.ones(df_corr.columns.size) - np.eye(df_corr.columns.size)
    df_corr = mask * df_corr
    drops = []
    # loop through each variable
    for col in df_corr.columns.values:
        # if we've already determined to drop the current variable, continue
        if np.isin([col], drops):
            continue
        # find all the variables that are highly correlated with the current variable
        # and add them to the drop list
        corr = df_corr[abs(df_corr[col]) > 0.93].index  # remove all above 0.9x correlation-we varied x for tests
        drops = np.union1d(drops, corr)
    print("\nDropping", drops.shape[0], "highly correlated features...\n", drops)
    return data.drop(drops, axis=1, inplace=inplace)
remove_correlated_variables(scaled_data, predictors, target, inplace = True)
#%%
scaled_data.info()
#%%
"""
# =============================================================================
# adding original variables and target to the dataset
# =============================================================================
# do we have all the original predictors in the scaled_data?
np.isin(train.columns,scaled_data.columns)
#if not...
#%%
#data = scaled_data_vif
data = scaled_data
predictors = data.drop(target, axis=1).columns.values
# add all the original predictors to the dataset and let XGB find the best predictor set for best CV score
# by using the feature importances of XGB
for x in train.drop(target, axis=1).columns:
if x not in predictors:
data[x] = train[x]
# reassigning predictors
predictors = data.drop(target, axis=1).columns.values
data.info()
#%%
scaled_data.info()
"""
#%%
# =============================================================================
# # test the variables for effect on classification
# splitting strategy
# =============================================================================
#cv_split = StratifiedShuffleSplit(n_splits = 4, random_state = 42)
cv_split = StratifiedShuffleSplit(n_splits = 10, random_state = 42) # more splits for oversampled data
### Task 4: Try a variety of classifiers
### Please name your classifier clf for easy export below.
### Note that if you want to do PCA or other multi-stage operations,
### you'll need to use Pipelines. For more info:
### http://scikit-learn.org/stable/modules/pipeline.html
# we compare different clfs, application of pca and feature selection techniques via cv (maybe pipelined)
#%%
# Comparison of some models
# Machine Learning Algorithm (MLA) Selection and Initialization
# (name, estimator) pairs consumed by the cross-validation loop further below.
MLA = [
    ('rfr', ensemble.RandomForestClassifier(n_jobs = 8, random_state=42)),
    ('lr', linear_model.LogisticRegression()),
    ('knn', neighbors.KNeighborsClassifier()),
    ('svc', svm.SVC()),
    ('nusvc', svm.NuSVC(nu=0.1)), # we have a little above 10% positive cases (POI) in the dataset
    ('linearsvc', svm.LinearSVC()),
    ('xgb', xgboost.XGBClassifier(random_state=42, n_jobs=8)),
    ('NB', GaussianNB())
]
#%%
# =============================================================================
# apply scaling to data for some MLA algorithms.
# Ideally do this in a cv pipeline with transformations on train and test
# =============================================================================
# NOTE(review): fitting the scaler on the full dataset before CV leaks
# information from the test folds; as noted above, a CV pipeline would avoid it.
#unclean data
#data = train
#test with cleaned data (outliers removed)
#data = data1_cl
datatarget = data1_cl
#datatarget = oversampling_blsmote
#test with interaction data without outliers
#scaled_data = pd.concat([scaled_data, datatarget[target]], axis=1)
data = scaled_data
index = data.index
columns = data.drop(target,axis=1).columns
scaler = StandardScaler()
scaler.fit(data[columns])
scaled_data = scaler.transform(data[columns])
scaled_data = pd.DataFrame(scaled_data, columns = columns, index = index) # no need for smote
#scaled_data = pd.DataFrame(scaled_data, columns = columns)
# re-attach the (unscaled) target column to the scaled predictors
scaled_data = pd.concat([scaled_data, datatarget[target]], axis=1)
#%%
scaled_data.sample(10)
# 300 !
#%%
# =============================================================================
# test effect of oversampling and SMOTE as well !
# in a new project we should probably start oversampling at the beginning -
# not after creating interaction terms and weeding out collinear variables?
# =============================================================================
# Naive oversampling: duplicate the positive (POI) rows once.
oversampling_data = scaled_data.copy(deep=True)
for i in range(0,1):
    oversampling_data = pd.concat([oversampling_data,oversampling_data[oversampling_data["poi"] == 1]],axis=0)
oversampling_data["poi"].value_counts()
oversampling_data.info()
# now we have a rather balanced dataset, lets look at the impact of using the same data multiple times...
# results for different numbers of POIs below
#%%
# =============================================================================
# # oversampling with smote after feature engineering
# =============================================================================
from imblearn import over_sampling
data = scaled_data
predictors = data.drop(target,axis=1).columns
# Borderline-SMOTE synthesizes minority samples near the class boundary.
smote_item = over_sampling.BorderlineSMOTE(random_state = 42)
X_re, y_re = smote_item.fit_resample(scaled_data[predictors],scaled_data[target])
X_re = pd.DataFrame(X_re)
y_re = pd.DataFrame(y_re)
X_re.columns=predictors
y_re.columns=[target]
oversampling_blsmote = pd.concat([X_re, y_re],axis=1)
oversampling_blsmote.sample(5)
#%%
oversampling_blsmote.info()
#%%
oversampling_blsmote["poi"].value_counts()
# even dataset now - 50:50!
#%%
# Choose which dataset variant feeds the model comparison below.
#data = scaled_data
#data = oversampling_data
data = oversampling_blsmote
#data = scaled_data_vif
#data = scaled_data1
predictors=data.drop(target,axis=1).columns
#predictors = xgb_preds
#predictors = xgb_uncleaned_data_pred
#data = data1_dummy
#predictors = data1_dummy.drop(target, axis=1).columns
#create table to compare MLA metrics
MLA_columns = ["MLA Name", "MLA Train f1 Mean", "MLA Test f1 Mean", "MLA Test f1 3*STD", "MLA TIME"]
MLA_compare = pd.DataFrame(columns=MLA_columns)
# index through MLA and save performance to table
# NOTE(review): cross-validating on SMOTE-resampled data lets synthetic
# neighbours of a sample appear in both train and test folds, which can
# inflate the scores; resampling inside each fold would be stricter.
row_index = 0
for name, alg in MLA:
    #set name and params
    MLA_name = alg.__class__.__name__
    MLA_compare.loc[row_index, "MLA Name"] = MLA_name
    #score model with cross validation
    #we score with f1 score for the best classifier
    cv_results = model_selection.cross_validate(alg, data[predictors], data[target], cv=cv_split,
                                                scoring = "f1", return_train_score=True, n_jobs = -1, verbose = True)
    MLA_compare.loc[row_index, "MLA TIME"] = cv_results["fit_time"].mean()
    MLA_compare.loc[row_index, "MLA Train f1 Mean"] = cv_results["train_score"].mean()
    MLA_compare.loc[row_index, "MLA Test f1 Mean"] = cv_results["test_score"].mean()
    #if this is a non-bias random sample, then +/-3 std from the mean, should statistically capture 99,7% of the subsets
    MLA_compare.loc[row_index, "MLA Test f1 3*STD"] = cv_results["test_score"].std()*3
    #let's know the worst that can happen
    row_index+=1
# print and sort table:
MLA_compare.sort_values(by= ["MLA Test f1 Mean"], ascending = False, inplace=True)
MLA_compare
#%%
"""
results for original predictor set with 4 folds:
MLA Name MLA Train f1 Mean MLA Test f1 Mean \
0 RandomForestClassifier 0.937224 0
1 LogisticRegression 0.584416 0
2 KNeighborsClassifier 0.185415 0
3 SVC 0.571429 0
4 NuSVC 0.965517 0
5 LinearSVC 0.674783 0
6 XGBClassifier 1 0
MLA Test f1 3*STD MLA TIME
0 0 0.109176
1 0 0.0043052
2 0 0.000978072
3 0 0.00166225
4 0 0.00132322
5 0 0.00430886
6 0 0.0495253
#results for scaled_data without outliers and with interaction terms - 4folds and no original predictors with 4 folds
MLA Name MLA Train f1 Mean MLA Test f1 Mean \
1 LogisticRegression 0.965517 0.45
5 LinearSVC 1 0.283333
6 XGBClassifier 1 0.125
0 RandomForestClassifier 0.964286 0
2 KNeighborsClassifier 0.173407 0
3 SVC 0.551948 0
4 NuSVC 0.956281 0 | |
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from __future__ import unicode_literals
from django.db import models
class MaeOpcion(models.Model):
    """Menu-option master record (unmanaged legacy table MAE_OPCION).

    NOTE(review): no field declares primary_key=True, so Django will inject an
    automatic ``id`` column that does not exist in the real table — confirm the
    actual key (``opcion_id``?) and mark it ``primary_key=True``.
    """
    tipo_opcion_id = models.IntegerField(db_column='TIPO_OPCION_ID')  # Field name made lowercase.
    nom_opcion = models.CharField(db_column='NOM_OPCION', max_length=50, blank=True, null=True)  # Field name made lowercase.
    des_opcion = models.CharField(db_column='DES_OPCION', max_length=50, blank=True, null=True)  # Field name made lowercase.
    flag_activo = models.CharField(db_column='FLAG_ACTIVO', max_length=1, blank=True, null=True)  # Field name made lowercase.
    flag_eliminado = models.CharField(db_column='FLAG_ELIMINADO', max_length=1, blank=True, null=True)  # Field name made lowercase.
    usr_creacion = models.CharField(db_column='USR_CREACION', max_length=8, blank=True, null=True)  # Field name made lowercase.
    fec_creacion = models.DateTimeField(db_column='FEC_CREACION', blank=True, null=True)  # Field name made lowercase.
    usr_edicion = models.CharField(db_column='USR_EDICION', max_length=8, blank=True, null=True)  # Field name made lowercase.
    fec_edicion = models.DateTimeField(db_column='FEC_EDICION', blank=True, null=True)  # Field name made lowercase.
    opcion_id = models.IntegerField(db_column='OPCION_ID')  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'MAE_OPCION'
class MaePermiso(models.Model):
    """Permission master record (unmanaged legacy table MAE_PERMISO)."""
    id_permiso = models.AutoField(db_column='ID_PERMISO', primary_key=True)  # Field name made lowercase.
    des_permiso = models.CharField(db_column='DES_PERMISO', max_length=50, blank=True, null=True)  # Field name made lowercase.
    cod_permiso = models.CharField(db_column='COD_PERMISO', max_length=8, blank=True, null=True)  # Field name made lowercase.
    nom_permiso = models.CharField(db_column='NOM_PERMISO', max_length=50, blank=True, null=True)  # Field name made lowercase.
    flag_activo = models.CharField(db_column='FLAG_ACTIVO', max_length=1, blank=True, null=True)  # Field name made lowercase.
    flag_eliminado = models.CharField(db_column='FLAG_ELIMINADO', max_length=1, blank=True, null=True)  # Field name made lowercase.
    usr_creacion = models.CharField(db_column='USR_CREACION', max_length=8, blank=True, null=True)  # Field name made lowercase.
    fec_creacion = models.DateTimeField(db_column='FEC_CREACION', blank=True, null=True)  # Field name made lowercase.
    usr_edicion = models.CharField(db_column='USR_EDICION', max_length=8, blank=True, null=True)  # Field name made lowercase.
    fec_edicion = models.DateTimeField(db_column='FEC_EDICION', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'MAE_PERMISO'
class MaeProyecto(models.Model):
    """Project master record (unmanaged legacy table MAE_PROYECTO)."""
    id_proyecto = models.AutoField(db_column='ID_PROYECTO', primary_key=True)  # Field name made lowercase.
    sigla_proy = models.CharField(db_column='SIGLA_PROY', max_length=20, blank=True, null=True)  # Field name made lowercase.
    anio_proy = models.CharField(db_column='ANIO_PROY', max_length=4, blank=True, null=True)  # Field name made lowercase.
    des_proy = models.CharField(db_column='DES_PROY', max_length=60, blank=True, null=True)  # Field name made lowercase.
    tipo_proy = models.CharField(db_column='TIPO_PROY', max_length=1, blank=True, null=True)  # Field name made lowercase.
    fec_inicio = models.DateTimeField(db_column='FEC_INICIO', blank=True, null=True)  # Field name made lowercase.
    fec_final = models.DateTimeField(db_column='FEC_FINAL', blank=True, null=True)  # Field name made lowercase.
    observacion = models.CharField(db_column='OBSERVACION', max_length=250, blank=True, null=True)  # Field name made lowercase.
    flag_activo = models.CharField(db_column='FLAG_ACTIVO', max_length=1, blank=True, null=True)  # Field name made lowercase.
    flag_eliminado = models.CharField(db_column='FLAG_ELIMINADO', max_length=1, blank=True, null=True)  # Field name made lowercase.
    usr_creacion = models.CharField(db_column='USR_CREACION', max_length=8, blank=True, null=True)  # Field name made lowercase.
    fec_creacion = models.DateTimeField(db_column='FEC_CREACION', blank=True, null=True)  # Field name made lowercase.
    usr_edicion = models.CharField(db_column='USR_EDICION', max_length=8, blank=True, null=True)  # Field name made lowercase.
    fec_edicion = models.DateTimeField(db_column='FEC_EDICION', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'MAE_PROYECTO'
class MaeRol(models.Model):
    """Role master record (unmanaged legacy table MAE_ROL)."""
    id_rol = models.AutoField(db_column='ID_ROL', primary_key=True)  # Field name made lowercase.
    des_rol = models.CharField(db_column='DES_ROL', max_length=50, blank=True, null=True)  # Field name made lowercase.
    nom_rol = models.CharField(db_column='NOM_ROL', max_length=50, blank=True, null=True)  # Field name made lowercase.
    flag_activo = models.CharField(db_column='FLAG_ACTIVO', max_length=1, blank=True, null=True)  # Field name made lowercase.
    flag_eliminado = models.CharField(db_column='FLAG_ELIMINADO', max_length=1, blank=True, null=True)  # Field name made lowercase.
    usr_creacion = models.CharField(db_column='USR_CREACION', max_length=8, blank=True, null=True)  # Field name made lowercase.
    fec_creacion = models.DateTimeField(db_column='FEC_CREACION', blank=True, null=True)  # Field name made lowercase.
    usr_edicion = models.CharField(db_column='USR_EDICION', max_length=8, blank=True, null=True)  # Field name made lowercase.
    fec_edicion = models.DateTimeField(db_column='FEC_EDICION', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'MAE_ROL'
class MaeSistema(models.Model):
    """System master record (unmanaged legacy table MAE_SISTEMA)."""
    id_sistema = models.AutoField(db_column='ID_SISTEMA', primary_key=True)  # Field name made lowercase.
    des_sist = models.CharField(db_column='DES_SIST', max_length=18, blank=True, null=True)  # Field name made lowercase.
    nom_sist = models.CharField(db_column='NOM_SIST', max_length=18, blank=True, null=True)  # Field name made lowercase.
    flag_activo = models.CharField(db_column='FLAG_ACTIVO', max_length=1, blank=True, null=True)  # Field name made lowercase.
    flag_eliminado = models.CharField(db_column='FLAG_ELIMINADO', max_length=1, blank=True, null=True)  # Field name made lowercase.
    usr_creacion = models.CharField(db_column='USR_CREACION', max_length=8, blank=True, null=True)  # Field name made lowercase.
    fec_creacion = models.DateTimeField(db_column='FEC_CREACION', blank=True, null=True)  # Field name made lowercase.
    usr_edicion = models.CharField(db_column='USR_EDICION', max_length=8, blank=True, null=True)  # Field name made lowercase.
    fec_edicion = models.DateTimeField(db_column='FEC_EDICION', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'MAE_SISTEMA'
class MaeTipoOpcion(models.Model):
    """Option-type master record (unmanaged legacy table MAE_TIPO_OPCION).

    NOTE(review): no field declares primary_key=True — confirm whether
    ``tipo_opcion_id`` is the real key and mark it ``primary_key=True``.
    """
    tipo_opcion_id = models.IntegerField(db_column='TIPO_OPCION_ID')  # Field name made lowercase.
    nom_tipo_opcion = models.CharField(db_column='NOM_TIPO_OPCION', max_length=50, blank=True, null=True)  # Field name made lowercase.
    des_tipo_opcion = models.CharField(db_column='DES_TIPO_OPCION', max_length=50, blank=True, null=True)  # Field name made lowercase.
    flag_activo = models.CharField(db_column='FLAG_ACTIVO', max_length=1, blank=True, null=True)  # Field name made lowercase.
    flag_eliminado = models.CharField(db_column='FLAG_ELIMINADO', max_length=1, blank=True, null=True)  # Field name made lowercase.
    usr_creacion = models.CharField(db_column='USR_CREACION', max_length=8, blank=True, null=True)  # Field name made lowercase.
    fec_creacion = models.DateTimeField(db_column='FEC_CREACION', blank=True, null=True)  # Field name made lowercase.
    usr_edicion = models.CharField(db_column='USR_EDICION', max_length=8, blank=True, null=True)  # Field name made lowercase.
    fec_edicion = models.DateTimeField(db_column='FEC_EDICION', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'MAE_TIPO_OPCION'
class MaeUsuario(models.Model):
    """User account master record (unmanaged legacy table MAE_USUARIO).

    NOTE(review): ``clave`` appears to hold the login credential — verify it
    stores a hash, not a plain-text password.
    """
    id_usuario = models.AutoField(db_column='ID_USUARIO', primary_key=True)  # Field name made lowercase.
    dni = models.CharField(db_column='DNI', max_length=8)  # Field name made lowercase.
    ape_pat_per = models.CharField(db_column='APE_PAT_PER', max_length=35, blank=True, null=True)  # Field name made lowercase.
    ape_mat_per = models.CharField(db_column='APE_MAT_PER', max_length=35, blank=True, null=True)  # Field name made lowercase.
    nom_emp_per = models.CharField(db_column='NOM_EMP_PER', max_length=35, blank=True, null=True)  # Field name made lowercase.
    fec_nac_per = models.DateTimeField(db_column='FEC_NAC_PER', blank=True, null=True)  # Field name made lowercase.
    email_insti = models.CharField(db_column='EMAIL_INSTI', max_length=50, blank=True, null=True)  # Field name made lowercase.
    sex_emp_per = models.CharField(db_column='SEX_EMP_PER', max_length=1, blank=True, null=True)  # Field name made lowercase.
    usuario = models.CharField(db_column='USUARIO', max_length=20, blank=True, null=True)  # Field name made lowercase.
    clave = models.CharField(db_column='CLAVE', max_length=50, blank=True, null=True)  # Field name made lowercase.
    ult_logeo = models.DateTimeField(db_column='ULT_LOGEO', blank=True, null=True)  # Field name made lowercase.
    es_super = models.CharField(db_column='ES_SUPER', max_length=1, blank=True, null=True)  # Field name made lowercase.
    no_cambio = models.CharField(db_column='NO_CAMBIO', max_length=1, blank=True, null=True)  # Field name made lowercase.
    flag_activo = models.CharField(db_column='FLAG_ACTIVO', max_length=1, blank=True, null=True)  # Field name made lowercase.
    flag_eliminado = models.CharField(db_column='FLAG_ELIMINADO', max_length=1, blank=True, null=True)  # Field name made lowercase.
    usr_creacion = models.CharField(db_column='USR_CREACION', max_length=8, blank=True, null=True)  # Field name made lowercase.
    fec_creacion = models.DateTimeField(db_column='FEC_CREACION', blank=True, null=True)  # Field name made lowercase.
    usr_edicion = models.CharField(db_column='USR_EDICION', max_length=8, blank=True, null=True)  # Field name made lowercase.
    fec_edicion = models.DateTimeField(db_column='FEC_EDICION', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'MAE_USUARIO'
class TbAmbito(models.Model):
    """Geographic scope record (unmanaged legacy table TB_AMBITO).

    NOTE(review): the table key is the composite (id_ambito, id_dis_geo)
    declared via unique_together; Django cannot model composite primary keys,
    so it will inject an ``id`` column that does not exist in the real table.
    """
    id_ambito = models.CharField(db_column='ID_AMBITO', max_length=18)  # Field name made lowercase.
    codigo = models.CharField(db_column='CODIGO', max_length=18, blank=True, null=True)  # Field name made lowercase.
    descripcion = models.CharField(db_column='DESCRIPCION', max_length=18, blank=True, null=True)  # Field name made lowercase.
    descripcion_variable = models.CharField(db_column='DESCRIPCION_VARIABLE', max_length=18, blank=True, null=True)  # Field name made lowercase.
    padre_id = models.CharField(db_column='PADRE_ID', max_length=18, blank=True, null=True)  # Field name made lowercase.
    id_dis_geo = models.ForeignKey('TbDistribucionGeografica', models.DO_NOTHING, db_column='ID_DIS_GEO')  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'TB_AMBITO'
        unique_together = (('id_ambito', 'id_dis_geo'),)
class TbAmbitoUsuario(models.Model):
    """User-to-scope join record (unmanaged legacy table TB_AMBITO_USUARIO).

    NOTE(review): both FKs point at TbAmbito; ``id_dis_geo`` presumably
    belongs to TbDistribucionGeografica instead — confirm against the schema.
    No primary_key=True field is declared (composite key via unique_together).
    """
    id_ambito = models.ForeignKey(TbAmbito, models.DO_NOTHING, db_column='ID_AMBITO')  # Field name made lowercase.
    id_dis_geo = models.ForeignKey(TbAmbito, models.DO_NOTHING, db_column='ID_DIS_GEO')  # Field name made lowercase.
    id_usuario = models.CharField(db_column='ID_USUARIO', max_length=8)  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'TB_AMBITO_USUARIO'
        unique_together = (('id_ambito', 'id_dis_geo', 'id_usuario'),)
class TbBd(models.Model):
    """Database catalog record (unmanaged legacy table TB_BD).

    NOTE(review): keyed by the composite (bd_id, servidor_id, servicio_id)
    via unique_together; no primary_key=True field is declared.
    """
    servidor_id = models.CharField(db_column='SERVIDOR_ID', max_length=18)  # Field name made lowercase.
    servicio_id = models.CharField(db_column='SERVICIO_ID', max_length=18)  # Field name made lowercase.
    bd_id = models.CharField(db_column='BD_ID', max_length=18)  # Field name made lowercase.
    nom_bd = models.CharField(db_column='NOM_BD', max_length=50, blank=True, null=True)  # Field name made lowercase.
    flag_activo = models.CharField(db_column='FLAG_ACTIVO', max_length=1, blank=True, null=True)  # Field name made lowercase.
    flag_eliminado = models.CharField(db_column='FLAG_ELIMINADO', max_length=1, blank=True, null=True)  # Field name made lowercase.
    usr_creacion = models.CharField(db_column='USR_CREACION', max_length=8, blank=True, null=True)  # Field name made lowercase.
    fec_creacion = models.DateTimeField(db_column='FEC_CREACION', blank=True, null=True)  # Field name made lowercase.
    usr_edicion = models.CharField(db_column='USR_EDICION', max_length=8, blank=True, null=True)  # Field name made lowercase.
    fec_edicion = models.DateTimeField(db_column='FEC_EDICION', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'TB_BD'
        unique_together = (('bd_id', 'servidor_id', 'servicio_id'),)
class TbDistribucionGeografica(models.Model):
    """Geographic distribution lookup (unmanaged legacy table TB_DISTRIBUCION_GEOGRAFICA)."""
    id_dis_geo = models.CharField(db_column='ID_DIS_GEO', primary_key=True, max_length=18)  # Field name made lowercase.
    descripcion = models.CharField(db_column='DESCRIPCION', max_length=18, blank=True, null=True)  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'TB_DISTRIBUCION_GEOGRAFICA'
class TbIpDelimitado(models.Model):
flag_eliminado = models.CharField(db_column='FLAG_ELIMINADO', max_length=1, blank=True,
null=True) # Field name made lowercase.
usr_creacion = models.CharField(db_column='USR_CREACION', max_length=8, blank=True,
null=True) # Field name made lowercase.
fec_creacion = models.DateTimeField(db_column='FEC_CREACION', blank=True, | |
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File name: glide_calc.py
"""
Created on Mon Mar 26 16:21:26 2018
@author: Neo(<EMAIL>)
Some functions related to glide component calculation.
History
N.Liu, 20 Mar 2018:
"""
import numpy as np
from numpy import sin, cos
import matplotlib.pyplot as plt
from astropy.coordinates import SkyCoord
from astropy import units as u
__all__ = ["glide_gen", "glide_apex_calc", "GA_glide_decomposed",
"glide_field_gen", "glide_plot"]
# ----------------------------- FUNCTIONS -----------------------------
def glide_gen(g, RAdeg, DCdeg, err=None):
    '''Given apex and amplitude, calculate the glide vector.

    NOTE: inputs RAdeg/DCdeg should be given in degree.

    Parameters
    ----------
    g : float
        amplitude of glide vector.
    RAdeg/DCdeg : float
        direction of apex expressed in right ascension and declination
        given in degree. For Galactic center, they could be set as
        267.0/-29.0.
    err : array of float
        uncertainties of g, RAdeg and DCdeg given in rad/deg/deg,
        default value is None. The input array is left unmodified.

    Returns
    ----------
    [g1, g2, g3] : array of float
        glide vector
    errn : array of float
        vector of glide formal uncertainties (only returned when err is given)
    '''
    # Degree -> radian
    RArad = np.deg2rad(RAdeg)
    DCrad = np.deg2rad(DCdeg)
    g1 = g * cos(RArad) * cos(DCrad)
    g2 = g * sin(RArad) * cos(DCrad)
    g3 = g * sin(DCrad)
    if err is None:
        return np.array([g1, g2, g3])
    else:
        # Calculate the uncertainties via the Jacobian of (g1, g2, g3)
        # with respect to (g, RA, DC).
        M = np.array([
            [cos(RArad) * cos(DCrad),
             -g * sin(RArad) * cos(DCrad),
             -g * cos(RArad) * sin(DCrad)],
            [sin(RArad) * cos(DCrad),
             g * cos(RArad) * cos(DCrad),
             -g * sin(RArad) * sin(DCrad)],
            [sin(DCrad),
             0,
             g * cos(DCrad)]])
        # BUGFIX: work on a float copy — the previous version converted the
        # RA/DC uncertainties deg -> rad in place, mutating the caller's array.
        err = np.asarray(err, dtype=float).copy()
        err[1] = np.deg2rad(err[1])
        err[2] = np.deg2rad(err[2])
        errn = np.sqrt(np.dot(M**2, err**2))
        return np.array([g1, g2, g3]), errn
def glide_apex_calc(gv, err_gv=None):
    """Convert a glide vector into its amplitude and apex direction.

    Parameters
    ----------
    gv : array of float
        glide vector (g1, g2, g3)
    err_gv : array of float, optional
        formal errors of the glide vector components, default None

    Returns
    ----------
    Without err_gv: (g, RAdeg, DCdeg).
    With err_gv:    (g, RAdeg, DCdeg, errA, errRA, errDC), where the last
    three are the propagated formal uncertainties (errRA/errDC in degree).
    """
    # Amplitude of the vector.
    amp = np.sqrt(np.sum(gv ** 2))
    # Right ascension: arctan2 yields (-pi, pi]; fold into [0, 2*pi).
    ra_rad = np.arctan2(gv[1], gv[0])
    if ra_rad < 0:
        ra_rad += 2 * np.pi
    # Declination from the vertical component versus the horizontal norm.
    horiz2 = gv[0] ** 2 + gv[1] ** 2
    dc_rad = np.arctan2(gv[2], np.sqrt(horiz2))
    ra_deg = np.rad2deg(ra_rad)
    dc_deg = np.rad2deg(dc_rad)
    if err_gv is None:
        return amp, ra_deg, dc_deg
    # Error propagation through the partial derivatives of (g, RA, DC)
    # with respect to the cartesian components.
    horiz = np.sqrt(horiz2)
    d_amp = gv / amp
    d_ra = np.array([-gv[1], gv[0], 0]) / horiz2
    d_dc = np.array([-gv[0] * gv[2], -gv[1] * gv[2], horiz2]) / amp ** 2 / horiz
    err_amp = np.sqrt(np.dot(d_amp ** 2, err_gv ** 2))
    err_ra = np.rad2deg(np.sqrt(np.dot(d_ra ** 2, err_gv ** 2)))
    err_dc = np.rad2deg(np.sqrt(np.dot(d_dc ** 2, err_gv ** 2)))
    return amp, ra_deg, dc_deg, err_amp, err_ra, err_dc
def GA_glide_decomposed(gv, err_gv):
    """Split a glide vector into its Galactic-Aberration and non-GA parts.

    'G' stands for amplitude while 'g' for vector.

    Parameters
    ----------
    gv : array of float
        glide vector
    err_gv : array of float
        formal error of glide vector

    Returns
    ----------
    list of
        [G_GA, errG_GA, G_NonGA, RA_NonGA, DC_NonGA,
         errG_NonGA, errRA_NonGA, errDC_NonGA]:
        GA-component amplitude and its uncertainty, followed by amplitude,
        apex (degree) and uncertainties of the residual non-GA component.
    """
    # Unit vector toward the adopted Galactic-center apex (266.4, -28.9) deg.
    ga_unit = glide_gen(1.0, 266.4, -28.9)
    # Projection of the glide onto the GA direction.
    amp_ga = np.dot(gv, ga_unit)
    err_amp_ga = np.dot(err_gv, ga_unit)
    # Residual after removing the GA component.
    resid = gv - amp_ga * ga_unit
    resid_err = err_gv - err_amp_ga * ga_unit
    (amp_res, ra_res, dc_res,
     err_amp_res, err_ra_res, err_dc_res) = glide_apex_calc(resid, resid_err)
    return [amp_ga, err_amp_ga, amp_res, ra_res, dc_res,
            err_amp_res, err_ra_res, err_dc_res]
def glide_field_gen(gv, ra, dec):
    """Evaluate the dipole (glide) offset field at positions (ra, dec).

    Parameters
    ----------
    gv : array of float
        glide vector (g1, g2, g3)
    ra : float or array of float
        right ascension in radian
    dec : float or array of float
        declination in radian

    Returns
    ----------
    g_dra : float or array of float
        RA offset induced by glide
    g_ddec : float or array of float
        Dec. offset induced by glide
    """
    g1, g2, g3 = gv
    sin_ra, cos_ra = sin(ra), cos(ra)
    # Tangential components of the dipole at the given sky position.
    dra = g2 * cos_ra - g1 * sin_ra
    ddec = g3 * cos(dec) - (g1 * cos_ra + g2 * sin_ra) * sin(dec)
    return dra, ddec
def glide_plot(gv, output=None, fig_title=None):
    """Plot for glide field.

    Parameters
    ----------
    gv : array of float
        glide vector
    output : string
        Full name of the output file. When None, the figure is shown
        interactively instead of being saved.
    fig_title : string
        Title of the figure

    Returns
    ----------
    None
    """
    # Sample a 20x20 grid over the whole sky (degrees), wrapped to +/-180 deg.
    ra = np.linspace(0, 360, 20) * u.deg
    dec = (np.linspace(-90, 90, 20)) * u.deg
    c = SkyCoord(ra=ra, dec=dec, frame='icrs')
    ra_rad = c.ra.wrap_at(180 * u.deg).radian
    dec_rad = c.dec.radian
    # Glide field
    X, Y = np.meshgrid(ra_rad, dec_rad)
    U, V = glide_field_gen(gv, X, Y)
    # GC position (Galactic center, l=0, b=0, converted to ICRS)
    c1_gal = SkyCoord(l=0 * u.deg, b=0 * u.deg, frame="galactic")
    c1_icrs = c1_gal.icrs
    ra_gc_rad = c1_icrs.ra.wrap_at(180 * u.deg).radian
    dec_gc_rad = c1_icrs.dec.radian
    # Anti-GC position
    c2_gal = SkyCoord(l=180 * u.deg, b=0 * u.deg, frame="galactic")
    c2_icrs = c2_gal.icrs
    ra_agc_rad = c2_icrs.ra.wrap_at(180 * u.deg).radian
    dec_agc_rad = c2_icrs.dec.radian
    # Plot the vector field on an Aitoff projection.
    plt.figure(figsize=(8, 4.2))
    plt.subplot(111, projection="aitoff")
    Q = plt.quiver(X, Y, U, V, units='xy', scale=100.0)
    # Reference arrow: key length of 50 (presumably micro-arcsec -- see label).
    qk = plt.quiverkey(Q, 0.90, 0.90, 50, r'$50 \mu as$', labelpos='E',
                       coordinates='figure')
    # Show the position of GC and anti-GC
    plt.plot(ra_gc_rad, dec_gc_rad, 'r+')
    plt.plot(ra_agc_rad, dec_agc_rad, 'r+')
    plt.text(ra_gc_rad, dec_gc_rad, "GC", color="r")
    # plt.text(ra_gc_rad, dec_gc_rad, "Anti-GC")
    if not fig_title is None:
        plt.title(fig_title, y=1.08)
    plt.grid(True)
    plt.subplots_adjust(top=0.95, bottom=0.05)
    # Save to file when an output path is given; otherwise show interactively.
    if output is None:
        plt.show()
    else:
        plt.savefig(output)
def test_code(flag):
'''Used for verified the code.
'''
if flag is 1:
# print(glide_apex_calc(np.array([1.2, 2.4, 7.5]),
# np.array([0.6, 0.8, 1.0])))
# Verify the result of Table 1 in Titov et al.(2011) (A&A 529, A91)
print('\nTo verify the result of Table 1 in Titov et al.(2011):')
# Result in the paper
dip = np.array([[-0.7, -5.9, -2.2],
[-1.9, -7.1, -3.5],
[-1.6, -6.0, -2.9],
[-3.4, -6.2, -3.8],
[+4.2, -4.6, +1.4]])
derr = np.array([[0.8, 0.9, 1.0],
[1.0, 1.1, 1.2],
[0.9, 1.0, 1.1],
[1.0, 1.2, 1.2],
[1.3, 1.3, 1.7]])
par = np.array([[6.4, 263, -20],
[8.1, 255, -25],
[6.9, 255, -25],
[8.0, 241, -28],
[6.4, 312, 13]])
perr = np.array([[1.5, 11, 12],
[1.9, 11, 11],
[1.7, 11, 11],
[2.0, 12, 12],
[2.5, 16, 21]])
for (dipi, derri, pari, perri) in zip(dip, derr, par, perr):
print('Result in paper: ', pari, perri)
print('Result of my calculation:', glide_apex_calc(dipi, derri))
print('For g:', np.sqrt(np.dot(derri, derri)))
# Verify the result of Table 1 in Liu et al.(2012) (A&A 548, A50)
print('\nTo verify the result of Table 1 in Liu et al.(2012):')
# Result in the paper
dip = np.array([[+1.07, -0.20, +0.18],
[+0.22, -0.03, +0.04],
[+0.01, -0.14, +0.28],
[+0.89, -0.08, +0.04]])
derr = np.array([[0.08, 0.08, 0.09],
[0.05, 0.05, 0.05],
[0.07, 0.07, 0.07],
[0.02, 0.02, 0.02]])
par = np.array([[1.10],
[0.22],
[0.31],
[0.89]])
perr = np.array([[0.14],
[0.09],
[0.12],
[0.03]])
for (dipi, derri, pari, perri) in zip(dip, derr, par, perr):
print('Result in paper: ', pari, perri)
print('Result of my calculation:', glide_apex_calc(dipi, derri))
print('For g:', np.sqrt(np.dot(derri, derri)))
# Verify the result of Table 1 in Titov et al.(2013) (A&A 559, A95)
print('\nTo verify the result of Table 1 in Titov et al.(2013):')
# Result in the paper
dip = np.array([[-0.4, -5.7, -2.8],
[+0.7, -6.2, -3.3],
[+0.7, -6.2, -3.3]])
derr = np.array([[0.7, 0.8, 0.9],
[0.8, 0.9, 1.0],
[0.9, 1.0, 1.0]])
par = np.array([[6.4, 266, -26],
[7.1, 277, -28],
[7.1, 277, -28]])
perr = np.array([[1.1, 7, 7],
[1.3, 7, 7],
[1.4, 9, 8]])
for (dipi, derri, pari, perri) in zip(dip, derr, par, perr):
print('Result in paper: ', pari, perri)
print('Result of my calculation:', glide_apex_calc(dipi, derri))
print('For g:', np.sqrt(np.dot(derri, derri)))
# Verify the result of Table 1 in Titov et al.(2018) (A&A 610, A36)
print('\nTo verify the result of Table 1 in Titov et | |
a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
:param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        # Read-only view onto the "key" value stored by the pulumi runtime.
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def value(self) -> str:
        """
        The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        # Read-only view onto the "value" value stored by the pulumi runtime.
        return pulumi.get(self, "value")
@pulumi.output_type
class ModelQualityJobDefinitionClusterConfig(dict):
    """
    Configuration for the cluster used to run model monitoring jobs.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire-format keys to the snake_case property names so
        # dict-style access can point callers at the proper getter.
        suggest = None
        if key == "instanceCount":
            suggest = "instance_count"
        elif key == "instanceType":
            suggest = "instance_type"
        elif key == "volumeSizeInGB":
            suggest = "volume_size_in_gb"
        elif key == "volumeKmsKeyId":
            suggest = "volume_kms_key_id"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ModelQualityJobDefinitionClusterConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        # Warn on camelCase access, then defer to normal dict lookup.
        ModelQualityJobDefinitionClusterConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        # Warn on camelCase access, then defer to normal dict.get.
        ModelQualityJobDefinitionClusterConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 instance_count: int,
                 instance_type: str,
                 volume_size_in_gb: int,
                 volume_kms_key_id: Optional[str] = None):
        """
        Configuration for the cluster used to run model monitoring jobs.
        :param int instance_count: The number of ML compute instances to use in the model monitoring job. For distributed processing jobs, specify a value greater than 1. The default value is 1.
        :param str instance_type: The ML compute instance type for the processing job.
        :param int volume_size_in_gb: The size of the ML storage volume, in gigabytes, that you want to provision. You must specify sufficient ML storage for your scenario.
        :param str volume_kms_key_id: The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the model monitoring job.
        """
        pulumi.set(__self__, "instance_count", instance_count)
        pulumi.set(__self__, "instance_type", instance_type)
        pulumi.set(__self__, "volume_size_in_gb", volume_size_in_gb)
        # Optional key is only stored when supplied.
        if volume_kms_key_id is not None:
            pulumi.set(__self__, "volume_kms_key_id", volume_kms_key_id)
    @property
    @pulumi.getter(name="instanceCount")
    def instance_count(self) -> int:
        """
        The number of ML compute instances to use in the model monitoring job. For distributed processing jobs, specify a value greater than 1. The default value is 1.
        """
        return pulumi.get(self, "instance_count")
    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """
        The ML compute instance type for the processing job.
        """
        return pulumi.get(self, "instance_type")
    @property
    @pulumi.getter(name="volumeSizeInGB")
    def volume_size_in_gb(self) -> int:
        """
        The size of the ML storage volume, in gigabytes, that you want to provision. You must specify sufficient ML storage for your scenario.
        """
        return pulumi.get(self, "volume_size_in_gb")
    @property
    @pulumi.getter(name="volumeKmsKeyId")
    def volume_kms_key_id(self) -> Optional[str]:
        """
        The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the model monitoring job.
        """
        return pulumi.get(self, "volume_kms_key_id")
@pulumi.output_type
class ModelQualityJobDefinitionConstraintsResource(dict):
    """
    The baseline constraints resource for a monitoring job.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire-format keys to the snake_case property names.
        suggest = None
        if key == "s3Uri":
            suggest = "s3_uri"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ModelQualityJobDefinitionConstraintsResource. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        # Warn on camelCase access, then defer to normal dict lookup.
        ModelQualityJobDefinitionConstraintsResource.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        # Warn on camelCase access, then defer to normal dict.get.
        ModelQualityJobDefinitionConstraintsResource.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 s3_uri: Optional[str] = None):
        """
        The baseline constraints resource for a monitoring job.
        :param str s3_uri: The Amazon S3 URI for baseline constraint file in Amazon S3 that the current monitoring job should be validated against.
        """
        if s3_uri is not None:
            pulumi.set(__self__, "s3_uri", s3_uri)
    @property
    @pulumi.getter(name="s3Uri")
    def s3_uri(self) -> Optional[str]:
        """
        The Amazon S3 URI for baseline constraint file in Amazon S3 that the current monitoring job should be validated against.
        """
        return pulumi.get(self, "s3_uri")
@pulumi.output_type
class ModelQualityJobDefinitionEndpointInput(dict):
    """
    The endpoint for a monitoring job.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire-format keys to the snake_case property names so
        # dict-style access can point callers at the proper getter.
        suggest = None
        if key == "endpointName":
            suggest = "endpoint_name"
        elif key == "localPath":
            suggest = "local_path"
        elif key == "endTimeOffset":
            suggest = "end_time_offset"
        elif key == "inferenceAttribute":
            suggest = "inference_attribute"
        elif key == "probabilityAttribute":
            suggest = "probability_attribute"
        elif key == "probabilityThresholdAttribute":
            suggest = "probability_threshold_attribute"
        elif key == "s3DataDistributionType":
            suggest = "s3_data_distribution_type"
        elif key == "s3InputMode":
            suggest = "s3_input_mode"
        elif key == "startTimeOffset":
            suggest = "start_time_offset"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ModelQualityJobDefinitionEndpointInput. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        # Warn on camelCase access, then defer to normal dict lookup.
        ModelQualityJobDefinitionEndpointInput.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        # Warn on camelCase access, then defer to normal dict.get.
        ModelQualityJobDefinitionEndpointInput.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 endpoint_name: str,
                 local_path: str,
                 end_time_offset: Optional[str] = None,
                 inference_attribute: Optional[str] = None,
                 probability_attribute: Optional[str] = None,
                 probability_threshold_attribute: Optional[float] = None,
                 s3_data_distribution_type: Optional['ModelQualityJobDefinitionEndpointInputS3DataDistributionType'] = None,
                 s3_input_mode: Optional['ModelQualityJobDefinitionEndpointInputS3InputMode'] = None,
                 start_time_offset: Optional[str] = None):
        """
        The endpoint for a monitoring job.
        :param str local_path: Path to the filesystem where the endpoint data is available to the container.
        :param str end_time_offset: Monitoring end time offset, e.g. PT0H
        :param str inference_attribute: Index or JSONpath to locate predicted label(s)
        :param str probability_attribute: Index or JSONpath to locate probabilities
        :param 'ModelQualityJobDefinitionEndpointInputS3DataDistributionType' s3_data_distribution_type: Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Defaults to FullyReplicated
        :param 'ModelQualityJobDefinitionEndpointInputS3InputMode' s3_input_mode: Whether the Pipe or File is used as the input mode for transferring data for the monitoring job. Pipe mode is recommended for large datasets. File mode is useful for small files that fit in memory. Defaults to File.
        :param str start_time_offset: Monitoring start time offset, e.g. -PT1H
        """
        pulumi.set(__self__, "endpoint_name", endpoint_name)
        pulumi.set(__self__, "local_path", local_path)
        # Optional fields are only stored when supplied.
        if end_time_offset is not None:
            pulumi.set(__self__, "end_time_offset", end_time_offset)
        if inference_attribute is not None:
            pulumi.set(__self__, "inference_attribute", inference_attribute)
        if probability_attribute is not None:
            pulumi.set(__self__, "probability_attribute", probability_attribute)
        if probability_threshold_attribute is not None:
            pulumi.set(__self__, "probability_threshold_attribute", probability_threshold_attribute)
        if s3_data_distribution_type is not None:
            pulumi.set(__self__, "s3_data_distribution_type", s3_data_distribution_type)
        if s3_input_mode is not None:
            pulumi.set(__self__, "s3_input_mode", s3_input_mode)
        if start_time_offset is not None:
            pulumi.set(__self__, "start_time_offset", start_time_offset)
    @property
    @pulumi.getter(name="endpointName")
    def endpoint_name(self) -> str:
        return pulumi.get(self, "endpoint_name")
    @property
    @pulumi.getter(name="localPath")
    def local_path(self) -> str:
        """
        Path to the filesystem where the endpoint data is available to the container.
        """
        return pulumi.get(self, "local_path")
    @property
    @pulumi.getter(name="endTimeOffset")
    def end_time_offset(self) -> Optional[str]:
        """
        Monitoring end time offset, e.g. PT0H
        """
        return pulumi.get(self, "end_time_offset")
    @property
    @pulumi.getter(name="inferenceAttribute")
    def inference_attribute(self) -> Optional[str]:
        """
        Index or JSONpath to locate predicted label(s)
        """
        return pulumi.get(self, "inference_attribute")
    @property
    @pulumi.getter(name="probabilityAttribute")
    def probability_attribute(self) -> Optional[str]:
        """
        Index or JSONpath to locate probabilities
        """
        return pulumi.get(self, "probability_attribute")
    @property
    @pulumi.getter(name="probabilityThresholdAttribute")
    def probability_threshold_attribute(self) -> Optional[float]:
        return pulumi.get(self, "probability_threshold_attribute")
    @property
    @pulumi.getter(name="s3DataDistributionType")
    def s3_data_distribution_type(self) -> Optional['ModelQualityJobDefinitionEndpointInputS3DataDistributionType']:
        """
        Whether input data distributed in Amazon S3 is fully replicated or sharded by an S3 key. Defaults to FullyReplicated
        """
        return pulumi.get(self, "s3_data_distribution_type")
    @property
    @pulumi.getter(name="s3InputMode")
    def s3_input_mode(self) -> Optional['ModelQualityJobDefinitionEndpointInputS3InputMode']:
        """
        Whether the Pipe or File is used as the input mode for transferring data for the monitoring job. Pipe mode is recommended for large datasets. File mode is useful for small files that fit in memory. Defaults to File.
        """
        return pulumi.get(self, "s3_input_mode")
    @property
    @pulumi.getter(name="startTimeOffset")
    def start_time_offset(self) -> Optional[str]:
        """
        Monitoring start time offset, e.g. -PT1H
        """
        return pulumi.get(self, "start_time_offset")
@pulumi.output_type
class ModelQualityJobDefinitionModelQualityAppSpecification(dict):
"""
Container image configuration object for the monitoring job.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "imageUri":
suggest = | |
only_now 안쓰면 한 파일에 모두 씀
self.action_visualize_sceneMap() # 인식 결과 출력 (read SceneMap)
if self.SAVE_IMAGE:
image_dir = '/media/ailab/D/ai2thor/thorDBv2/images'
depth_dir = '/media/ailab/D/ai2thor/thorDBv2/depth_images'
if not os.path.isdir(image_dir):
os.makedirs(image_dir, exist_ok=True)
if not os.path.isdir(depth_dir):
os.makedirs(depth_dir, exist_ok=True)
t=self.GSGMgr.sceneMap.get_time()
sn = self.thorCtrl.scene_name
image_path = os.path.join(image_dir, f'{sn}_{str(t)}.jpg')
depth_path = os.path.join(depth_dir, f'{sn}_d_{str(t)}.jpg')
self.capture(image_path=image_path, depth_path=depth_path)
if CHECK_TIME:
sub_time = time.time() - sub_st2
print('[TIME] action_visualize_detection() : {}s'.format(str(sub_time)[:4]))
print('*************')
# self.action_bb_test() # 바운딩 박스 시각화 테스트
# if len(inputs) > 0:
# print(inputs)
if self.is_recording:
self.action_record() # 레코드 체크 및 이미지 저장/로그 추가
if self.record_action:
self.action_record_action(inputs) # action만 재현을 위해 따로 저장
def action_record_action(self, inputs):
# action = [inputs['action']]
# if 'target' in inputs:
# action += [inputs['target']]
# self.action_history.append(action)
if 'target_obj' in inputs:
inputs.pop('target_obj')
self.action_history.append(inputs)
    def action_show_objects(self):
        """Rebuild the object action panel for the currently visible objects.

        For each visible object a row with a label, an open/close button and
        a put/pickup button is added; button state depends on the object's
        openable/receptacle/pickupable flags and the current inventory.
        """
        # Refresh the object action list.
        vis_objs = self.thorCtrl.get_visible_objects()
        for i in reversed(range(self.ui.layout_objects.count())):  # clear the scroll area
            layout = self.ui.layout_objects.itemAt(i).layout()
            for j in reversed(range(layout.count())):
                layout.itemAt(j).widget().deleteLater()
            layout.deleteLater()
        for idx, obj in enumerate(vis_objs):
            container = QHBoxLayout()
            label, open_btn, pickup_btn = QLabel(), QPushButton(), QPushButton()
            container.addWidget(label)
            container.addWidget(open_btn)
            container.addWidget(pickup_btn)
            self.ui.layout_objects.addLayout(container)  # add to the scroll area
            label.setText(obj['objectType'])  # could use obj['name'] instead
            # NOTE(review): action_open/close/put/pickup(idx) appear to return
            # the slot callable that is connected here — confirm.
            if obj['openable']:  # open/close check
                if obj['isopen']:
                    open_btn.setText('close')
                    open_btn.clicked.connect(self.action_close(idx))
                else:
                    open_btn.setText('open')
                    open_btn.clicked.connect(self.action_open(idx))
            else:
                open_btn.setEnabled(False)
            if obj['receptacle']:  # put/pickup check
                if self.thorCtrl.get_inventory() is None:
                    pickup_btn.setText('put(no item)')
                    pickup_btn.setEnabled(False)
                elif obj['openable'] and not obj['isopen']:
                    pickup_btn.setText('put(closed)')
                    pickup_btn.setEnabled(False)
                elif obj['receptacleCount'] == len(obj['receptacleObjectIds']):
                    pickup_btn.setText('put(full)')
                    pickup_btn.setEnabled(False)
                else:
                    pickup_btn.setText('put')
                    pickup_btn.clicked.connect(self.action_put(idx))
            elif obj['pickupable']:
                if self.thorCtrl.get_inventory() is not None:
                    pickup_btn.setText('pickup(full)')
                    pickup_btn.setEnabled(False)
                else:
                    pickup_btn.setText('pickup')
                    pickup_btn.clicked.connect(self.action_pickup(idx))
            else:
                pickup_btn.setEnabled(False)
def action_show_inventory(self):
item = self.thorCtrl.get_inventory()
if item is None:
self.ui.tx_inventory.setText('')
else:
self.ui.tx_inventory.setText(item['objectType'])
def action_show_result(self):
isSuccess, previous_action, errorMessage = self.thorCtrl.get_previous_action_result()
if isSuccess:
self.ui.la_success.setText('success')
self.ui.la_error_msg.setText('')
else:
self.ui.la_success.setText('fail')
self.ui.la_error_msg.setText(errorMessage)
self.ui.la_action.setText(previous_action)
def action_update_log(self):
vis_objs = self.thorCtrl.get_visible_objects()
isSuccess, previous_action, errorMessage = self.thorCtrl.get_previous_action_result()
self.ui.tb_scene_log.append('[step {}]'.format(self.thorCtrl.step_count))
success_text = 'success' if isSuccess else 'fail'
self.ui.tb_scene_log.append('action : {} ({})'.format(previous_action, success_text))
if not isSuccess:
self.ui.tb_scene_log.append(' error msg : {}'.format(errorMessage))
self.ui.tb_scene_log.append('visible object :')
if len(vis_objs) == 0:
self.ui.tb_scene_log.append(' nothing')
for idx, vis_obj in enumerate(vis_objs):
self.ui.tb_scene_log.append(' {}. n:{} id:{}'.format(idx+1, vis_obj['name'], vis_obj['objectId']))
self.ui.tb_scene_log.append('-'*50)
def action_record(self):
# 레코딩 중이 아니거나, 일시정지(pause) 중이면 기록 안함, (액션 끝날 때마다 호출됨)
if not self.is_recording:
return
isSuccess, _, _ = self.thorCtrl.get_previous_action_result()
if not isSuccess:
return
if hasattr(self, 'record_count'):
self.record_count += 1
else:
self.record_count = 1 # record 한 번 하면서 시작하므로 1부터 시작
scene_info = self.recorder_get_scene_annotation(save_image=True) # scene annotation 생성 & 영상 저장
self.thorDB.add(scene_info) # DB에 현재 frame annotation 추가
if self.record_count % 100 == 0 and self.record_count > 0:
self.thorDB.save_json(self.object_fileName) # 축적된 annotation을 json으로 저장
self.ui.la_record_count.setText(str(self.record_count))
self.ui.la_db_len.setText(str(self.thorDB.next_data_id))
    def action_visualize_sceneMap(self):
        """Visualize the SceneMap recognition results.

        Depending on which UI checkboxes are ticked, this renders:
        2D bounding boxes over the current frame, a 3D object map,
        and/or the scene graph (via graphviz).
        """
        # Build the color tables used by all visualizations below.
        colors_plt = plt.cm.Set3(np.linspace(0, 1, len(au.object_label)))
        rgb = (colors_plt[:, :3] * 255).astype('int32')
        colors_gv = []
        for row in rgb:
            out = '#'
            for value in row:
                # Zero-pad single hex digits so each channel is two chars.
                if len(hex(value)[2:]) == 1:
                    out += '0' + hex(value)[2:]
                else:
                    out += hex(value)[2:]
            colors_gv.append(out)
        # Visualize bounding boxes.
        if self.ui.check_show_bbox.isChecked():
            if CHECK_TIME:
                sub_st = time.time()
            frame = self.thorCtrl.get_image()  # RGB
            if not 'bbox_fig' in dir(self):
                # Lazily create the figure/axes once and reuse them afterwards.
                self.bbox_fig = plt.figure(figsize=(6.0, 6.0))
                ax = plt.Axes(self.bbox_fig, [0., 0., 1., 1.])
                ax.axes.get_xaxis().set_visible(False)
                ax.axes.get_yaxis().set_visible(False)
                self.bbox_fig.add_axes(ax)
                self.ax = ax
            self.ax.cla()
            self.ax.imshow(frame)
            od_results = self.GSGMgr.get_od_results()  # fetch OD results from the SceneMap
            boxes = od_results['boxes']
            classes = od_results['classes']
            # Works the same without the DNN (the commented code above is the original)
            for i in range(len(boxes)):
                rect = patches.Rectangle((boxes[i][0], boxes[i][1]), boxes[i][2] - boxes[i][0],
                                         boxes[i][3] - boxes[i][1],
                                         linewidth=6, edgecolor=colors_plt[au.obj_s2i[classes[i]]],
                                         facecolor='none')
                self.ax.add_patch(rect)
                # self.ax.text(boxes[i][0], boxes[i][1] - 9, classes[i],
                #              style='italic',
                #              bbox={'facecolor': colors_plt[au.obj_s2i[classes[i]]], 'alpha': 0.5}, fontsize=15)
                self.ax.text(boxes[i][0], boxes[i][1] - 15, classes[i],
                             style='italic',
                             bbox={'facecolor': colors_plt[au.obj_s2i[classes[i]]], 'alpha': 0.5}, fontsize=25)
            # plt.show()
            plt.savefig(fname='temp_od.jpg', bbox_inches='tight', pad_inches=0)
            self.OdDialog.show_image(filePath='temp_od.jpg')
            if CHECK_TIME:
                sub_time = time.time() - sub_st
                print('[TIME] draw OD results : {}s'.format(str(sub_time)[:4]))
        gsg = self.GSGMgr.get_gsg()  # fetch the gsg from the SceneMap
        # Visualize 3D object positions.
        if self.ui.check_show_om.isChecked():
            if not hasattr(self, 'draw_answer'):
                # Draw the ground-truth (answer) object map only once.
                if CHECK_TIME:
                    sub_st = time.time()
                aws_objects = self.thorCtrl.get_all_objects()
                aws_poses = []
                aws_labals = []
                for obj in aws_objects:
                    pos = []
                    pos += [obj['position']['x'], obj['position']['y'], obj['position']['z']]
                    b_3d = obj['bounds3D']
                    size_3d = [b_3d[3] - b_3d[0], b_3d[4] - b_3d[1], b_3d[5] - b_3d[2]]
                    pos += size_3d
                    aws_poses.append(pos)
                    aws_labals.append(au.obj_s2i[obj['objectType']])
                map_img_path = du.draw_object_3d_map(pos=np.array(aws_poses), labels=np.array(aws_labals),
                                                     colors=colors_gv,
                                                     draw_answer=True)
                self.draw_answer = True
                if CHECK_TIME:
                    sub_time = time.time() - sub_st
                    print('[TIME] Aws draw_object_3d_map() : {}s'.format(str(sub_time)[:4]))
            if CHECK_TIME:
                sub_st = time.time()
            if len(gsg['objects']) > 0:
                gsg_objects = gsg['objects']
                gsg_pos = np.array([obj['box3d'] for obj in gsg_objects.values()])
                gsg_labels = np.array([obj['label'] for obj in gsg_objects.values()])
                map_img_path = du.draw_object_3d_map(pos=gsg_pos, labels=gsg_labels, colors=colors_gv)
                # Draw the agent position
                # agent_info = self.thorCtrl.get_event().metadata['agent']
                # agent_pos = [agent_info['position']['x'], agent_info['position']['y'],
                #              agent_info['position']['z']]
                # #print('agent pos:', agent_pos) ###
                #
                # map_img_path = du.draw_agent(agent_pos, draw_answer=False)
                self.OMDialog.show_image(filePath=map_img_path)  # if error-free this should live outside the if ###
            if CHECK_TIME:
                sub_time = time.time() - sub_st
                print('[TIME] Predict DRAW & SHOW draw_object_3d_map() : {}s'.format(str(sub_time)[:4]))
        # Visualize the scene graph.
        if self.ui.check_show_graph.isChecked():
            sg = gv.Digraph('structs', format='png')  # create the graph
            # sg = gv.Digraph('structs', format='pdf')  # create the graph
            # graphviz color table
            # https://www.graphviz.org/doc/info/colors.html#brewer
            if CHECK_TIME:
                sub_st = time.time()
            for oid, obj in gsg['objects'].items():  # add object nodes and attribute nodes/edges
                # for idx in range(len(boxes)):
                # Highlight currently-detected objects with a red border.
                if obj['detection']:
                    penwidth = '2'
                    pencolor = 'red'
                else:
                    penwidth = '0'
                    pencolor = 'blue'
                with sg.subgraph(name=str(oid)) as obj_g:
                    # add the object node
                    obj_g.node(str(oid), label=au.obj_i2s[int(obj['label'])]+f'_{oid}', shape='box',
                               style='filled', fillcolor=colors_gv[int(obj['label'])], fontsize='20',
                               penwidth=penwidth,
                               color=pencolor)
                    if int(obj['open_state']) != au.openState_s2i['unable']:
                        # add the is_open attribute node
                        obj_g.node(str(oid) + '_isOpen', label=au.openState_i2s[int(obj['open_state'])],
                                   shape='ellipse', style='filled', color='lightseagreen', fontsize='15')
                        obj_g.edge(str(oid) + '_isOpen', str(oid), dir='none')
                    # add the color attribute node
                    obj_g.node(str(oid) + '_color', label=au.color_i2s[int(obj['color'])],
                               shape='ellipse',
                               style='filled', color='lightskyblue1', fontsize='15')
                    obj_g.edge(str(oid) + '_color', str(oid), dir='none')
            for key, relation in gsg['relations'].items():  # add relation edges
                if au.rel_i2s[relation['rel_class']] == 'background':
                    continue
                sg.edge(str(relation['subject_id']), str(relation['object_id']),
                        label=' ' + au.rel_i2s[relation['rel_class']], fontsize='16')
            # add agent info (agent node, inventory object edge)
            with sg.subgraph(name=str(999)) as obj_g:
                obj_g.node(str(999), label='Agent', shape='box',
                           style='filled', fontsize='25',
                           color='gray')
                owned_obj_id = self.GSGMgr.sceneMap.get_agent_object_id()
                if owned_obj_id is not None:
                    sg.edge(str(999), str(owned_obj_id),
                            label=' has', fontsize='16')
            if CHECK_TIME:
                sub_time = time.time() - sub_st
                print('[TIME] draw Graph (DNN results) : {}s'.format(str(sub_time)[:4]))
            # sg.render('./gv_temp.gv', view=True, cleanup=True)  # render
            if CHECK_TIME:
                sub_st = time.time()
            # rendering is quite slow
            filePath = sg.render('./temp', view=False, cleanup=False)  # render
            self.SgDialog.show_image(filePath=filePath)
            if CHECK_TIME:
                sub_time = time.time() - sub_st
                print('[TIME] show scene graph : {}s'.format(str(sub_time)[:4]))
    @pyqtSlot()
    def save_results(self, seed=None):
        """Save the GSG results (json + owl) and, optionally, the action history (json).

        Output directory depends on whether the DNN checkbox is ticked;
        *seed* (if given) is embedded in the output file names.
        """
        # GSG results => json, owl
        # action history => json
        if self.ui.check_use_dnn.isChecked():
            dir_path = './datasets/190514 gsg_pred_OATR_only_recog'
        else:
            dir_path = './datasets/190514 gsg_gt'
        os.makedirs(dir_path, exist_ok=True)
        scene_name = self.thorCtrl.scene_name
        #### action history to json ####
        if self.record_action:
            action_path = os.path.join(dir_path, 'action_history')
            os.makedirs(action_path, exist_ok=True)
            action_db = dict()
            action_db['actions'] = self.action_history
            action_db['scene_name'] = scene_name
            with open(os.path.join(action_path, scene_name + '.json'), 'w') as f:
                json.dump(action_db, f, indent='\t')
            print('save action_history ({})'.format(os.path.join(action_path, scene_name+'.json')))
        ###############################
        ######## GSG to OWL ########
        # Requires sceneMap.use_history to be on (enabled automatically when
        # an action history is loaded).
        owl_path = os.path.join(dir_path, 'owl', scene_name)
        if seed is not None:
            owl_path = os.path.join(owl_path, f'S{seed}')
            file_prefix = f'{scene_name}_S{seed}'
        else:
            owl_path = os.path.join(owl_path, 'NS')
            file_prefix = f'{scene_name}_NS'
        os.makedirs(owl_path, exist_ok=True)
        for k, gsg in self.GSGMgr.sceneMap.get_gsg_history().items():
            ou.write_owl_from_gsg({k:gsg},
                                  file_name=f'{file_prefix}_T{k}.owl',
                                  dir_path=owl_path) # without only_now everything goes into one file
        ###########################
        ####### GSG to json #######
        gsg_path = os.path.join(dir_path, 'gsg', scene_name)
        os.makedirs(gsg_path, exist_ok=True)
        if seed is not None:
            file_prefix = f'{scene_name}_S{seed}'
        else:
            file_prefix = f'{scene_name}_NS'
        with open(os.path.join(gsg_path, f'{file_prefix}.json'), 'w') as f:
            json_db = {}
            json_db['mode'] = 'dynamic'
            json_db['seed'] = seed
            json_db['scene_name'] = scene_name
            json_db['gsg_history'] = self.GSGMgr.sceneMap.get_gsg_history()
            #self.GSGMgr.sceneMap.print_dict(json_db) # numpy values cannot be serialized; this checks whether any numpy remains
            # if self.record_action:
            #     json_db['actions'] = self.action_history
            # else:
            #     json_db['actions'] = []
            json.dump(json_db, f, indent='\t')
        print('save GSG file ({})'.format(os.path.join(gsg_path, f'{file_prefix}.json')))
        ###########################
@pyqtSlot()
def run_action_history_BT(self):
if self.ui.check_record_action.isChecked():
self.action_history = []
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
ah_path, _ = QFileDialog.getOpenFileName(self, "Select Action History File (Open)",
"./datasets/action_history",
"Json Files (*.json)", options=options)
if ah_path == '': # 취소할 경우
return
# with open(ah_path, 'r') as f:
# action_history = json.load(f)
# self.init_reset()
# self.thorCtrl.set_scene(action_history['scene_name'])
#
# for action_list in action_history['actions']:
# action = action_list[0]
# if action in ['pickup', 'put', 'open', 'close', 'teleport']:
# target = action_list[1]
# eval('self.action_{}({})()'.format(action, target))
# else:
# eval('self.action_{}()'.format(action))
print('run action history')
self.run_action_history(ah_path)
@pyqtSlot()
def run_action_histories_BT(self, use_seed=True):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
ah_path = QFileDialog.getExistingDirectory(self, "Select Action Histories Folder (Open)",
"./datasets",
QFileDialog.ShowDirsOnly)
if ah_path == '': # 취소할 경우
return
print('start run actions...')
st = time.time()
action_histories = os.listdir(ah_path)
action_histories = [ah for ah in action_histories if ah.split('.')[-1] == 'json'] # 확장자가 json인 파일만 추림
action_histories.sort()
self.GSGMgr.set_use_history(True)
for i, file_name in | |
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Load, update and manage skills on this device."""
import os
from os.path import basename
from glob import glob
from threading import Thread, Event, Lock
from time import sleep, monotonic
from mycroft.util.process_utils import ProcessStatus, StatusCallbackMap, ProcessState
from mycroft.api import is_paired
from mycroft.enclosure.api import EnclosureAPI
from mycroft.configuration import Configuration
from mycroft.messagebus.message import Message
from mycroft.util.log import LOG
from mycroft.util import connected
from mycroft.skills.settings import SkillSettingsDownloader
from mycroft.skills.skill_loader import get_skill_directories, SkillLoader, PluginSkillLoader, find_skill_plugins
from mycroft.skills.skill_updater import SkillUpdater
from mycroft.messagebus import MessageBusClient
SKILL_MAIN_MODULE = '__init__.py'
class UploadQueue:
    """Queue of skill loaders whose settingsmeta still needs uploading.

    Loaders can be collected during startup; once the system is connected
    to the backend, ``start()`` sets the ``started`` flag and flushes the
    queue.  While ``started`` is set, newly ``put`` loaders are uploaded
    on the next ``send()``.
    """

    def __init__(self):
        self._queue = []
        self.started = False
        self.lock = Lock()

    def start(self):
        """Begin processing: mark the queue as started and flush it."""
        self.started = True
        self.send()

    def stop(self):
        """Halt the queue, preventing any further transmissions."""
        self.started = False

    def send(self):
        """Upload settingsmeta for every queued loader, draining the queue."""
        with self.lock:
            pending, self._queue = self._queue, []
        if pending:
            LOG.info('New Settings meta to upload.')
            for loader in pending:
                if not self.started:
                    break
                loader.instance.settings_meta.upload()

    def __len__(self):
        return len(self._queue)

    def put(self, loader):
        """Queue *loader*, replacing any previously queued entry for it."""
        if self.started:
            LOG.info('Updating settings meta during runtime...')
        with self.lock:
            # Drop any existing entry for this loader before re-adding it.
            self._queue = [e for e in self._queue if e != loader] + [loader]
def _shutdown_skill(instance):
"""Shutdown a skill.
Call the default_shutdown method of the skill, will produce a warning if
the shutdown process takes longer than 1 second.
Args:
instance (MycroftSkill): Skill instance to shutdown
"""
try:
ref_time = monotonic()
# Perform the shutdown
instance.default_shutdown()
shutdown_time = monotonic() - ref_time
if shutdown_time > 1:
LOG.warning(f'{instance.skill_id} shutdown took {shutdown_time} seconds')
except Exception:
LOG.exception(f'Failed to shut down skill: {instance.skill_id}')
def on_started():
    """Default ProcessStatus hook: log that startup has begun."""
    LOG.info('Skills Manager is starting up.')
def on_alive():
    """Default ProcessStatus hook: log that the process is alive."""
    LOG.info('Skills Manager is alive.')
def on_ready():
    """Default ProcessStatus hook: log that the process is ready."""
    LOG.info('Skills Manager is ready.')
def on_error(e='Unknown'):
    """Default ProcessStatus hook: log a launch failure with its cause."""
    LOG.info(f'Skills Manager failed to launch ({e})')
def on_stopping():
    """Default ProcessStatus hook: log that shutdown has started."""
    LOG.info('Skills Manager is shutting down...')
class SkillManager(Thread):
    def __init__(self, bus, watchdog=None, alive_hook=on_alive, started_hook=on_started, ready_hook=on_ready,
                 error_hook=on_error, stopping_hook=on_stopping):
        """Constructor

        Args:
            bus (event emitter): Mycroft messagebus connection
            watchdog (callable): optional watchdog function
            alive_hook (callable): callback run when the manager reports alive
            started_hook (callable): callback run when startup begins
            ready_hook (callable): callback run when the manager is ready
            error_hook (callable): callback run when launch fails
            stopping_hook (callable): callback run when the manager stops
        """
        super(SkillManager, self).__init__()
        self.bus = bus
        # Set watchdog to argument or function returning None
        self._watchdog = watchdog or (lambda: None)
        callbacks = StatusCallbackMap(on_started=started_hook,
                                      on_alive=alive_hook,
                                      on_ready=ready_hook,
                                      on_error=error_hook,
                                      on_stopping=stopping_hook)
        self.status = ProcessStatus('skills', callback_map=callbacks)
        self.status.set_started()
        self._stop_event = Event()
        self._connected_event = Event()
        self.config = Configuration.get()
        self.upload_queue = UploadQueue()
        self.skill_loaders = {}
        self.plugin_skills = {}
        self.enclosure = EnclosureAPI(bus)
        self.initial_load_complete = False
        self.num_install_retries = 0
        self.settings_downloader = SkillSettingsDownloader(self.bus)
        self.empty_skill_dirs = set()  # Save a record of empty skill dirs.
        self.skill_updater = SkillUpdater()
        self._define_message_bus_events()
        # Daemon thread: will not block interpreter shutdown.
        self.daemon = True
        self.status.bind(self.bus)
    def _define_message_bus_events(self):
        """Define message bus events with handlers defined in this class."""
        # Update on initial connection
        self.bus.on(
            'mycroft.internet.connected',
            lambda x: self._connected_event.set()
        )
        # Update upon request
        self.bus.on('skillmanager.list', self.send_skill_list)
        self.bus.on('skillmanager.deactivate', self.deactivate_skill)
        self.bus.on('skillmanager.keep', self.deactivate_except)
        self.bus.on('skillmanager.activate', self.activate_skill)
        # Upload the skills manifest once pairing completes.
        self.bus.on('mycroft.paired', self.handle_paired)
        self.bus.on(
            'mycroft.skills.settings.update',
            self.settings_downloader.download
        )
        # Start the readiness check when 'mycroft.skills.trained' arrives.
        self.bus.on('mycroft.skills.trained',
                    self.handle_check_device_readiness)
def is_device_ready(self):
is_ready = False
# different setups will have different needs
# eg, a server does not care about audio
# pairing -> device is paired
# internet -> device is connected to the internet - NOT IMPLEMENTED
# skills -> skills reported ready
# speech -> stt reported ready
# audio -> audio playback reported ready
# gui -> gui websocket reported ready - NOT IMPLEMENTED
# enclosure -> enclosure/HAL reported ready - NOT IMPLEMENTED
services = {k: False for k in
self.config.get("ready_settings", ["skills"])}
start = monotonic()
while not is_ready:
is_ready = self.check_services_ready(services)
if is_ready:
break
elif monotonic() - start >= 60:
raise TimeoutError(
f'Timeout waiting for services start. services={services}')
else:
sleep(3)
return is_ready
def handle_check_device_readiness(self, message):
ready = False
while not ready:
try:
ready = self.is_device_ready()
except TimeoutError:
if is_paired():
LOG.warning("mycroft should already have reported ready!")
sleep(5)
LOG.info("Mycroft is all loaded and ready to roll!")
self.bus.emit(message.reply('mycroft.ready'))
def check_services_ready(self, services):
"""Report if all specified services are ready.
services (iterable): service names to check.
"""
for ser in services:
services[ser] = False
if ser == "pairing":
services[ser] = is_paired()
continue
elif ser in ["gui", "enclosure"]:
# not implemented
services[ser] = True
continue
response = self.bus.wait_for_response(
Message(f'mycroft.{ser}.is_ready'))
if response and response.data['status']:
services[ser] = True
return all([services[ser] for ser in services])
    @property
    def skills_config(self):
        """dict: the 'skills' section of the mycroft configuration."""
        return self.config['skills']
    @property
    def msm(self):
        """DEPRECATED: do not use, property only for api backwards compatibility
        Always returns None.
        """
        return None
    @staticmethod
    def create_msm():
        """DEPRECATED: do not use, method only for api backwards compatibility
        Always returns None.
        """
        return None
    def schedule_now(self, _):
        """DEPRECATED: do not use, method only for api backwards compatibility
        No-op kept so legacy callers do not break.
        """
    def _start_settings_update(self):
        """Upload the skills manifest and start settings sync with the backend."""
        LOG.info('Start settings update')
        self.skill_updater.post_manifest(reload_skills_manifest=True)
        # Flush any settingsmeta queued while offline.
        self.upload_queue.start()
        LOG.info('All settings meta has been processed or upload has started')
        self.settings_downloader.download()
        LOG.info('Skill settings downloading has started')
    def handle_paired(self, _):
        """Trigger upload of skills manifest after pairing."""
        self._start_settings_update()
def load_plugin_skills(self):
plugins = find_skill_plugins()
loaded_skill_ids = [basename(p) for p in self.skill_loaders]
for skill_id, plug in plugins.items():
if skill_id not in self.plugin_skills and skill_id not in loaded_skill_ids:
self._load_plugin_skill(skill_id, plug)
def _load_plugin_skill(self, skill_id, skill_plugin):
skill_loader = PluginSkillLoader(self.bus, skill_id)
try:
load_status = skill_loader.load(skill_plugin)
except Exception:
LOG.exception(f'Load of skill {skill_id} failed!')
load_status = False
finally:
self.plugin_skills[skill_id] = skill_loader
return skill_loader if load_status else None
def load_priority(self):
skill_ids = {os.path.basename(skill_path): skill_path
for skill_path in self._get_skill_directories()}
priority_skills = self.skills_config.get("priority_skills") or []
for skill_id in priority_skills:
skill_path = skill_ids.get(skill_id)
if skill_path is not None:
loader = self._load_skill(skill_path)
if loader:
self.upload_queue.put(loader)
else:
LOG.error(f'Priority skill {skill_id} can\'t be found')
self.status.set_alive()
    def run(self):
        """Load skills and update periodically from disk and internet.

        Startup order: clear stale git locks, load priority skills,
        optionally wait for connectivity, load everything else, sync with the
        backend, then enter the watch loop.
        """
        self._remove_git_locks()
        self.load_priority()
        if self.skills_config.get("wait_for_internet", True):
            # Wait until either a connectivity check succeeds or another
            # component has already flagged the connected event.
            while not connected() and not self._connected_event.is_set():
                sleep(1)
            self._connected_event.set()
        self._load_on_startup()
        # Sync backend and skills.
        if is_paired() and not self.upload_queue.started:
            self.skill_updater.post_manifest()
            self._start_settings_update()
        self.status.set_ready()
        # Scan the file folder that contains Skills. If a Skill is updated,
        # unload the existing version from memory and reload from the disk.
        while not self._stop_event.is_set():
            try:
                self._unload_removed_skills()
                self._reload_modified_skills()
                self._load_new_skills()
                self._watchdog()
                sleep(2) # Pause briefly before beginning next scan
            except Exception:
                # Safety harness: never let a scan error kill the loop;
                # back off longer after an unexpected failure.
                LOG.exception('Something really unexpected has occured '
                              'and the skill manager loop safety harness was '
                              'hit.')
                sleep(30)
def _remove_git_locks(self):
"""If git gets killed from an abrupt shutdown it leaves lock files."""
for skills_dir in get_skill_directories():
lock_path = os.path.join(skills_dir, '*/.git/index.lock')
for i in glob(lock_path):
LOG.warning('Found and removed git lock file: ' + i)
os.remove(i)
    def _load_on_startup(self):
        """Handle initial skill load.

        Plugin skills first, then skills found on disk; finally announce
        initialization on the message bus.
        """
        self.load_plugin_skills()
        LOG.info('Loading installed skills...')
        self._load_new_skills()
        LOG.info("Skills all loaded!")
        self.bus.emit(Message('mycroft.skills.initialized'))
def _reload_modified_skills(self):
"""Handle reload of recently changed skill(s)"""
for skill_dir, skill_loader in self.skill_loaders.items():
try:
if skill_loader is not None and skill_loader.reload_needed():
# If reload succeed add settingsmeta to upload queue
if skill_loader.reload():
self.upload_queue.put(skill_loader)
except Exception:
LOG.exception(f'Unhandled exception occured while reloading {skill_dir}')
    def _load_new_skills(self):
        """Handle load of skills installed since startup.

        For each candidate directory: unload any plugin or lower-priority
        directory install of the same skill id, then load the directory if
        it is not already loaded.
        """
        for skill_dir in self._get_skill_directories():
            replaced_skills = []
            # by definition skill_id == folder name
            skill_id = os.path.basename(skill_dir)
            # a local source install is replacing this plugin, unload it!
            if skill_id in self.plugin_skills:
                LOG.info(f"{skill_id} plugin will be replaced by a local version: {skill_dir}")
                self._unload_plugin_skill(skill_id)
            for old_skill_dir, skill_loader in self.skill_loaders.items():
                if old_skill_dir != skill_dir and \
                        skill_loader.skill_id == skill_id:
                    # a higher priority equivalent has been detected!
                    replaced_skills.append(old_skill_dir)
            for old_skill_dir in replaced_skills:
                # unload the old skill
                self._unload_skill(old_skill_dir)
            if skill_dir not in self.skill_loaders:
                loader = self._load_skill(skill_dir)
                if loader:
                    # queue settingsmeta upload for the freshly loaded skill
                    self.upload_queue.put(loader)
def _load_skill(self, skill_directory):
if not self.config["websocket"].get("shared_connection", True):
# see BusBricker | |
# gh_stars: 0
"""
Parsers for GAF and various Association TSVs.
All parser objects instantiate a subclass of the abstract `AssocParser` object
"""
# TODO: Refactor - move some stuff out into generic parser object
import re
import requests
import tempfile
from contextlib import closing
import subprocess
import logging
import io
import gzip
import datetime
import dateutil.parser
from ontobio import ontol
from ontobio import ecomap
TAXON = 'TAXON'
ENTITY = 'ENTITY'
ANNOTATION = 'ANNOTATION'
EXTENSION = 'EXTENSION'
def write_to_file(optional_file, text):
    """Write *text* to the given file-like object; no-op when it is falsy."""
    if not optional_file:
        return
    optional_file.write(text)
class ParseResult(object):
    """Outcome of parsing one line: the raw line, the associations produced,
    whether the line was skipped, and the evidence code used (if any)."""
    def __init__(self, parsed_line, associations, skipped, evidence_used=None):
        # Store the parse outcome verbatim.
        (self.parsed_line,
         self.associations,
         self.skipped,
         self.evidence_used) = (parsed_line, associations, skipped,
                                evidence_used)
class AssocParserConfig():
    """
    Configuration for an association parser

    List-valued options default to fresh empty lists per instance (the
    original used mutable default arguments, which are shared between every
    instance created with the defaults).
    """
    def __init__(self,
                 remove_double_prefixes=False,
                 ontology=None,
                 repair_obsoletes=True,
                 entity_map=None,
                 valid_taxa=None,
                 class_idspaces=None,
                 entity_idspaces=None,
                 ecomap=ecomap.EcoMap(),
                 exclude_relations=None,
                 include_relations=None,
                 filter_out_evidence=None,
                 filtered_evidence_file=None):
        self.remove_double_prefixes=remove_double_prefixes
        self.ontology=ontology
        self.repair_obsoletes=repair_obsoletes
        self.entity_map=entity_map
        self.valid_taxa=valid_taxa
        self.class_idspaces=class_idspaces
        # BUG FIX: entity_idspaces was accepted but silently discarded.
        self.entity_idspaces=entity_idspaces
        self.ecomap=ecomap
        # Normalize None sentinels to per-instance empty lists.
        self.include_relations=include_relations if include_relations is not None else []
        self.exclude_relations=exclude_relations if exclude_relations is not None else []
        self.filter_out_evidence = filter_out_evidence if filter_out_evidence is not None else []
        self.filtered_evidence_file = filtered_evidence_file
class Report():
    """
    A report object that is generated as a result of a parse

    Collects per-parse counters (lines, associations, skipped lines),
    sample id sets, and a bounded list of leveled diagnostic messages;
    can render itself as JSON or markdown.
    """
    # Levels
    FATAL = 'FATAL'
    ERROR = 'ERROR'
    WARNING = 'WARNING'
    # Warnings: TODO link to gorules
    INVALID_ID = "Invalid identifier"
    UNKNOWN_ID = "Unknown identifier"
    INVALID_IDSPACE = "Invalid identifier prefix"
    INVALID_TAXON = "Invalid taxon"
    INVALID_SYMBOL = "Invalid symbol"
    INVALID_DATE = "Invalid date"
    UNMAPPED_ID = "Unmapped identifier"
    UNKNOWN_EVIDENCE_CLASS = "Unknown evidence class"
    OBSOLETE_CLASS = "Obsolete class"
    OBSOLETE_CLASS_NO_REPLACEMENT = "Obsolete class with no replacement"
    WRONG_NUMBER_OF_COLUMNS = "Wrong number of columns in this line"
    EXTENSION_SYNTAX_ERROR = "Syntax error in annotation extension field"
    """
    3 warning levels
    """
    LEVELS = [FATAL, ERROR, WARNING]
    def __init__(self):
        # Leveled diagnostic messages, capped at max_messages.
        self.messages = []
        self.n_lines = 0
        self.n_assocs = 0
        self.skipped = []
        self.subjects = set()
        self.objects = set()
        self.taxa = set()
        self.references = set()
        # Cap keeps memory bounded on very noisy inputs.
        self.max_messages = 10000
    def error(self, line, type, obj, msg=""):
        """Record an ERROR-level message for this line."""
        self.message(self.ERROR, line, type, obj, msg)
    def warning(self, line, type, obj, msg=""):
        """Record a WARNING-level message for this line."""
        self.message(self.WARNING, line, type, obj, msg)
    def message(self, level, line, type, obj, msg=""):
        """Append a message dict; silently drops once the cap is exceeded."""
        # Only keep max_messages number of messages
        if len(self.messages) > self.max_messages:
            # TODO: ensure the message is captured if we are streaming
            return
        self.messages.append({'level':level,
                              'line':line,
                              'type':type,
                              'message':msg,
                              'obj':obj})
    def add_associations(self, associations):
        """Tally every association in an iterable."""
        for a in associations:
            self.add_association(a)
    def add_association(self, association):
        """Count one association.

        Sample-set tracking below is currently disabled, so the subject/
        object/taxon/reference statistics stay empty.
        """
        self.n_assocs += 1
        # self.subjects.add(association['subject']['id'])
        # self.objects.add(association['object']['id'])
        # self.references.update(association['evidence']['has_supporting_reference'])
        # if 'taxon' in association['subject']:
        #     self.taxa.add(association['subject']['taxon']['id'])
    def report_parsed_result(self, result, output_file, evidence_filtered_file, evidence_to_filter):
        """Update counters from one ParseResult; lines whose evidence is not
        filtered out are echoed to evidence_filtered_file (if given)."""
        self.n_lines += 1
        if result.skipped:
            logging.info("SKIPPING: {}".format(result.parsed_line))
            self.skipped.append(result.parsed_line)
        else:
            self.add_associations(result.associations)
            if result.evidence_used not in evidence_to_filter:
                write_to_file(evidence_filtered_file, result.parsed_line + "\n")
    def short_summary(self):
        """One-line human-readable summary of the parse."""
        return "Parsed {} assocs from {} lines. Skipped: {}".format(self.n_assocs, self.n_lines, len(self.skipped))
    def to_report_json(self):
        """
        Generate a summary in json format
        """
        # Number of sample ids to include per category.
        N = 10
        report = dict(
            summary = dict(association_count = self.n_assocs,
                           line_count = self.n_lines,
                           skipped_line_count = len(self.skipped)),
            aggregate_statistics = dict(subject_count=len(self.subjects),
                                        object_count=len(self.objects),
                                        taxon_count=len(self.taxa),
                                        reference_count=len(self.references),
                                        taxon_sample=list(self.taxa)[0:N],
                                        subject_sample=list(self.subjects)[0:N],
                                        object_sample=list(self.objects)[0:N])
        )
        # grouped messages
        gm = {}
        for level in self.LEVELS:
            gm[level] = []
        for m in self.messages:
            level = m['level']
            gm[level].append(m)
        mgroups = []
        for level in self.LEVELS:
            msgs = gm[level]
            mgroup = dict(level=level,
                          count=len(msgs),
                          messages=msgs)
            mgroups.append(mgroup)
        report['groups'] = mgroups
        return report
    def to_markdown(self):
        """
        Generate a summary in markdown format
        """
        # NOTE: local name shadows the stdlib `json` module within this method.
        json = self.to_report_json()
        summary = json['summary']
        s = ""
        s += "\n## SUMMARY\n\n"
        s += " * Associations: {}\n" . format(summary['association_count'])
        s += " * Lines in file (incl headers): {}\n" . format(summary['line_count'])
        s += " * Lines skipped: {}\n" . format(summary['skipped_line_count'])
        stats = json['aggregate_statistics']
        s += "\n## STATISTICS\n\n"
        for k,v in stats.items():
            s += " * {}: {}\n" . format(k,v)
        s += "\n## MESSAGES\n\n"
        for g in json['groups']:
            s += " * {}: {}\n".format(g['level'], g['count'])
        s += "\n\n"
        for g in json['groups']:
            level = g['level']
            msgs = g['messages']
            if len(msgs) > 0:
                s += "### {}\n\n".format(level)
                for m in msgs:
                    s += " * {}: obj:'{}' \"{}\" `{}`\n".format(m['type'],m['obj'],m['message'],m['line'])
        return s
# TODO avoid using names that are builtin python: file, id
class AssocParser(object):
"""
Abstract superclass of all association parser classes
"""
def parse(self, file, outfile=None):
"""Parse a line-oriented association file into a list of association dict objects
Note the returned list is of dict objects. TODO: These will
later be specified using marshmallow and it should be possible
to generate objects
Arguments
---------
file : file or string
The file is parsed into association objects. Can be a http URL, filename or `file-like-object`, for input assoc file
outfile : file
Optional output file in which processed lines are written. This a file or `file-like-object`
Return
------
list
Associations generated from the file
"""
associations = self.association_generator(file, outfile=outfile)
a = list(associations)
return a
def association_generator(self, file, outfile=None):
"""
Returns a generator that yields successive associations from file
Yields
------
association
"""
file = self._ensure_file(file)
for line in file:
parsed_result = self.parse_line(line)
self.report.report_parsed_result(parsed_result, outfile, self.config.filtered_evidence_file, self.config.filter_out_evidence)
for association in parsed_result.associations:
yield association
logging.info(self.report.short_summary())
file.close()
def generate_associations(self, line, outfile=None):
associations = self.association_generator(line, outfile=outfile)
for association in associations:
pass
def validate_line(self, line):
if line == "":
self.report.warning(line, Report.WRONG_NUMBER_OF_COLUMNS, "",
msg="empty line")
return ParseResult(line, [], True)
    def _validate_assoc(self, assoc, line):
        """
        Performs validation on an ontology association structure.

        Currently the only validation is checking the ontology class (object)
        id against the loaded ontology.
        """
        self._validate_ontology_class_id(assoc["object"]["id"], line)
def map_to_subset(self, file, outfile=None, ontology=None, subset=None, class_map=None, relations=None):
"""
Map a file to a subset, writing out results
You can pass either a subset name (e.g. goslim_generic) or a dictionary with ready-made mappings
Arguments
---------
file: file
Name or file object for input assoc file
outfile: file
Name or file object for output (mapped) assoc file; writes to stdout if not set
subset: str
Optional name of subset to map to, e.g. goslim_generic
class_map: dict
Mapping between asserted class ids and ids to map to. Many to many
ontology: `Ontology`
Ontology to extract subset from
"""
if subset is not None:
logging.info("Creating mapping for subset: {}".format(subset))
class_map = ontology.create_slim_mapping(subset=subset, relations=relations)
if class_map is None:
raise ValueError("Neither class_map not subset is set")
col = self.ANNOTATION_CLASS_COLUMN
file = self._ensure_file(file)
tuples = []
for line in file:
if line.startswith("!"):
continue
vals = line.split("\t")
logging.info("LINE: {} VALS: {}".format(line, vals))
if len(vals) < col:
raise ValueError("Line: {} has too few cols, expect class id in col {}".format(line, col))
cid = vals[col]
if cid not in class_map or len(class_map[cid]) == 0:
self.report.error(line, Report.UNMAPPED_ID, cid)
continue
else:
for mcid in class_map[cid]:
vals[col] = mcid
line = "\t".join(vals)
if outfile is not None:
outfile.write(line)
else:
print(line)
    def skim(self, file):
        """
        Lightweight parse of a file into tuples.

        Note this discards metadata such as evidence.

        Return a list of tuples (subject_id, subject_label, object_id).
        Subclasses must override.
        """
        raise NotImplementedError("AssocParser.skim not implemented")
    def parse_line(self, line):
        """Parse one raw line into a ParseResult. Subclasses must override."""
        raise NotImplementedError("AssocParser.parse_line not implemented")
def _skipping_line(self, associations):
return associations is None or associations == []
def _is_exclude_relation(self, relation):
if self.config.include_relations is not None and len(self.config.include_relations)>0:
if relation not in self.config.include_relations:
return True
if self.config.exclude_relations is not None and len(self.config.exclude_relations)>0:
if relation in self.config.exclude_relations:
return True
return False
## we generate both qualifier and relation field
## Returns: (negated, relation, other_qualifiers)
def _parse_qualifier(self, qualifier, aspect):
relation = None
qualifiers = qualifier.split("|")
if qualifier == '':
qualifiers = []
negated = 'NOT' in qualifiers
other_qualifiers = [q for q in qualifiers if q != 'NOT']
## In GAFs, relation is overloaded into qualifier.
## If no explicit non-NOT qualifier is specified, use
## a default based on GPI spec
if len(other_qualifiers) > 0:
relation = other_qualifiers[0]
else:
if aspect == 'C':
relation = 'part_of'
elif aspect == 'P':
relation = 'involved_in'
elif aspect == 'F':
relation = 'enables'
else:
relation = None
return (negated, relation, other_qualifiers)
# split an ID/CURIE into prefix and local parts
# (not currently used)
def _parse_id(self, id):
toks = id.split(":")
if len(toks) == 2:
return (toks[0],toks[1])
else:
return (toks[0],toks[1:].join(":"))
# split an ID/CURIE into prefix and local parts
def _get_id_prefix(self, id):
toks = id.split(":")
return toks[0]
def _validate_taxon(self, taxon, line):
if self.config.valid_taxa is None:
return True
else:
if taxon in self.config.valid_taxa:
return True
else:
self.report.error(line, Report.INVALID_TAXON, taxon)
return False
# check the term id is in the ontology, and is not obsolete
def _validate_ontology_class_id(self, id, line, subclassof=None):
ont = self.config.ontology
if ont is None:
return id
if not ont.has_node(id):
self.report.warning(line, Report.UNKNOWN_ID, id)
return id
if | |
# QLocator
# Copyright (C) 2020-2022 qcomixdev
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from qtpy import QtCore as QC
from qtpy import QtWidgets as QW
from qtpy import QtGui as QG
import random
import math
import re
def elideRichText(richText: str, maxWidth: int, widget, elideFromLeft: bool):
    """Elide rich (HTML) text so it fits within maxWidth pixels.

    Renders the HTML into an off-screen QTextDocument and deletes characters
    from the chosen end until the document fits, then inserts '…' at the
    cursor. Returns HTML; the input is returned unchanged when it already
    fits.
    """
    doc = QG.QTextDocument()
    opt = QG.QTextOption()
    opt.setWrapMode(QG.QTextOption.NoWrap)  # measure as a single line
    doc.setDefaultTextOption(opt)
    doc.setDocumentMargin(0)
    doc.setHtml(richText)
    doc.adjustSize()
    if doc.size().width() > maxWidth:
        cursor = QG.QTextCursor (doc)
        if elideFromLeft:
            cursor.movePosition(QG.QTextCursor.Start)
        else:
            cursor.movePosition(QG.QTextCursor.End)
        elidedPostfix = "…"
        # Reserve room for the ellipsis, measured in the widget's own font.
        metric = QG.QFontMetrics(widget.font())
        postfixWidth = metric.horizontalAdvance(elidedPostfix)
        # NOTE(review): assumes deletions eventually shrink the document
        # below the limit; a maxWidth smaller than postfixWidth would rely
        # on the document emptying — verify against callers.
        while doc.size().width() > maxWidth - postfixWidth:
            if elideFromLeft:
                cursor.deleteChar()
            else:
                cursor.deletePreviousChar()
            doc.adjustSize()
        cursor.insertText(elidedPostfix)
        return doc.toHtml()
    return richText
class FocusEventFilter(QC.QObject):
    """Event filter that emits `focused` whenever the watched object gains
    keyboard focus; never consumes the event."""
    focused = QC.Signal()
    def __init__(self, parent = None):
        super().__init__(parent)
    def eventFilter(self, object, event) -> bool:
        is_focus_in = event.type() == QC.QEvent.FocusIn
        if is_focus_in:
            self.focused.emit()
        # Always let the event propagate.
        return False
class QLocatorSearchResult:
    """Plain data holder describing one search result row.

    `text` carries up to two rich-text strings: the primary label and an
    optional secondary label. Toggle icon paths are only used by rows whose
    closeOnActivated is False.
    """
    def __init__(self, id: int, defaultIconPath: str, selectedIconPath: str, closeOnActivated: bool, text: list, toggled: bool = False, toggledIconPath: str = "", toggledSelectedIconPath: str = ""):
        self.id = id
        self.text = text
        self.closeOnActivated = closeOnActivated
        self.toggled = toggled
        self.defaultIconPath = defaultIconPath
        self.selectedIconPath = selectedIconPath
        self.toggledIconPath = toggledIconPath
        self.toggledSelectedIconPath = toggledSelectedIconPath
class QLocatorTitleWidget(QW.QWidget):
    """Section header row: provider icon, bold title, and a result count."""
    def __init__(self, title: str, iconPath: str, height: int, shouldRemainHidden: bool, parent = None):
        super().__init__(parent)
        self.icon = QG.QIcon(iconPath)
        # Leave a 1px margin above and below the icon.
        self.iconHeight = height - 2
        self.setLayout(QW.QHBoxLayout())
        self.iconLabel = QW.QLabel()
        self.iconLabel.setFixedHeight(self.iconHeight)
        self.iconLabel.setPixmap(self.icon.pixmap(self.iconHeight, self.iconHeight))
        self.titleLabel = QW.QLabel()
        self.titleLabel.setText(title)
        self.titleLabel.setTextFormat(QC.Qt.RichText)
        # Right-aligned label showing the number of results in this section.
        self.countLabel = QW.QLabel()
        self.layout().setContentsMargins(4, 1, 4, 1)
        self.layout().addWidget(self.iconLabel)
        self.layout().addWidget(self.titleLabel)
        self.layout().addStretch(1)
        self.layout().addWidget(self.countLabel)
        self.layout().setAlignment(self.countLabel, QC.Qt.AlignVCenter)
        self.layout().setAlignment(self.iconLabel, QC.Qt.AlignVCenter)
        self.layout().setAlignment(self.titleLabel, QC.Qt.AlignVCenter)
        titleFont = self.titleLabel.font()
        titleFont.setBold(True)
        self.titleLabel.setFont(titleFont)
        self.setFixedHeight(height)
        # When True the locator keeps this header hidden (provider opted out).
        self.shouldRemainHidden = shouldRemainHidden
    def updateData(self, count: int):
        """Display the current result count for this section."""
        self.countLabel.setText(str(count))
    def paintEvent(self, event):
        """Draw the background via the style so stylesheet rules apply."""
        opt = QW.QStyleOption()
        opt.initFrom(self)
        p = QG.QPainter(self)
        self.style().drawPrimitive(QW.QStyle.PE_Widget, opt, p, self)
class QLocatorResultWidget(QW.QWidget):
    """One selectable result row: icon, primary text, secondary text.

    Emits `activated(providerIndex, id, closeOnActivated)` on click/Enter and
    `up`/`down` for keyboard navigation; other key presses are forwarded to
    the search box (`keyEventTarget`).
    """
    up = QC.Signal()
    down = QC.Signal()
    activated = QC.Signal(int, int, bool)
    entered = QC.Signal()
    def __init__(self, keyEventTarget: QW.QWidget, height: int, primaryTextWidth: int, secondaryTextWidth: int, parent = None):
        super().__init__(parent)
        self.iconHeight = height - 2
        self.setObjectName("unselectedLocatorResult")
        self.keyEventTarget = keyEventTarget
        self.setLayout(QW.QHBoxLayout())
        self.iconLabel = QW.QLabel(self)
        self.iconLabel.setFixedHeight(self.iconHeight)
        self.mainTextLabel = QW.QLabel(self)
        self.primaryTextWidth = primaryTextWidth
        self.mainTextLabel.setMinimumWidth(primaryTextWidth)
        self.mainTextLabel.setTextFormat(QC.Qt.RichText)
        self.mainTextLabel.setTextInteractionFlags(QC.Qt.NoTextInteraction)
        self.secondaryTextLabel = QW.QLabel(self)
        self.secondaryTextWidth = secondaryTextWidth
        self.secondaryTextLabel.setMaximumWidth(secondaryTextWidth)
        self.secondaryTextLabel.setTextFormat(QC.Qt.RichText)
        self.secondaryTextLabel.setTextInteractionFlags(QC.Qt.NoTextInteraction)
        self.layout().setContentsMargins(4, 1, 4, 1)
        self.layout().addWidget(self.iconLabel)
        self.layout().addWidget(self.mainTextLabel)
        self.layout().addStretch(1)
        self.layout().addWidget(self.secondaryTextLabel)
        self.layout().setAlignment(self.mainTextLabel, QC.Qt.AlignVCenter)
        self.layout().setAlignment(self.iconLabel, QC.Qt.AlignVCenter)
        self.layout().setAlignment(self.secondaryTextLabel, QC.Qt.AlignVCenter)
        self.setFixedHeight(height)
        self.setSizePolicy(QW.QSizePolicy.Expanding, QW.QSizePolicy.Fixed)
        # Widget-scoped shortcuts: Enter/Return activate, Up/Down navigate.
        self.activateEnterShortcut = QW.QShortcut(QG.QKeySequence(QC.Qt.Key_Enter), self)
        self.activateEnterShortcut.setContext(QC.Qt.WidgetShortcut)
        self.activateReturnShortcut = QW.QShortcut(QG.QKeySequence(QC.Qt.Key_Return), self)
        self.activateReturnShortcut.setContext(QC.Qt.WidgetShortcut)
        self.upShortcut = QW.QShortcut(QG.QKeySequence(QC.Qt.Key_Up), self)
        self.upShortcut.setContext(QC.Qt.WidgetShortcut)
        self.downShortcut = QW.QShortcut(QG.QKeySequence(QC.Qt.Key_Down), self)
        self.downShortcut.setContext(QC.Qt.WidgetShortcut)
        # Selected palette swaps window and text colors for contrast.
        self.selectedPalette = self.palette()
        self.selectedPalette.setColor(QG.QPalette.Window, QG.QPalette().color(QG.QPalette.WindowText))
        self.selectedPalette.setColor(QG.QPalette.WindowText, QG.QPalette().color(QG.QPalette.Window))
        self.id = -1
        self.providerIndex = -1
        self.closeOnActivated = False
        self.selected = False
        self.defaultStylingEnabled = True
        self.currentIcon = QG.QIcon()
        self.currentToggledIcon = QG.QIcon()
        self.toggled = False
        self.activateEnterShortcut.activated.connect(self.activate)
        self.activateReturnShortcut.activated.connect(self.activate)
        self.upShortcut.activated.connect(self.up)
        self.downShortcut.activated.connect(self.down)
    def paintEvent(self, event):
        """Draw the background via the style so stylesheet rules apply."""
        opt = QW.QStyleOption()
        opt.initFrom(self)
        p = QG.QPainter(self)
        self.style().drawPrimitive(QW.QStyle.PE_Widget, opt, p, self)
    def enterEvent(self, event):
        self.entered.emit()
    def mousePressEvent(self, event):
        self.entered.emit()
    def mouseReleaseEvent(self, event):
        self.activate()
    def activate(self):
        """Toggle (for persistent rows), refresh the icon and emit activated."""
        if not self.closeOnActivated:
            self.toggled = not self.toggled
            iconToUse = self.currentIcon if not self.toggled else self.currentToggledIcon
            self.iconLabel.setPixmap(iconToUse.pixmap(self.iconHeight, self.iconHeight, QG.QIcon.Selected if self.selected else QG.QIcon.Normal))
        self.activated.emit(self.providerIndex, self.id, self.closeOnActivated)
    def updateData(self, providerIndex: int, data: QLocatorSearchResult):
        """Re-populate this reusable row from a QLocatorSearchResult."""
        self.toggled = data.toggled
        self.currentIcon = QG.QIcon()
        self.currentIcon.addFile(data.defaultIconPath, QC.QSize(), QG.QIcon.Normal)
        self.currentIcon.addFile(data.selectedIconPath, QC.QSize(), QG.QIcon.Selected)
        self.currentToggledIcon = QG.QIcon()
        self.currentToggledIcon.addFile(data.toggledIconPath, QC.QSize(), QG.QIcon.Normal)
        self.currentToggledIcon.addFile(data.toggledSelectedIconPath, QC.QSize(), QG.QIcon.Selected)
        iconToUse = self.currentIcon if not self.toggled else self.currentToggledIcon
        self.iconLabel.setPixmap(iconToUse.pixmap(self.iconHeight, self.iconHeight, QG.QIcon.Selected if self.selected else QG.QIcon.Normal))
        self.mainTextLabel.clear()
        self.secondaryTextLabel.clear()
        if len(data.text) > 0:
            self.mainTextLabel.setText(elideRichText(data.text[0], self.primaryTextWidth, self.mainTextLabel, False))
        if len(data.text) > 1:
            self.secondaryTextLabel.setText(elideRichText(data.text[1], self.secondaryTextWidth, self.secondaryTextLabel, True))
        self.id = data.id
        self.closeOnActivated = data.closeOnActivated
        self.providerIndex = providerIndex
    def setDefaultStylingEnabled(self, enabled: bool):
        """Enable/disable palette-based highlighting (e.g. when QSS is used)."""
        if self.defaultStylingEnabled and not enabled: self.setPalette(QG.QPalette())
        self.defaultStylingEnabled = enabled
    def setSelected(self, selected: bool):
        """Apply/remove the selection look and take focus when selected."""
        self.selected = selected
        if selected:
            self.setObjectName("selectedLocatorResult")
            if self.defaultStylingEnabled: self.setPalette(self.selectedPalette)
            self.style().unpolish(self)
            self.style().polish(self)
            self.setFocus()
        else:
            self.setObjectName("unselectedLocatorResult")
            if self.defaultStylingEnabled: self.setPalette(QG.QPalette())
            self.style().unpolish(self)
            self.style().polish(self)
        iconToUse = self.currentIcon if not self.toggled else self.currentToggledIcon
        self.iconLabel.setPixmap(iconToUse.pixmap(self.iconHeight, self.iconHeight, QG.QIcon.Selected if self.selected else QG.QIcon.Normal))
    def keyPressEvent(self, ev: QG.QKeyEvent):
        """Forward typing to the search box; let navigation/activation keys
        fall through to the default handler."""
        if ev.key() != QC.Qt.Key_Up and ev.key() != QC.Qt.Key_Down and ev.key() != QC.Qt.Key_Enter and ev.key() != QC.Qt.Key_Return:
            QW.QApplication.postEvent(self.keyEventTarget, QG.QKeyEvent(ev.type(), ev.key(), ev.modifiers(), ev.text(), ev.isAutoRepeat()))
            self.keyEventTarget.setFocus()
        else:
            # BUG FIX: super().keyPressEvent is already bound to self; the
            # original passed self again, raising TypeError whenever this
            # branch executed.
            super().keyPressEvent(ev)
class QAbstractLocatorSearchProvider(QC.QObject):
    """Base class for locator search providers.

    Subclasses emit resultsAvailable(jobID, results) where results is a list
    of QLocatorSearchResult objects.
    """
    resultsAvailable = QC.Signal(int, list)
    def __init__(self, parent = None):
        super().__init__(parent)
class QExampleSearchProvider(QAbstractLocatorSearchProvider):
    """Demo provider that synchronously emits a random batch of fake results."""
    def __init__(self, parent = None):
        super().__init__(parent)
    def title(self):
        """Section title shown above this provider's results."""
        return "Example search provider"
    def suggestedReservedItemCount(self):
        """Number of result widgets the locator should pre-allocate."""
        return 32
    def resultSelected(self, resultID: int):
        """Selection hook; the demo provider ignores it."""
        pass
    def processQuery(self, query: str, context, jobID: int):
        """Emit between 0 and 50 placeholder results for any query."""
        resCount = random.randint(0, 50)
        results = []
        for i in range(resCount):
            randomStr = str()
            for j in range(5):
                # BUG FIX: randint is inclusive at both ends, so the upper
                # bound must be 25 to stay within 'a'..'z' (97 + 25 == 'z');
                # the original 26 could yield chr(123) == '{'.
                randomStr += str(chr(97+random.randint(0, 25)))
            txt = []
            txt.append("Result <b>text</b> #" + str(i) + randomStr)
            txt.append("Secondary result text")
            results.append(QLocatorSearchResult(0, "icon.svg", "icon.svg", True, txt, False, "icon.svg", "icon.svg"))
        self.resultsAvailable.emit(jobID, results)
    def stopJobs(self, jobs):
        """Nothing to cancel — queries are handled synchronously."""
        pass
    def hideTitle(self):
        return False
    def titleIconPath(self):
        return "icon.svg"
class QCalculatorSearchProvider(QAbstractLocatorSearchProvider):
    """Inline calculator: evaluates arithmetic queries and emits one result.

    Uses eval() on the query with empty __builtins__ and a whitelist
    environment; safePattern additionally rejects any query containing
    tokens outside the whitelist names, digits, whitespace and arithmetic
    punctuation before eval() is ever reached.
    """
    def __init__(self, parent = None):
        super().__init__(parent)
        # Whitelisted names usable inside queries.
        self.safeEnv = {
            'ceil': math.ceil,
            'abs': abs,
            'floor': math.floor,
            'gcd': math.gcd,
            'exp': math.exp,
            'log': math.log,
            'log2': math.log2,
            'log10': math.log10,
            'pow': math.pow,
            'sqrt': math.sqrt,
            'acos': math.acos,
            'asin': math.asin,
            'atan': math.atan,
            'atan2': math.atan2,
            'cos': math.cos,
            'hypot': math.hypot,
            'sin': math.sin,
            'tan': math.tan,
            'degrees': math.degrees,
            'radians': math.radians,
            'acosh': math.acosh,
            'asinh': math.asinh,
            'atanh': math.atanh,
            'cosh': math.cosh,
            'sinh': math.sinh,
            'tanh': math.tanh,
            'erf': math.erf,
            'erfc': math.erfc,
            'gamma': math.gamma,
            'lgamma': math.lgamma,
            'pi': math.pi,
            'e': math.e,
            'inf': math.inf,
            'randint': random.randint,
            'random': random.random,
            'factorial': math.factorial
        }
        self.safePattern = re.compile("^("+"|".join(self.safeEnv.keys())+r"|[0-9.*+\-%/()]|\s" + ")+$")
    def processQuery(self, query: str, context, jobID: int):
        """Evaluate the query if it matches the whitelist pattern and emit
        a single Calculator result; invalid expressions are ignored."""
        try:
            if len(query.strip()) and self.safePattern.match(query):
                result = str(eval(query, {"__builtins__": {}}, self.safeEnv))
                try:
                    int(result)
                # BUG FIX: narrowed from a bare except, which also swallowed
                # KeyboardInterrupt/SystemExit.
                except ValueError:
                    # Not integral — re-render as a float string.
                    result = str(float(result))
                self.resultsAvailable.emit(jobID, [QLocatorSearchResult(0, self.iconPath(), self.selectedIconPath(), False, [result,"Calculator"], False, self.iconPath(), self.selectedIconPath())])
        # BUG FIX: narrowed from a bare except; eval errors on malformed
        # expressions are deliberately ignored (best-effort provider).
        except Exception:
            pass
    def title(self):
        return str()
    def suggestedReservedItemCount(self):
        return 1
    def resultSelected(self, resultID: int):
        pass
    def stopJobs(self, jobs):
        pass
    def hideTitle(self):
        return True
    def titleIconPath(self):
        return str()
    def selectedIconPath(self):
        return str()
    def iconPath(self):
        return str()
class QLocatorWidget(QW.QWidget):
finished = QC.Signal()
    def __init__(self, parent = None, width: int = 600, resultHeight: int = 36, titleHeight: int = 36, primaryTextWidth: int = 320, secondaryTextWidth: int = 200, maxVisibleItemCount: int = 8):
        """Build the popup: a search line edit above a scrollable result list.

        Result widgets are pre-allocated per provider (see providerAdded) and
        shown/hidden as queries come in.
        """
        super().__init__(parent)
        self.alignment = QC.Qt.AlignCenter
        self.resultHeight = resultHeight
        self.titleHeight = titleHeight
        self.primaryTextWidth = primaryTextWidth
        self.locator = None
        self.secondaryTextWidth = secondaryTextWidth
        self.maxVisibleItemCount = maxVisibleItemCount
        # Parallel per-provider lists: pool sizes, visible counts, job ids,
        # title widgets and result-widget pools.
        self.reservedItemCounts = []
        self.visibleResultItemCounts = []
        self.currentJobIds = []
        self.titleItems = []
        self.resultItems = []
        self.escapeShortcuts = []
        self.selectedLayoutItemIndex = 0
        self.defaultStylingEnabled = True
        self.context = None
        self.lastQuery = str()
        self.setVisible(False)
        self.setLayout(QW.QVBoxLayout())
        self.searchEdit = QW.QLineEdit()
        self.resultList = QW.QScrollArea()
        self.resultLayout = QW.QVBoxLayout()
        self.resultList.setWidget(QW.QWidget())
        self.resultList.widget().setLayout(self.resultLayout)
        self.resultList.setWidgetResizable(True)
        self.resultLayout.setSizeConstraint(QW.QLayout.SetMinAndMaxSize)
        self.layout().addWidget(self.searchEdit)
        self.layout().addWidget(self.resultList)
        self.layout().setContentsMargins(0, 0, 0, 0)
        self.layout().setSpacing(0)
        self.resultLayout.setContentsMargins(0, 0, 0, 0)
        self.resultLayout.setSpacing(0)
        self.setFixedWidth(width)
        self.setWindowFlags(QC.Qt.FramelessWindowHint | QC.Qt.WindowStaysOnTopHint | QC.Qt.CustomizeWindowHint | QC.Qt.Popup)
        self.resultList.setSizeAdjustPolicy(QW.QAbstractScrollArea.AdjustToContents)
        self.setSizePolicy(QW.QSizePolicy.Fixed, QW.QSizePolicy.Maximum)
        self.setEscapeShortcuts([QC.Qt.Key_Escape])
        # Down arrow in the editor moves selection into the result list.
        self.editorDownShortcut = QW.QShortcut(QG.QKeySequence(QC.Qt.Key_Down), self.searchEdit)
        self.editorDownShortcut.setContext(QC.Qt.WidgetShortcut)
        self.editorDownShortcut.activated.connect(self.handleEditorDown)
        def handleTextEdited():
            # Hide all rows and section titles; fresh results will re-show them.
            for i in range(len(self.resultItems)):
                for it in self.resultItems[i]: self.setResultVisible(it, False)
                self.setResultVisible(self.titleItems[i], False)
            self.selectedLayoutItemIndex = 0
            self.updateResultListHeight()
        self.searchEdit.textEdited.connect(handleTextEdited)
        def handleSearchFocused():
            # Clear the current row selection when focus returns to the editor.
            if self.selectedLayoutItemIndex < self.resultLayout.count():
                widget = self.resultLayout.itemAt(self.selectedLayoutItemIndex).widget()
                if widget:
                    if isinstance(widget, QLocatorResultWidget):
                        widget.setSelected(False)
            self.selectedLayoutItemIndex = 0
        # NOTE(review): `filter` (shadows the builtin) is never stored on
        # self; in qtpy bindings the Python wrapper of an installed event
        # filter can be garbage-collected if no reference is kept — verify.
        filter = FocusEventFilter()
        self.searchEdit.installEventFilter(filter)
        filter.focused.connect(handleSearchFocused)
        # Zero-interval single-shot timer defers the query to the event loop.
        self.queryTimer = QC.QTimer(self)
        self.queryTimer.setInterval(0)
        self.queryTimer.setSingleShot(True)
        def handleQueryTimeout():
            if not self.locator: return
            self.currentJobIds = self.locator.query(self.lastQuery, self.context)
        self.queryTimer.timeout.connect(handleQueryTimeout)
        self.searchEdit.textEdited.connect(self.queryLocator)
        self.updateResultListHeight()
def setAlignment( self, alignment ):
if alignment == QC.Qt.AlignCenter:
self.alignment = alignment
self.updateAlignment()
elif alignment == QC.Qt.AlignTop:
self.alignment = alignment
self.updateAlignment()
    def updateAlignment( self ):
        """Re-position the popup within its top-level ancestor (or the
        primary screen when there is no parent) per self.alignment."""
        widget = self
        # Walk up to the top-level ancestor widget.
        while True:
            parent = widget.parentWidget()
            if not parent:
                break
            else:
                widget = parent
        screenRect = QW.QApplication.primaryScreen().availableGeometry()
        if widget != self: # there is a parent
            screenRect = widget.geometry()
        if self.alignment == QC.Qt.AlignCenter:
            centerRect = QW.QStyle.alignedRect(QC.Qt.LeftToRight, QC.Qt.AlignCenter, self.size(), screenRect)
            # Nudge upward so the popup sits above true center, leaving room
            # for results to grow downward.
            centerRect.setY(max(0, centerRect.y() - self.resultHeight * 4))
            self.setGeometry(centerRect)
        elif self.alignment == QC.Qt.AlignTop:
            rect = QW.QStyle.alignedRect(QC.Qt.LeftToRight, QC.Qt.AlignHCenter | QC.Qt.AlignTop, self.size(), screenRect)
            self.setGeometry(rect)
def paintEvent(self, event):
opt = QW.QStyleOption()
opt.initFrom(self)
p = QG.QPainter(self)
self.style().drawPrimitive(QW.QStyle.PE_Widget, opt, p, self)
def providerAdded(self, title: str, titleIconPath: str, suggestedReservedItemCount: int, hideTitle: bool):
newTitleWidget = QLocatorTitleWidget(title, titleIconPath, self.titleHeight, hideTitle)
self.visibleResultItemCounts.append(0)
self.reservedItemCounts.append(suggestedReservedItemCount)
self.titleItems.append(newTitleWidget)
self.resultLayout.addWidget(newTitleWidget)
newTitleWidget.setVisible(False)
self.resultItems.append([])
for i in range(suggestedReservedItemCount):
newWidget = QLocatorResultWidget(self.searchEdit, self.resultHeight, self.primaryTextWidth, self.secondaryTextWidth, self)
self.setupResultWidget(newWidget)
self.resultItems[-1].append(newWidget)
self.resultLayout.addWidget(newWidget)
newWidget.setVisible(False)
def setEscapeShortcuts(self, shortcuts):
for escapeShortcut in self.escapeShortcuts:
escapeShortcut.deleteLater()
self.escapeShortcuts = []
for shortcut in shortcuts:
newShortcut = QW.QShortcut(QG.QKeySequence(shortcut), self)
self.escapeShortcuts.append(newShortcut)
newShortcut.activated.connect(self.finish)
def setLocator(self, locator):
if self.locator:
self.locator.providerAdded.disconnect(self.providerAdded)
self.locator.resultsAvailable.disconnect(self.handleResultsAvailable)
self.reset()
| |
end timestamps
if robot.timestamp_start_initiate is not None and robot.timestamp_finish is not None:
trade_outcome, trade_commissions, trade_funding, percent_gained = sell_results_process(e_api, robot, robot.timestamp_start_initiate, robot.timestamp_finish)
# If start timestamp comes from process - need to consider original value to calc difference
if (robot.timestamp_start_initiate is None) and (robot.timestamp_start_process is not None) and (robot.timestamp_finish is not None):
trade_outcome, trade_commissions, trade_funding, percent_gained = sell_results_process(e_api, robot, robot.timestamp_start_process,
robot.timestamp_finish, use_start_value = True)
elif robot.exchange == 'oanda': # OANDA just uses the last trade and return the actual result so no need to pass start value
# Check which timestamp to use as a start
if robot.timestamp_start_initiate is not None:
timestamp_from = robot.timestamp_start_initiate
else:
timestamp_from = robot.timestamp_start_process
trade_outcome, trade_commissions, trade_funding, percent_gained = sell_results_process(e_api, robot, timestamp_from, robot.timestamp_finish)
#print("!!!!", trade_outcome, trade_commissions, trade_funding, percent_gained )
# For consistency
robot.earned_ratio = percent_gained/robot.margin_level
else: # in backtesting mode
if not robot.short_flag:
robot.earned_ratio = round((float(robot.price_exit)/float(robot.price_entry) - 1)*100, 2) # in %
else:
robot.earned_ratio = round((float(robot.price_entry)/float(robot.price_exit) - 1)*100, 2) # in %
percent_gained = robot.earned_ratio * robot.margin_level # percent gained and earned ratio > 0 mean profit
# Processing and writing the results / communicating
# Flag for losing / winning trade
if robot.earned_ratio >= 0:
robot.losing_trade_flag = False
else:
robot.losing_trade_flag = True
robot.trade_time = b_test.strftime("%d-%m-%Y %H:%M")
#robot.logger.lprint(['Total from all sales', robot.main_curr_from_sell, 'total commission', robot.commission_total]) #not using
robot.logger.lprint(['Price entry', robot.price_entry, ', price exit', robot.price_exit])
robot.logger.lprint(['Earned % (no margin)', robot.earned_ratio, '| Original value:', robot.value_original])
# Backtesting results storage
if config.backtesting_enabled:
robot.backtest_results.append([robot.entry_time, robot.trade_time, robot.earned_ratio])
robot.logger.lprint(['> Backtesting results so far (%)'])
tmp_sum = 0
tmp_str = '\n'
for elem in robot.backtest_results:
tmp_str = str(elem[2]) + '\n'
tmp_sum += elem[2]
robot.logger.lprint([tmp_str])
robot.logger.lprint(["Total", tmp_sum, '\n'])
# Also updating a summary log
# check and fix here. multiplier is still ok to look at though
##balance_summary = robot.value_original * (1 + float(robot.earned_ratio)/100)/robot.margin_level
robot.logger.write_summary(robot.backtest_results, robot.value_original_snapshot ,
robot.start_date, robot.trade_time, robot.margin_level,
codename = robot.codename, params_info = robot.attr_string)
# Emoji to use and descriptions
if not b_test.backtesting:
# GIFs and emoji
if robot.earned_ratio < 0:
emoji_text = '👻' # whoopsie
r = g.random(tag="poor")
else:
emoji_text = '💵' # dollar
r = g.random(tag="rich")
# Getting the pic
gifdata = r['data']
if gifdata != {}:
try: # in case of api issues
gifurl = r['data']['images']['downsized']['url']
except:
gifurl = None
else:
gifurl = None
if not robot.short_flag:
trade_description = '(long)'
else:
trade_description = '(short)'
price_exit_msg = round(float(robot.price_exit), robot.round_num)
msg_result = '{} {}: Entry price: {}, exit price: {} {}. \nOutcome: {}% ({}% on margin).'.format(
emoji_text, robot.market, robot.price_entry, price_exit_msg,
trade_description, round(robot.earned_ratio, 2),
round(robot.earned_ratio * robot.margin_level, 2))
send_chat_message(robot.user_id, msg_result)
if gifurl is not None:
try:
if not config.backtesting_enabled:
bot.sendDocument(robot.user_id, gifurl)
except:
robot.logger.lprint(["Cannot send gif"])
# Updating twitter if enabled
if (robot.lambobot is not None) and not robot.simulation:
comm_string_twitter = '{} {}: closed a position. Entry price: {}, ' \
'exit price: {} {}. {}% made (multiplied by margin) ' \
'{} #{} #{} #algotrading'.format(emoji_text, robot.market.upper(),
robot.price_entry, price_exit_msg,
trade_description, round(robot.earned_ratio, 2),
robot.twitter_comment, robot.trade, robot.currency)
if gifurl is not None:
try: # just in case if this returns an error
robot.lambobot.post_image(gifurl, comm_string_twitter)
except:
try:
robot.lambobot.post(comm_string_twitter)
except:
robot.logger.lprint(["Cannot tweet the status"])
else:
try:
robot.lambobot.post(comm_string_twitter)
except:
robot.logger.lprint(["Cannot tweet the status"])
# Update the DB with trade logs
if robot.timestamp_start_initiate is None:
active_start_timestamp = robot.timestamp_start_process
else:
active_start_timestamp = robot.timestamp_start_initiate
sql_string = "INSERT INTO trade_log(userid, start_timestamp," \
" end_timestamp, trade_outcome, trade_commissions, " \
"trade_funding, earned_ratio, percent_gained, core_strategy) VALUES " \
"({}, {}, {}, {}, {}, {}, {}, {}, '{}')".format(robot.user_id, active_start_timestamp, robot.timestamp_finish,
trade_outcome, trade_commissions, trade_funding, robot.earned_ratio, percent_gained, robot.core_strategy)
sql.query(sql_string)
### Just update db with the current price info
## NOPE! This messes up the prices
'''
def stop_reconfigure_update_db(robot, b_test):
if not b_test.backtesting:
sql_string = "SELECT id FROM market_info WHERE market = '{}'".format(robot.market)
rows = sql.query(sql_string)
if rows != []:
# Existing - updating
key_row_id = rows[0][0]
sql_string = "UPDATE market_info SET price = {} WHERE id = {}".format(robot.price, key_row_id)
sql.query(sql_string)
else:
# New - inserting
sql_string = "INSERT INTO market_info(market, price) VALUES ('{}', {})".format(
robot.market, robot.price
)
sql.query(sql_string)
'''
### Stop reconfigure status updates
def stop_reconfigure_status_update(robot, mode):
    """Send a periodic status update (chat, optionally twitter) for *robot*.

    Fires immediately when mode == 'now'; in 'process' mode only when at
    least 4 hours passed since the last communication. Otherwise a no-op.
    """
    due_by_timer = (mode == 'process'
                    and abs(int(robot.time_hour_update) - int(robot.time_hour_comms)) >= 4)
    if mode != 'now' and not due_by_timer:
        return
    robot.time_hour_comms = robot.time_hour_update
    # Describe price movement relative to entry (skipped for ad-hoc 'now' updates).
    if mode == 'now':
        price_descr_text = ''
    else:
        move = robot.percent_of_entry
        if move >= 100:
            price_descr_text = 'Price: (^) up {0:.2f}% from entry'.format(move - 100)
        else:
            price_descr_text = 'Price: (v) down {0:.2f}% from entry'.format(100 - move)
    prediction_label = robot.predicted_name(robot.prediction)
    confidence = float(robot.prediction_probability)
    status_update = "Status update ({} {}) {} \n\nPrediction: {}, confidence {:.0%}".format(
        robot.market, robot.exchange_abbr, price_descr_text, prediction_label, confidence)
    send_chat_message(robot.user_id, status_update)
    # Mirror the update on twitter when a poster is configured.
    if robot.lambobot is not None:
        comm_string_twitter = "You should have {} on {} (confidence {:.0%})".format(
            prediction_label, robot.market.upper(), confidence)
        try:
            robot.lambobot.post(comm_string_twitter)
        except:
            # Best-effort: a failed tweet must not break the trading flow.
            pass
### Setting stop loss based on price data
# Returns whether price flipped to opposite TD action, and new stop targets based on larger td period
def stop_reconfigure(robot, mode = None, b_test = None):
    """Refresh timers and the ML prediction for *robot*, then trigger periodic status updates.

    Mutates robot.prediction / robot.prediction_probability (and timer fields) in place.
    NOTE(review): b_test defaults to None but is dereferenced immediately
    (b_test.finished) — callers apparently always pass it; confirm.
    NOTE(review): original indentation was lost in this chunk; block structure was
    reconstructed. Verify the out-of-market-hours check is meant to apply in both
    live and backtesting modes.
    """
    # Check if this is a time to exit in backtesting mode
    if b_test.finished:
        robot.logger.lprint(["Finished backtesting as per config"])
        robot.terminate()
    # Timers update: current hour/minute drive the prediction-refresh cadence.
    robot.time_hour_update = b_test.strftime("%H")
    timer_minute = int(b_test.strftime("%M"))
    # Disabled legacy path kept for reference (no-op string literal).
    '''
# NOPE! Prices are centralised now
# Rewritten: only update current price info and read predictions provided from the db
stop_reconfigure_update_db(robot, b_test)
'''
    # Live mode: predictions are centralised — read them from the DB.
    if not b_test.backtesting:
        sql_string = "SELECT prediction, probability FROM market_info WHERE market = '{}'".format(robot.market)
        rows = sql.query(sql_string)
        if rows != []:
            robot.prediction = robot.predicted_num_from_name(rows[0][0])
            robot.prediction_probability = float(rows[0][1])
    # If backtesting: compute the prediction locally when a control bar is due.
    else:
        # Updating control bars if due for ML ('now' forces an update)
        if (timer_minute in robot.control_bars_minutes) or mode == 'now':
            # Hour-minute stamp used to avoid recomputing within the same bar.
            timer_control_bars_check = '{}-{}'.format(robot.time_hour_update, timer_minute)
            if robot.timer_control_bars_update is None:
                robot.timer_control_bars_update = timer_control_bars_check
            if (robot.timer_control_bars_update != timer_control_bars_check) or mode == 'now':
                robot.timer_control_bars_update = timer_control_bars_check
                robot.logger.lprint(["(i) updating prediction"])
                # Predictions update (requires processed data in workflow db now
                try:
                    if config.backtesting_use_db_labels:
                        label_to_predict = td_info.get_features(b_test.strftime("%Y-%m-%d %H:%M:00"),
                            robot)  # use this to get pre-computed DB results
                    else:
                        label_to_predict = td_info.get_features_realtime(robot, b_test)  # realtime features calculation
                    robot.prediction, robot.prediction_probability = td_info.predict_label(label_to_predict, robot)
                except TypeError:
                    # Feature extraction returned None / wrong shape: fall back to 'no position'.
                    robot.logger.lprint(["error: cannot calculate features. check the prices file."])
                    robot.prediction, robot.prediction_probability = 0, 1
                # Show the results
                robot.logger.lprint(["---- (i) prediction update: {}, confidence {:.0%}".format(
                    robot.predicted_name(robot.prediction), robot.prediction_probability)])
    # Changing the result if we are out of market hours for traditional markets
    if not is_trading_hours(b_test, robot):
        robot.prediction, robot.prediction_probability = 0, 1
        robot.logger.lprint(["---- (i) out of market hours or close: changing the flag to 'no position'"])
    # Status updates every 4H (suppressed on the first call after a restart).
    if not robot.is_restart:
        stop_reconfigure_status_update(robot, mode=mode)
    robot.is_restart = False
### Main sell function to sell at current prices
# Will be performed until the balance available for sale is zero or slightly more
def sell_now(at_price, b_test = None): # change for oanda: oanda closure is as simple as calling closeposition
robot.price_exit = at_price
# First run flag now to sleep on the first call
proceed_w_sleep = False
# Timer
timer_sell_now_start = b_test.time()
if robot.limit_sell_amount is not None:
robot.limit_sell_amount = float(robot.limit_sell_amount) # using str, we will not have more decimal numbers than needed
if robot.sell_portion is not None:
robot.sell_portion = float(robot.sell_portion)
if robot.simulation:
robot.balance_start = float(robot.simulation_balance)
robot.balance_available = float(robot.simulation_balance) # balance_available as of balance to close
robot.remaining_sell_balance = float(robot.simulation_balance)
# For bitmex / oanda, we will be trading contracts, no adjustments are available. Getting the balances and setting the original value
if robot.exchange in ['bitmex', 'oanda']:
if not robot.simulation and not b_test.backtesting:
# There were issues with testnet returning blanks so changed this
contracts_check = {}
positions = e_api.getpositions(robot.exchange, robot.market) # first not empty result
for position in positions:
if position != {}:
contracts_check = position
break # exit the for loop
# print 'contracts_check', contracts_check #TEST
# If nothing was found
if contracts_check == {}:
sell_run_flag = False
contracts = 0
else:
if robot.market in config.primary_calc_markets: #robot.market == 'btc/usd': #
contracts = contracts_check['contracts']
else:
contracts = contracts_check['contracts_no']
robot.contracts_start | |
<filename>permuta/permutils/pin_words.py
# pylint: disable=too-many-public-methods
# pylint: disable=eval-used
from bisect import bisect_left
from collections import defaultdict
from functools import lru_cache
from pathlib import Path
from typing import DefaultDict, Dict, Iterator, List, Set, Tuple
from automata.fa.dfa import DFA
from automata.fa.nfa import NFA
from permuta import Av, Perm
from permuta.permutils import all_symmetry_sets
from permuta.permutils.pinword_util import PinWordUtil
DIRS = "ULDR"
QUADS = "1234"
class PinWords:
"""Class for pinowords"""
@staticmethod
def pinword_to_perm(word: str) -> "Perm":
    """Return the permutation encoded by the pinword *word*.

    Examples:
        >>> PinWords.pinword_to_perm("31")
        Perm((0, 1))
        >>> PinWords.pinword_to_perm("4R")
        Perm((0, 1))
        >>> PinWords.pinword_to_perm("3DL2UR")
        Perm((3, 5, 1, 2, 0, 4))
        >>> PinWords.pinword_to_perm("14L2UR")
        Perm((3, 5, 1, 2, 0, 4))
    """
    helper = PinWordUtil()
    # Seed with the origin; it is discarded once all pin points are placed.
    points = [(helper.rzero(), helper.rzero())]
    for letter in word:
        new_x, new_y = helper.call(letter, points)
        assert new_x and new_y  # every pin step must yield non-degenerate coordinates
        points.append((new_x, new_y))
    del points[0]
    points.sort()
    # Rank the y-coordinates to obtain the one-line permutation.
    ranked_ys = sorted(pt[1] for pt in points)
    return Perm(tuple(bisect_left(ranked_ys, pt[1]) for pt in points))
@classmethod
def pinwords_of_length(cls, length: int) -> Iterator[str]:
"""
Generates all pinwords of length n.
Note that pinwords cannot contain any occurrence of:
UU, UD, DU, DD, LL, LR, RL, RR
"""
if length == 0:
yield ""
else:
for word in cls.pinwords_of_length(length - 1):
if len(word) > 0 and word[-1] != "U" and word[-1] != "D":
yield word + "U"
yield word + "D"
if len(word) > 0 and word[-1] != "R" and word[-1] != "L":
yield word + "L"
yield word + "R"
for char in QUADS:
yield word + char
@classmethod
@lru_cache(maxsize=None)
def pinword_to_perm_mapping(cls, length: int) -> Dict[str, "Perm"]:
    """Map each pinword of the given length to the Perm it encodes (cached)."""
    mapping: Dict[str, "Perm"] = {}
    for pinword in cls.pinwords_of_length(length):
        mapping[pinword] = cls.pinword_to_perm(pinword)
    return mapping
@classmethod
@lru_cache(maxsize=None)
def perm_to_pinword_mapping(cls, length: int) -> Dict:
    """Invert pinword_to_perm_mapping: map each Perm to the set of its pinwords (cached)."""
    inverse = defaultdict(set)
    for pinword, perm in cls.pinword_to_perm_mapping(length).items():
        inverse[perm].add(pinword)
    return inverse
@staticmethod
def is_strict_pinword(word: str) -> bool:
"""
Returns True if w is a strict pinword, False otherwise
"""
if word == "":
return True # paper does not mention the empty pinword
return word[0] in QUADS and all(word[i] in DIRS for i in range(1, len(word)))
@classmethod
def strict_pinwords_of_length(cls, length: int) -> Iterator[str]:
    """Yield all strict pinwords of the given length."""
    yield from filter(cls.is_strict_pinword, cls.pinwords_of_length(length))
@classmethod
@lru_cache(maxsize=None)
def perm_to_strict_pinword_mapping(cls, length: int) -> Dict:
    """Like perm_to_pinword_mapping, but keep only the strict pinwords (cached)."""
    return {
        perm: set(filter(cls.is_strict_pinword, words))
        for perm, words in cls.perm_to_pinword_mapping(length).items()
    }
@staticmethod
def factor_pinword(word: str) -> List[str]:
"""
Factors a pinword into its strong numeral led factor decomposition.
Examples:
>>> PinWords.factor_pinword("14L2UR")
['1', '4L', '2UR']
"""
position = 0
factor_list = []
while position < len(word):
cur = position + 1
while cur < len(word) and word[cur] in DIRS:
cur += 1
factor_list.append(word[position:cur])
position = cur
return factor_list
@staticmethod
def sp_to_m(word: str) -> Tuple[str, ...]:
"""
The bijection phi in Definition 3.9 mapping words in SP to words in M.
Input must be a strict pin word. This implementation includes the extra
definition given in Remark 3.11, mapping words in M to words in M.
Examples:
>>> PinWords.sp_to_m("1R")
('RUR',)
>>> PinWords.sp_to_m("2UL")
('ULUL',)
>>> PinWords.sp_to_m("3")
('LD', 'DL')
>>> PinWords.sp_to_m("4D")
('DRD',)
"""
if word == "":
return ("",)
if word[0] in QUADS:
letter_dict = {"1": "RU", "2": "LU", "3": "LD", "4": "RD"}
opposite = {"U": "D", "D": "U", "L": "R", "R": "L"}
letters = letter_dict[word[0]]
if len(word) == 1:
return (letters, letters[::-1])
if letters[1] == word[1] or letters[1] == opposite[word[1]]:
letters = letters[::-1]
return (letters + word[1:],)
return (word,)
@staticmethod
def m_to_sp(word: str) -> str:
"""
The bijection phi in Definition 3.9 mapping words in M to words in SP.
Examples:
>>> PinWords.m_to_sp("RUR")
'1R'
>>> PinWords.m_to_sp("ULUL")
'2UL'
>>> PinWords.m_to_sp("DL")
'3'
>>> PinWords.m_to_sp("LD")
'3'
>>> PinWords.m_to_sp("DRD")
'4D'
"""
letter_dict = {"1": "RU", "2": "LU", "3": "LD", "4": "RD"}
rev_letter_dict = {}
for key, val in letter_dict.items():
rev_letter_dict[val] = key
rev_letter_dict[val[::-1]] = key
return rev_letter_dict[word[0:2]] + word[2:]
@classmethod
def quadrant(cls, word: str, ind: int) -> str:
    """Return the quadrant (w.r.t. the origin p_0) of point p_ind (Lemma 3.10).

    Examples:
        >>> PinWords.quadrant("2RU4LULURD4L", 2)
        '1'
        >>> PinWords.quadrant("2RU4LULURD4L", 3)
        '4'
        >>> PinWords.quadrant("2RU4LULURD4L", 6)
        '2'
    """
    if word[ind] in "1234":
        return word[ind]
    window = word[ind - 1 : ind + 1]
    if word[ind - 1] in "1234":
        # Previous letter is a numeral: translate via phi and drop its first letter.
        return cls.m_to_sp(cls.sp_to_m(window)[0][1:])
    return cls.m_to_sp(window)
@classmethod
def pinword_occurrences_sp(
    cls, word: str, u_word: str, start_index: int = 0
) -> Iterator[int]:
    """Yield the starting indices of occurrences of strict pinword u_word in word (Lemma 3.12)."""
    pattern_len = len(u_word)
    pattern_tail = u_word[1:]
    for start in range(start_index, len(word)):
        # The first letter must land in the same quadrant; the remainder must
        # match literally.
        if cls.quadrant(word, start) != cls.quadrant(u_word, 0):
            continue
        if word[start + 1 : start + pattern_len] == pattern_tail:
            yield start
@classmethod
def pinword_contains_sp(cls, word: str, u_word: str) -> bool:
    """Return True iff strict pinword u_word occurs in pinword word."""
    for _ in cls.pinword_occurrences_sp(word, u_word):
        return True
    return False
@classmethod
def pinword_occurrences(cls, word: str, u_word: str) -> Iterator[Tuple[int, ...]]:
    """Yield every occurrence of pinword u_word in pinword word (Theorem 3.13).

    An occurrence is a tuple of starting indices, one per strict factor of
    u_word, with factors matched left to right without overlap.
    """
    factors = cls.factor_pinword(u_word)

    def search(start: int, factor_idx: int, picked: List[int]) -> Iterator[Tuple[int, ...]]:
        # All factors placed: emit the occurrence.
        if factor_idx == len(factors):
            yield tuple(picked)
            return
        if start >= len(word):
            return
        factor = factors[factor_idx]
        for hit in cls.pinword_occurrences_sp(word, factor, start):
            picked.append(hit)
            yield from search(hit + len(factor), factor_idx + 1, picked)
            picked.pop()

    return search(0, 0, [])
@classmethod
def pinword_contains(cls, word: str, u_word: str):
    """Return True iff pinword u_word occurs in pinword word."""
    for _ in cls.pinword_occurrences(word, u_word):
        return True
    return False
@classmethod
def make_nfa_for_pinword(cls, u_word: str) -> "NFA":
    """Build an NFA over DIRS accepting the words in M containing u_word.

    The pinword is split into strict factors, each mapped through sp_to_m,
    and the automaton is assembled as  A* . phi(u_1) . A* . phi(u_2) ... A*
    where A* is a state with self-loops on every direction letter.
    """
    # Common prefix for generated state names (currently empty).
    prefix = ""
    def new_state(states) -> None:
        # Add a fresh state named by the running counter.
        states.add(prefix + str(len(states)))
    def last_state(states) -> str:
        # Name of the most recently created state.
        return prefix + str(len(states) - 1)
    def add_a_star(states, transitions) -> None:
        # Append a state with a self-loop on every direction letter (Sigma*).
        new_state(states)
        state = last_state(states)
        transitions[state] = {x: {state} for x in DIRS}
    def add_sp(u_i, states, transitions) -> None:
        # Append the sub-automaton recognising one factor image u_i.
        if len(u_i) == 2:
            # Length-1 numeral factor has two spellings: branch on either,
            # re-joining in a fresh Sigma* state.
            word1, word2 = u_i
            state_a = last_state(states)
            new_state(states)
            state_b = last_state(states)
            new_state(states)
            state_c = last_state(states)
            add_a_star(states, transitions)
            state_d = last_state(states)
            transitions[state_a][word1[0]].add(state_b)
            transitions[state_a][word2[0]].add(state_c)
            transitions[state_b] = {word1[1]: {state_d}}
            transitions[state_c] = {word2[1]: {state_d}}
        else:
            # Single spelling: chain one state per letter, ending in Sigma*.
            (x,) = u_i
            position = last_state(states)
            for i, c_var in enumerate(x):
                if i == len(x) - 1:
                    add_a_star(states, transitions)
                else:
                    new_state(states)
                nxt = last_state(states)
                if c_var in transitions[position]:
                    transitions[position][c_var].add(nxt)
                else:
                    transitions[position][c_var] = {nxt}
                position = nxt
    decomp = [cls.sp_to_m(x) for x in cls.factor_pinword(u_word)]
    # NOTE(review): rev is hard-coded False, so this reversal branch is dead
    # code — confirm whether a reversed construction was ever intended.
    rev = False
    if rev:
        decomp = [x[::-1] for x in decomp[::-1]]
    input_symbols = set(DIRS)
    initial_state = "0"
    states: Set[str] = set()
    transitions: DefaultDict[str, dict] = defaultdict(dict)
    # Leading Sigma* (state "0"), then one sub-automaton per factor.
    add_a_star(states, transitions)
    for u_i in decomp:
        add_sp(u_i, states, transitions)
    # The trailing Sigma* state is the unique accepting state.
    final_states = {last_state(states)}
    return NFA(
        states=states,
        input_symbols=input_symbols,
        transitions=transitions,
        initial_state=initial_state,
        final_states=final_states,
    )
@staticmethod
def dfa_name_reset(dfa_in: "DFA", minimize=True) -> "DFA":
    """Return an equivalent DFA with states renamed "0", "1", ...; minify when requested."""
    if minimize:
        return dfa_in.minify()
    rename = {state: str(idx) for idx, state in enumerate(dfa_in.states)}
    return DFA(
        states=set(rename.values()),
        input_symbols=dfa_in.input_symbols,
        transitions={
            rename[src]: {symbol: rename[dst] for symbol, dst in moves.items()}
            for src, moves in dfa_in.transitions.items()
        },
        initial_state=rename[dfa_in.initial_state],
        final_states={rename[state] for state in dfa_in.final_states},
    )
@staticmethod
def make_dfa_for_m() -> "DFA":
    """Return the 4-state DFA recognising M (no two consecutive same-axis letters).

    State "3" is the dead state reached by repeating an axis.
    """
    # For each state: (target on U/D, target on L/R).
    moves = {
        "0": ("1", "2"),
        "1": ("3", "2"),
        "2": ("1", "3"),
        "3": ("3", "3"),
    }
    transitions = {
        state: {"U": vert, "D": vert, "L": horiz, "R": horiz}
        for state, (vert, horiz) in moves.items()
    }
    return DFA(
        states=set(moves),
        input_symbols=set(DIRS),
        transitions=transitions,
        initial_state="0",
        final_states={"0", "1", "2"},
    )
@classmethod
def make_dfa_for_pinword(cls, word: str) -> "DFA":
    """Determinise (and rename/minify) the NFA built for *word*."""
    nfa = cls.make_nfa_for_pinword(word)
    return cls.dfa_name_reset(DFA.from_nfa(nfa))
@classmethod
def make_dfa_for_perm(cls, perm: "Perm") -> "DFA":
    """Union of the DFAs of every pinword of *perm* (processed in sorted order)."""
    combined: "DFA" = None
    for pinword in sorted(cls.pinwords_for_basis((perm,))):
        single = cls.make_dfa_for_pinword(pinword)
        if combined is None:
            combined = single
        else:
            combined = cls.dfa_name_reset(combined.union(single))
    return combined
@classmethod
def make_dfa_for_basis_from_pinwords(cls, basis: List["Perm"]) -> "DFA":
"""Returns DFA for basis from list of pinwords"""
pinwords = cls.pinwords_for_basis(basis)
out_dfa: "DFA" = None
sorted_pinwords = sorted(pinwords)
for word in sorted_pinwords:
if out_dfa is None:
out_dfa | |
<reponame>Naras-KS/progressive-transformers<filename>Architecture/progressive_transformer_lstm.py<gh_stars>1-10
################################################ Author - <NAME> #######################################################
################################################ Thanks to tensorflow and Keras ######################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import collections
import warnings
import numpy as np
from keras import activations
from keras import backend as K
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.layers import RNN
from keras.engine.base_layer import Layer
from keras.engine.input_spec import InputSpec
from keras.utils import tf_utils
from tensorflow.python.keras.layers.recurrent import DropoutRNNCellMixin
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util.tf_export import keras_export
"""
Classes for LSTMCells for each column in Progressive Neural Network Architecture. The cell state formulation for
each extended LSTM cell varies depending on the previous column LSTMCell values with self attention
LSTMCell has different calculations of cell formulation for each column and number of layer. Hence three classses
have defined for three separate column.
PROG_ATTLSTMCELL - LSTMCell for initial column and it has cell state formulation as same as built-in LSTMCell of keras
PROG_ATTLSTMCELL_1 - LSTMCell for extended column 1, which has cell state formulation depending on built-in LSTMCell
and previous cell state of initial column according to Progressive Neural Network
PROG_ATTLSTMCELL_2 - LSTMcell for extended column 2, which has cell stae formulation depending on built-in LSTMCell of
keras, and previous cell state of initial and previous column according to Progressive Neural Network
References:
- [Long short-term memory](
http://www.bioinf.jku.at/publications/older/2604.pdf)
- [Progressive Neural Networks](
https://arxiv.org/pdf/1606.04671.pdf)
- [Supervised sequence labeling with recurrent neural networks](
http://www.cs.toronto.edu/~graves/preprint.pdf)
- [A Theoretically Grounded Application of Dropout in
Recurrent Neural Networks](https://arxiv.org/abs/1512.05287)
- [Efficient Estimation of Word Representations in Vector Space](
https://arxiv.org/pdf/1301.3781.pdf)
- [Long Short-Term Memory Networks for Machine Reading](
https://www.aclweb.org/anthology/D16-1053.pdf)
"""
RECURRENT_DROPOUT_WARNING_MSG = (
'RNN `implementation=2` is not supported when `recurrent_dropout` is set. '
'Using `implementation=1`.')
@keras_export(v1=['keras.layers.LSTMCell'])
class LSTMCell(DropoutRNNCellMixin, Layer):
"""Cell class for the LSTM layer.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al., 2015](
http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
Call arguments:
inputs: A 2D tensor.
states: List of state tensors corresponding to the previous timestep.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
"""
def __init__(self,
             units,
             activation='tanh',
             recurrent_activation='hard_sigmoid',
             use_bias=True,
             kernel_initializer='glorot_uniform',
             recurrent_initializer='orthogonal',
             bias_initializer='zeros',
             unit_forget_bias=True,
             kernel_regularizer=None,
             recurrent_regularizer=None,
             bias_regularizer=None,
             kernel_constraint=None,
             recurrent_constraint=None,
             bias_constraint=None,
             frozen_weights = None,
             column = 0,
             layer = 0,
             dropout=0.,
             recurrent_dropout=0.,
             **kwargs):
    """Configure the LSTM cell.

    Beyond the standard Keras LSTMCell arguments, `column` and `layer`
    identify this cell's position in the progressive architecture, and
    `frozen_weights` is presumably the trained weights of earlier columns
    (see NOTE below — the argument is currently discarded).
    """
    # By default use cached variable under v2 mode, see b/143699808.
    if tf.compat.v1.executing_eagerly_outside_functions():
        self._enable_caching_device = kwargs.pop('enable_caching_device', True)
    else:
        self._enable_caching_device = kwargs.pop('enable_caching_device', False)
    super(LSTMCell, self).__init__(**kwargs)
    self.units = units
    # Resolve string identifiers to actual activation/initializer/... objects.
    self.activation = activations.get(activation)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.unit_forget_bias = unit_forget_bias
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    # Position of this cell in the progressive column/layer grid.
    self.layer = layer
    self.column = column
    # NOTE(review): the `frozen_weights` argument is discarded here — this
    # likely should be `self.frozen_weights = frozen_weights`. As written,
    # the `self.frozen_weights is not None` branch in build() can never run
    # (that branch also references a misspelled `self.forzen_weights` and a
    # bare `frozen_weights` name). Confirm intent before changing.
    self.frozen_weights = None
    # Clamp dropout rates into [0, 1].
    self.dropout = min(1., max(0., dropout))
    self.recurrent_dropout = min(1., max(0., recurrent_dropout))
    implementation = kwargs.pop('implementation', 1)
    # implementation=2 is incompatible with recurrent dropout; fall back to 1.
    if self.recurrent_dropout != 0 and implementation != 1:
        logging.debug(RECURRENT_DROPOUT_WARNING_MSG)
        self.implementation = 1
    else:
        self.implementation = implementation
    # tuple(_ListWrapper) was silently dropping list content in at least 2.7.10,
    # and fixed after 2.7.16. Converting the state_size to wrapper around
    # NoDependency(), so that the base_layer.__setattr__ will not convert it to
    # ListWrapper. Down the stream, self.states will be a list since it is
    # generated from nest.map_structure with list, and tuple(list) will work
    # properly.
    self.state_size = data_structures.NoDependency([self.units, self.units])
    self.output_size = self.units
@tf_utils.shape_type_conversion
def build(self, input_shape):
default_caching_device = _caching_device(self)
input_dim = input_shape[-1]
self.Qkernel = self.add_weight(
shape=(input_dim, self.units * 4),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
caching_device=default_caching_device)
self.Kkernel = self.add_weight(
shape=(input_dim, self.units * 4),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
caching_device=default_caching_device)
self.Vkernel = self.add_weight(
shape=(input_dim, self.units * 4),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
caching_device=default_caching_device)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 4),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint,
caching_device=default_caching_device)
if self.use_bias:
if self.unit_forget_bias:
def bias_initializer(_, *args, **kwargs):
return K.concatenate([
self.bias_initializer((self.units,), *args, **kwargs),
initializers.get('ones')((self.units,), *args, **kwargs),
self.bias_initializer((self.units * 2,), *args, **kwargs),
])
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(
shape=(self.units * 4,),
name='bias',
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
caching_device=default_caching_device)
else:
self.bias = None
self.built = True
if self.column > 1 and self.layer > 1:
if self.frozen_weights is not None:
if len(frozen_weights) == 1:
if self.use_bias:
if self.unit_forget_bias:
for i, trained_weights in enumerate(self.forzen_weights):
self.trained_Qkernel1, self.trained_Kkernel1, self.trained_Vkernel1, self.trained_recurrent_kernel1, self.bias1 = trained_weights
k_qti1, k_qtf1, k_qtc1, k_qto1 = tf.split(self.trained_Qkernel1, num_or_size_splits=4, axis=1)
k_kti1, k_ktf1, k_ktc1, k_kto1 = tf.split(self.trained_Kkernel1, num_or_size_splits=4, axis=1)
k_vti1, k_vtf1, k_vtc1, k_vto1 = tf.split(self.trained_Vkernel1, num_or_size_splits=4, axis=1)
b_ti1, b_tf1, b_tc1, b_to1 = tf.split(self.bias1, num_or_size_splits=4, axis=0)
rk_ti1 = self.trained_recurrent_kernel1[:, : self.units]
rk_tf1 = self.trained_recurrent_kernel1[:,self.units:self.units*2]
rk_tc1 = self.trained_recurrent_kernel1[:,self.units*2:self.units*3]
rk_to1 = self.trained_recurrent_kernel1[:,self.units*3:]
else:
self.trained_Qkernel1,self.trained_Kkernel1,self.trained_Vkernel1, self.trained_recurrent_kernel1 = trained_weights
self.bias1 = None
k_qti1, k_qtf1, k_qtc1, k_qto1 = tf.split(self.trained_Qkernel1, num_or_size_splits=4, axis=1)
k_kti1, k_ktf1, k_ktc1, k_kto1 = tf.split(self.trained_Kkernel1, num_or_size_splits=4, axis=1)
k_vti1, k_vtf1, k_vtc1, k_vto1 = tf.split(self.trained_Vkernel1, num_or_size_splits=4, axis=1)
b_ti1 = None
b_tf1 = None
b_tc1 = None
b_to1 = None
rk_ti1 = self.trained_recurrent_kernel1[:, : self.units]
rk_tf1 = self.trained_recurrent_kernel1[:,self.units:self.units*2]
rk_tc1 = self.trained_recurrent_kernel1[:,self.units*2:self.units*3]
rk_to1 = self.trained_recurrent_kernel1[:,self.units*3:]
else:
if self.use_bias:
if self.unit_forget_bias:
for i, trained_weights in enumerate(self.forzen_weights):
if i == 1:
self.trained_Qkernel1, self.trained_Kkernel1, self.trained_Vkernel1, self.trained_recurrent_kernel1, self.bias1 = trained_weights
if i == 2:
self.trained_Qkernel2, self.trained_Kkernel2, self.trained_Vkernel2, self.trained_recurrent_kernel2, self.bias2 = trained_weights
k_qti1, k_qtf1, k_qtc1, k_qto1 = tf.split(self.trained_Qkernel1, num_or_size_splits=4, axis=1)
k_kti1, k_ktf1, k_ktc1, k_kto1 = tf.split(self.trained_Kkernel1, num_or_size_splits=4, axis=1)
k_vti1, k_vtf1, k_vtc1, k_vto1 = tf.split(self.trained_Vkernel1, num_or_size_splits=4, axis=1)
b_ti1, b_tf1, b_tc1, b_to1 = tf.split(self.bias1, num_or_size_splits=4, axis=0)
rk_ti1 = self.trained_recurrent_kernel1[:, : self.units]
rk_tf1 = self.trained_recurrent_kernel1[:,self.units:self.units*2]
rk_tc1 = self.trained_recurrent_kernel1[:,self.units*2:self.units*3]
rk_to1 = self.trained_recurrent_kernel1[:,self.units*3:]
k_qti2, k_qtf2, k_qtc2, k_qto2 = tf.split(self.trained_Qkernel2, num_or_size_splits=4, axis=1)
k_kti2, k_ktf2, k_ktc2, k_kto2 = tf.split(self.trained_Kkernel2, num_or_size_splits=4, axis=1)
k_vti2, k_vtf2, k_vtc2, k_vto2 = tf.split(self.trained_Vkernel2, num_or_size_splits=4, axis=1)
b_ti2, b_tf2, b_tc2, b_to2 = tf.split(self.bias1, num_or_size_splits=4, axis=0)
rk_ti2 = self.trained_recurrent_kernel2[:, : self.units]
rk_tf2 = self.trained_recurrent_kernel2[:,self.units:self.units*2]
rk_tc2 = self.trained_recurrent_kernel2[:,self.units*2:self.units*3]
rk_to2 = self.trained_recurrent_kernel2[:,self.units*3:]
else:
for i, trained_weights in enumerate(self.forzen_weights):
if i == 1:
self.trained_Qkernel1, self.trained_Kkernel1, self.trained_Vkernel1, self.trained_recurrent_kernel1, self.bias1 = trained_weights
self.bias1 = None
if i == 2:
self.trained_Qkernel2, self.trained_Kkernel2, self.trained_Vkernel2, self.trained_recurrent_kernel2, self.bias2 = trained_weights
self.bias2 = None
k_qti1, k_qtf1, k_qtc1, k_qto1 = tf.split(self.trained_Qkernel1, num_or_size_splits=4, axis=1)
k_kti1, k_ktf1, k_ktc1, k_kto1 = tf.split(self.trained_Kkernel1, num_or_size_splits=4, axis=1)
k_vti1, k_vtf1, k_vtc1, k_vto1 = tf.split(self.trained_Vkernel1, num_or_size_splits=4, axis=1)
rk_ti1 = self.trained_recurrent_kernel1[:, : self.units]
rk_tf1 = self.trained_recurrent_kernel1[:,self.units:self.units*2]
rk_tc1 = self.trained_recurrent_kernel1[:,self.units*2:self.units*3]
rk_to1 = self.trained_recurrent_kernel1[:,self.units*3:]
k_qti2, k_qtf2, k_qtc2, k_qto2 = tf.split(self.trained_Qkernel2, num_or_size_splits=4, axis=1)
k_kti2, k_ktf2, k_ktc2, k_kto2 = tf.split(self.trained_Kkernel2, num_or_size_splits=4, axis=1)
k_vti2, k_vtf2, k_vtc2, k_vto2 = tf.split(self.trained_Vkernel2, num_or_size_splits=4, axis=1)
rk_ti2 = self.trained_recurrent_kernel2[:, : self.units]
rk_tf2 = self.trained_recurrent_kernel2[:,self.units:self.units*2]
rk_tc2 = self.trained_recurrent_kernel2[:,self.units*2:self.units*3]
rk_to2 = self.trained_recurrent_kernel2[:,self.units*3:]
b_ti1 = None
b_tf1 = None
b_tc1 = None
b_to1 = None
b_ti2 = None
b_tf2 = None
b_tc2 = None
b_to2 = None
def call(self, query, key, value, states, training = None):
h_tm1 = states[0] # previous memory state
c_tm1 = states[1] # previous carry state
dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
h_tm1, training, count=4)
if self.implementation == 1:
if 0 < self.dropout < 1.:
| |
distance plot"""
if ax is None:
return
ax.set_xlabel('Distance (pc)')
ax.set_ylabel('E(B-V)')
ax.grid(which='both', zorder=1, color='0.5', alpha=0.5)
ax.set_title('l=%.2f, b=%.2f' % (self.l, self.b))
#### Test the comparison
def testOneSightline(l=0., b=4., Rv=3.1, useCoarse=False):
    """Exercise the extinction comparison for a single (l, b) sight line."""
    # Instantiate the Bovy et al. combined dust map up front so that it is
    # not rebuilt for every query along this sight line.
    dust_bovy = mwdust.Combined19()
    sight = lineofsight(l, b, objBovy=dust_bovy, Rv=Rv)
    if useCoarse:
        sight.generateDistances(Verbose=True)
    # Sample both models along the line of sight, then display them.
    sight.getLallementEBV()
    sight.getBovyEBV()
    sight.showLos(showPoints=True)
def hybridSightline(lCen=0., bCen=4., \
nl=4, nb=4, \
maxPc=9000., minPc=0.5, stepPc=25, \
nside=64, \
pixFillFac=1.0, collisionArcmin=2., \
pctUpper=75., pctLower=25., \
Rv=3.1, \
distFrac=1., diffRatioMin=0.5, \
setLimDynamically=False, \
minEBV=1.0e-3, \
minDistL19=1000., \
returnValues = False, \
tellTime=True, \
doPlots=True, \
figName='', \
useTwoBinnings=True, \
nBinsAllSightlines = 500, \
distancesPc = np.array([]), \
hpid=-1, nested=False, \
objBovy=None, \
objL19=None, \
versionL19='19', \
dmaxL19=1e6,\
bridgeL19 = False, \
bridgeWidthL19 = 1000, \
planckMap=None, \
planckUpperLim = 15.):
"""Samples the Bovy et al. and Lallement et al. 2019 E(B-V) vs
distance maps, constructed as the median E(B-V) vs distance curve
over nl x nb samples about the central sight line.
if "returnValues" is set, this returns the E(B-V), distances,
scale factor, and max distance for L19.
A modified version of <NAME>'s
"stilism_local.py" is used to query Lallement et al. 2019.
REQUIREMENTS beyond the standard numpy and matplotlib:
stilism_local.py: Currently, the script "stilism_local.py" must be
accessible on PYTHONPATH (or be in the current working directory),
and it must be able to locate the "stilism_cube_2.h5" file.
mwdust - Bovy's 3D extinction models and sampler.
healpy - must be present on the system (to convert NSIDE into a
pixel area). If mwdust successfully installed on your system then
you probably already have this.
If you want to query Planck in all the sight lines, Gregory
Green's "mwdust" is also required.
MORE INFO, ARGUMENTS:
The median over all the samples is taken as the E(B-V) curve for
each of the two models. The Lallement et al. 2019 model is scaled to
match the Bovy et al. model at a transition distance. This transition
distance can be determined dynamically or set to a fixed fraction of
the maximum distance of validity of the Lallement et al. 2019
model.
ARGUMENTS:
lCen, bCen = central line of sight, in degrees.
nl, nb = number of points in l, b about which to draw
samples. (The samples will be drawn in a grid (nl x nb) about the
central line of sight.)
maxPc, minPc, stepPc = max, min, stepsize for the distance in
parsecs along the line of sight. (minPc should be small but not
zero: 0.5 pc seems a sensible level to use.)
nside = Healpix NSIDE (used to estimate the side-length for the
square region sampled)
pixFillFrac = fraction of the side-length of the healpix to sample
with our pointings (the default is to sample out to the corners of
the pixel).
collisionArcmin = closest distance a sample can fall to the line
of sight without being rejected as a duplicate of the central line
of sight.
pctUpper, pctLower = lower and upper percentiles for displaying
the variation of E(B-V) within the healpix. (May be ambiguous to
interpret for small (nl x nb).)
Rv = scale factor converting L19's A_555 to E(B-V). Default 3.1.
distFrac = fraction of the L19 maximum distance to use as a
default for the overlap point between L19 and Bovy.
diffRatioMin = minimum fractional difference btween L19 and Bovy
for the two predictions to be considered "discrepant". Used if
estimating the overlap distance dynamically.
setLimDynamically: setting the overlap distance dynamically?
minEBV = minimum value for the Bovy extinction. Bovy points below
this value are ignored.
minDistL19 = minimum acceptable dynamically determined overlap
distance. If the dynamically determined distance is less than this
value, the default (distFrac x max(dist_L19) is used instead.
tellTime = Report screen output, including the timing.
doPlots = prepare plots?
returnValues = return the extinction and distances?
figName = filename for output figure file. If length < 3, no
figure is saved to disk.
useTwoBinnings: Uses finer distance bins for L19 than for Bovy et al., such that the total number of bins is the same for all sight lines.
nBinsAllSightlines = number of bins total for the sightlines
distancesPc = input array of distances in parsecs. If supplied, all the clever distance methods here are ignored in favor of the input distances.
hpid: if >0, then a healpix ID is being supplied, and will
override the choice of l, b. The field center is constructed from
this healpix id.
nested: if using hpids, is this with NESTED? Default is False
because sims_maf seems to use RING by default.
objBovy = bovy et al. dust object. Defaults to None and is
re-initialized in this method. But, could be passed in here too to
save time.
objL19 = Lallement et al. map object. Defaults to None and is
re-initialized in this method using the Rv and versionL19
arguments
versionL19 = Version of Lallement et al. to use if we are
re-initializing L19 here.
dmaxL19 = Maximum distance for Lallement et al. profile. Defaults
to a very large number so the profile up to the intrinsic maximum
distance of the map is used. Setting to zero disables Lallement's
map. Setting instead to a negative value will use Bovy alone if
non-zero for the line of sight, otherwise only Lallement.
bridgeL19 - if True, L19 is taken as "correct", and the E(B-V) is
extended from the value of L+19 at the maximum distance, to the
value of Bovy et al. at the maximum distance plus distance
bridgeWidthL19
bridgeWidthL19 - the distance interval over which the above
extension is to take place.
planckMap = healpix 2d map of Planck E(B-V) predictions. Ignored
if the query coords were not healpix, OR if Green's "dustmaps" is
available on the system.
planckUpperLim = upper limit for planck E(B-V) to be considered
"sensible"
EXAMPLE CALL:
compareExtinctions.hybridSightline(0, 4, figName='test_l0b4_ebvCompare.png', nl=5, nb=5, tellTime=True)
"""
# For our timing report
t0 = time.time()
# generate samples in l, b to sample a typical healpix
pixSideDeg = hp.nside2resol(nside, arcmin=True) / 60.
# how far to the edge of the pixel do we go in our samples?
pixEdge = np.min([pixFillFac, 1.0])
# now we generate the grid of samples
dL = pixSideDeg * pixEdge * np.linspace(-1., 1., nl, endpoint=True)
dB = pixSideDeg * pixEdge * np.linspace(-1., 1., nb, endpoint=True)
# create meshgrid and ravel into 1D arrays
ll, bb = np.meshgrid(dL, dB)
# convert the field center into equatorial so that we can generate
# the samples within the healpix
planckCen = 0.
if hpid < 0:
cooGAL = SkyCoord(lCen*u.deg, bCen*u.deg, frame='galactic')
raCen = cooGAL.icrs.ra.degree
deCen = cooGAL.icrs.dec.degree
else:
raCen, deCen = hp.pix2ang(nside, hpid, nested, lonlat=True)
cooEQ = SkyCoord(raCen*u.degree, deCen*u.degree, frame='icrs')
lCen = cooEQ.galactic.l.degree
bCen = cooEQ.galactic.b.degree
# fudge for wraparound, to use the same scheme as the samples
if lCen > 180:
lCen -= 360.
# do we have a planck map?
if planckMap is not None:
planckCen = planckMap[hpid]
vRA = ll.ravel() + raCen
vDE = bb.ravel() + deCen
# Ensure the coords of the samples actually are on the sphere...
bSampl = (vDE >= -90.) & (vDE <= 90.)
vRA = vRA[bSampl]
vDE = vDE[bSampl]
cooSamples = SkyCoord(vRA*u.deg, vDE*u.deg, frame='icrs')
vL = np.asarray(cooSamples.galactic.l.degree)
vB = np.asarray(cooSamples.galactic.b.degree)
# handle the wraparound
bLhi = vL > 180.
vL[bLhi] -= 360.
#vL = ll.ravel() + lCen
#vB = bb.ravel() + bCen
# knock out any points that are closer than distanceLim to the
# central sight line
offsets = (vL-lCen)**2 + (vB - bCen)**2
bSamplesKeep = (offsets*3600. > collisionArcmin**2) & \
(vB >= -90.) & (vB <= 90.)
if np.sum(bSamplesKeep) < 1:
print("hybridSightline WATCHOUT - no samples kept. Check | |
multiTime_cell_id_t1 = [adata.uns["Tmap_cell_id_t1"]]
multiTime_cell_id_t2 = [adata.uns["Tmap_cell_id_t2"]]
proportion = adata.uns["proportion"]
transition_map = adata.uns["transition_map"]
X_clone = clone_annot.copy()
if not ssp.issparse(X_clone):
X_clone = ssp.csr_matrix(X_clone)
demultiplexed_map = tmap_core.refine_Tmap_through_cospar_noSmooth(
multiTime_cell_id_t1,
multiTime_cell_id_t2,
proportion,
transition_map,
X_clone,
sparsity_threshold=intraclone_threshold,
normalization_mode=normalization_mode,
)
adata.uns["intraclone_transition_map"] = ssp.csr_matrix(demultiplexed_map)
def infer_Tmap_from_one_time_clones(
adata_orig,
initial_time_points=None,
later_time_point=None,
initialize_method="OT",
OT_epsilon=0.02,
OT_dis_KNN=5,
OT_cost="SPD",
HighVar_gene_pctl=85,
padding_X_clone=False,
normalization_mode=1,
sparsity_threshold=0.2,
CoSpar_KNN=20,
use_full_Smatrix=True,
smooth_array=[15, 10, 5],
trunca_threshold=[0.001, 0.01],
compute_new=False,
max_iter_N=[1, 5],
epsilon_converge=[0.05, 0.05],
use_fixed_clonesize_t1=False,
sort_clone=1,
save_subset=True,
use_existing_KNN_graph=False,
):
"""
Infer transition map from clones with a single time point
We jointly infer a transition map and the initial clonal observation
through iteration. The inferred map is between each of the initial
time points ['day_1','day_2',...,] and the time point with clonal
observation. We initialize the transition map by either the OT
method or HighVar method.
**Summary**
* Parameters relevant for cell state selection: initial_time_points,
later_time_point.
* Initialization methods:
* 'OT': optional transport based method. Key parameters: `OT_epsilon, OT_dis_KNN`.
See :func:`.infer_Tmap_from_optimal_transport`.
* 'HighVar': a customized approach, assuming that cells similar in gene
expression across time points share clonal origin. Key parameter: `HighVar_gene_pctl`.
See :func:`.infer_Tmap_from_HighVar`.
* Key parameters relevant for joint optimization itself (which relies on coherent sparse optimization):
`smooth_array, CoSpar_KNN, sparsity_threshold`. See :func:`.refine_Tmap_through_joint_optimization`.
Parameters
----------
adata_orig: :class:`~anndata.AnnData` object
It is assumed to be preprocessed and has multiple time points.
initial_time_points: `list`, optional (default, all time points)
List of initial time points to be included for the transition map.
Like ['day_1','day_2']. Entries consistent with adata.obs['time_info'].
later_time_point: `str`, optional (default, the last time point)
The time point with clonal observation. Its value should be
consistent with adata.obs['time_info'].
initialize_method: `str`, optional (default 'OT')
Method to initialize the transition map from state information.
Choice: {'OT', 'HighVar'}.
OT_epsilon: `float`, optional (default: 0.02)
The entropic regularization, >0. A larger value increases
uncertainty of the transition. Relevant when `initialize_method='OT'`.
OT_dis_KNN: `int`, optional (default: 5)
Number of nearest neighbors to construct the KNN graph for
computing the shortest path distance. Relevant when `initialize_method='OT'`.
OT_cost: `str`, optional (default: `SPD`), options {'GED','SPD'}
The cost metric. We provide gene expression distance (GED), and also
shortest path distance (SPD). GED is much faster, but SPD is more accurate.
However, cospar is robust to the initialization.
HighVar_gene_pctl: `int`, optional (default: 85)
Percentile threshold to select highly variable genes to construct pseudo-clones.
A higher value selects more variable genes. Range: [0,100].
Relevant when `initialize_method='HighVar'`.
padding_X_clone: `bool`, optional (default: False)
If true, select cells at the `later_time_point` yet without any clonal label, and
generate a unique clonal label for each of them. This adds artificial clonal data.
However, it will make the best use of the state information, especially when there
are very few clonal barcodes in the data.
normalization_mode: `int`, optional (default: 1)
Normalization method. Choice: [0,1].
0, single-cell normalization; 1, Clone normalization. The clonal
normalization suppresses the contribution of large
clones, and is much more robust.
smooth_array: `list`, optional (default: [15,10,5])
List of smooth rounds at initial runs of iteration.
Suppose that it has a length N. For iteration n<N, the n-th entry of
smooth_array determines the kernel exponent to build the S matrix at the n-th
iteration. When n>N, we use the last entry of smooth_array to compute
the S matrix. We recommend starting with more smoothing depth and gradually
reduce the depth, as inspired by simulated annealing. Data with higher
clonal dispersion should start with higher smoothing depth. The final depth should
depend on the manifold itself. For fewer cells, it results in a small KNN graph,
and a small final depth should be used. We recommend to use a number at
the multiple of 5 for computational efficiency i.e.,
smooth_array=[20, 15, 10, 5], or [20,15,10]
max_iter_N: `list`, optional (default: [1,5])
A list for maximum iterations for the Joint optimization and CoSpar core function, respectively.
epsilon_converge: `list`, optional (default: [0.05,0.05])
A list of convergence threshold for the Joint optimization and CoSpar core function, respectively.
The convergence threshold is for the change of map correlations between consecutive iterations.
For CoSpar core function, this convergence test is activated only when CoSpar has iterated for 3 times.
CoSpar_KNN: `int`, optional (default: 20)
The number of neighbors for KNN graph used for computing the similarity matrix.
trunca_threshold: `list`, optional (default: [0.001,0.01])
Threshold to reset entries of a matrix to zero. The first entry is for
Similarity matrix; the second entry is for the Tmap.
This is only for computational and storage efficiency.
sparsity_threshold: `float`, optional (default: 0.1)
The relative threshold to remove noises in the updated transition map,
in the range [0,1].
save_subset: `bool`, optional (default: True)
If true, save only Smatrix at smooth round [5,10,15,...];
Otherwise, save Smatrix at each round.
use_full_Smatrix: `bool`, optional (default: True)
If true, extract the relevant Smatrix from the full Smatrix defined by all cells.
This tends to be more accurate. The package is optimized around this choice.
use_fixed_clonesize_t1: `bool`, optional (default: False)
If true, fix the number of initial states as the same for all clones
sort_clone: `int`, optional (default: 1)
The order to infer initial states for each clone: {1,-1,others}.
1, sort clones by size from small to large;
-1, sort clones by size from large to small;
others, do not sort.
compute_new: `bool`, optional (default: False)
If True, compute everything (ShortestPathDis, OT_map, etc.) from scratch,
whether it was computed and saved before or not. Regarding the Smatrix, it is
recomputed only when `use_full_Smatrix=False`.
use_existing_KNN_graph: `bool`, optional (default: False)
If true and adata.obsp['connectivities'], use the existing knn graph
to compute the shortest-path distance. Revelant if initialize_method='OT'.
This overrides all other relevant parameters for building shortest-path distance.
Returns
-------
adata: :class:`~anndata.AnnData` object
Update adata.obsm['X_clone'] and adata.uns['transition_map'],
as well as adata.uns['OT_transition_map'] or
adata.uns['HighVar_transition_map'], depending on the initialization.
adata_orig.obsm['X_clone'] remains the same.
"""
t0 = time.time()
hf.check_available_clonal_info(adata_orig)
clonal_time_points_0 = np.array(adata_orig.uns["clonal_time_points"])
time_ordering = adata_orig.uns["time_ordering"]
if len(clonal_time_points_0) == 0:
raise ValueError(
"No clonal time points available for this dataset. Please run cs.tmap.infer_Tmap_from_state_info_alone."
)
if later_time_point is None:
sel_idx_temp = np.in1d(time_ordering, clonal_time_points_0)
later_time_point = time_ordering[sel_idx_temp][-1]
if type(later_time_point) == list:
later_time_point = later_time_point[0]
# use the last clonal later time point
if initial_time_points is None:
sel_id_temp = np.nonzero(np.in1d(time_ordering, [later_time_point]))[0][0]
initial_time_points = time_ordering[:sel_id_temp]
sel_idx_temp = np.in1d(time_ordering, initial_time_points)
initial_time_points = list(time_ordering[sel_idx_temp])
if later_time_point in initial_time_points:
logg.warn(f"remove {later_time_point} from initial_time_points")
initial_time_points.remove(later_time_point)
hf.check_input_parameters(
adata_orig,
later_time_point=later_time_point,
initial_time_points=initial_time_points,
smooth_array=smooth_array,
save_subset=save_subset,
)
if initialize_method not in ["OT", "HighVar"]:
logg.warn(
"initialize_method not among ['OT','HighVar']. Use initialize_method='OT'"
)
initialize_method = "OT"
if OT_cost not in ["GED", "SPD"]:
logg.warn("OT_cost not among ['GED','SPD']. Use OT_cost='SPD'")
OT_cost = "SPD"
sp_idx = np.zeros(adata_orig.shape[0], dtype=bool)
time_info_orig = np.array(adata_orig.obs["time_info"])
all_time_points = list(initial_time_points) + [later_time_point]
label = "t"
for xx in all_time_points:
id_array = np.nonzero(time_info_orig == xx)[0]
sp_idx[id_array] = True
label = label + "*" + str(xx)
adata = adata_orig[sp_idx]
clone_annot_orig = adata_orig.obsm["X_clone"].copy()
data_des_orig = adata_orig.uns["data_des"][0]
data_des_0 = adata_orig.uns["data_des"][-1]
data_des = data_des_0 + f"_OneTimeClone_{label}"
adata.uns["data_des"] = [data_des_orig, data_des]
time_info = np.array(adata.obs["time_info"])
time_index_t2 = time_info == later_time_point
time_index_t1 = ~time_index_t2
## set cells without a clone ID to have a unique clone ID
if padding_X_clone:
logg.info("Generate a unique clonal label for each clonally unlabeled cell.")
time_index_t2_orig = time_info_orig == later_time_point
zero_clone_idx = clone_annot_orig[time_index_t2_orig].sum(1).A.flatten() == 0
clone_annot_t2_padding = np.diag(np.ones(np.sum(zero_clone_idx)))
non_zero_clones_idx = (
clone_annot_orig[time_index_t2_orig].sum(0).A.flatten() > 0
)
M0 = np.sum(non_zero_clones_idx)
M1 = clone_annot_t2_padding.shape[1]
clone_annot_new = np.zeros((clone_annot_orig.shape[0], M0 + M1))
clone_annot_new[:, :M0] = clone_annot_orig[:, non_zero_clones_idx].A
sp_id_t2 = np.nonzero(time_index_t2_orig)[0]
clone_annot_new[sp_id_t2[zero_clone_idx], M0:] = clone_annot_t2_padding
else:
clone_annot_new = clone_annot_orig
# remove clones without a cell at t2
valid_clone_id = np.nonzero(
clone_annot_new[time_info_orig == later_time_point].sum(0).A.flatten() > 0
)[0]
X_clone_temp = clone_annot_new[:, valid_clone_id]
adata_orig.obsm["X_clone"] = ssp.csr_matrix(X_clone_temp)
#### used for similarity matrix generation
Tmap_cell_id_t1 = np.nonzero(time_index_t1)[0]
Tmap_cell_id_t2 = np.nonzero(time_index_t2)[0]
adata.uns["Tmap_cell_id_t1"] = Tmap_cell_id_t1
adata.uns["Tmap_cell_id_t2"] = Tmap_cell_id_t2
adata.uns["clonal_cell_id_t1"] = Tmap_cell_id_t1
adata.uns["clonal_cell_id_t2"] = Tmap_cell_id_t2
adata.uns["sp_idx"] = sp_idx
data_path = settings.data_path
transition_map = np.zeros((len(Tmap_cell_id_t1), len(Tmap_cell_id_t2)))
ini_transition_map = np.zeros((len(Tmap_cell_id_t1), len(Tmap_cell_id_t2)))
X_clone_updated = adata_orig.obsm["X_clone"][
sp_idx
].A # this does not work well if there are empty clones to begin with
logg.info(
"--------Infer transition map between initial time points and the later time one-------"
)
| |
type: ignore
def _update_mongo_db_collection_throughput_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
collection_name, # type: str
update_throughput_parameters, # type: "_models.ThroughputSettingsUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ThroughputSettingsGetResults"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ThroughputSettingsGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-15"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_mongo_db_collection_throughput_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'collectionName': self._serialize.url("collection_name", collection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(update_throughput_parameters, 'ThroughputSettingsUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_mongo_db_collection_throughput_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/mongodbDatabases/{databaseName}/collections/{collectionName}/throughputSettings/default'} # type: ignore
def begin_update_mongo_db_collection_throughput(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
collection_name, # type: str
update_throughput_parameters, # type: "_models.ThroughputSettingsUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ThroughputSettingsGetResults"]
"""Update the RUs per second of an Azure Cosmos DB MongoDB collection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param collection_name: Cosmos DB collection name.
:type collection_name: str
:param update_throughput_parameters: The RUs per second of the parameters to provide for the
current MongoDB collection.
:type update_throughput_parameters: ~azure.mgmt.cosmosdb.models.ThroughputSettingsUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ThroughputSettingsGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_mongo_db_collection_throughput_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
collection_name=collection_name,
update_throughput_parameters=update_throughput_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'collectionName': self._serialize.url("collection_name", collection_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_mongo_db_collection_throughput.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/mongodbDatabases/{databaseName}/collections/{collectionName}/throughputSettings/default'} # type: ignore
def _migrate_mongo_db_collection_to_autoscale_initial(
    self,
    resource_group_name,  # type: str
    account_name,  # type: str
    database_name,  # type: str
    collection_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.ThroughputSettingsGetResults"]
    """Issue the initial POST of the migrate-to-autoscale long-running
    operation and return its first response.

    Returns the deserialized throughput settings on a 200 response, or
    ``None`` on 202 (operation accepted, still running); raises
    :class:`HttpResponseError` for any other status.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ThroughputSettingsGetResults"]]
    # Map well-known error statuses to exception types; callers may extend
    # or override the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-03-15"
    accept = "application/json"
    # Construct URL from the operation's metadata template.
    url = self._migrate_mongo_db_collection_to_autoscale_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
        'databaseName': self._serialize.url("database_name", database_name, 'str'),
        'collectionName': self._serialize.url("collection_name", collection_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct query parameters.
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers.
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.ErrorResponseUpdatedFormat, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
    # A 202 Accepted response carries no body; only 200 yields settings.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_migrate_mongo_db_collection_to_autoscale_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/mongodbDatabases/{databaseName}/collections/{collectionName}/throughputSettings/default/migrateToAutoscale'}  # type: ignore
def begin_migrate_mongo_db_collection_to_autoscale(
    self,
    resource_group_name,  # type: str
    account_name,  # type: str
    database_name,  # type: str
    collection_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.ThroughputSettingsGetResults"]
    """Migrate an Azure Cosmos DB MongoDB collection from manual throughput to autoscale.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param account_name: Cosmos DB database account name.
    :type account_name: str
    :param database_name: Cosmos DB database name.
    :type database_name: str
    :param collection_name: Cosmos DB collection name.
    :type collection_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ThroughputSettingsGetResults"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh start: issue the initial POST; 'cls' keeps the raw pipeline
        # response so the poller can drive the operation to completion.
        raw_result = self._migrate_mongo_db_collection_to_autoscale_initial(
            resource_group_name=resource_group_name,
            account_name=account_name,
            database_name=database_name,
            collection_name=collection_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs were consumed by the initial call and must not be passed
    # on to the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Deserialize the final response of the long-running operation.
        deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
        'databaseName': self._serialize.url("database_name", database_name, 'str'),
        'collectionName': self._serialize.url("collection_name", collection_name, 'str'),
    }
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Rehydrate a poller from a previously saved continuation token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_migrate_mongo_db_collection_to_autoscale.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/mongodbDatabases/{databaseName}/collections/{collectionName}/throughputSettings/default/migrateToAutoscale'}  # type: ignore
def _migrate_mongo_db_collection_to_manual_throughput_initial(
    self,
    resource_group_name,  # type: str
    account_name,  # type: str
    database_name,  # type: str
    collection_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.ThroughputSettingsGetResults"]
    """Issue the initial POST of the migrate-to-manual-throughput
    long-running operation and return its first response.

    Returns the deserialized throughput settings on a 200 response, or
    ``None`` on 202 (operation accepted, still running); raises
    :class:`HttpResponseError` for any other status.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ThroughputSettingsGetResults"]]
    # Map well-known error statuses to exception types; callers may extend
    # or override the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-03-15"
    accept = "application/json"
    # Construct URL from the operation's metadata template.
    url = self._migrate_mongo_db_collection_to_manual_throughput_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
        'databaseName': self._serialize.url("database_name", database_name, 'str'),
        'collectionName': self._serialize.url("collection_name", collection_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct query parameters.
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers.
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.ErrorResponseUpdatedFormat, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
    # A 202 Accepted response carries no body; only 200 yields settings.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_migrate_mongo_db_collection_to_manual_throughput_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/mongodbDatabases/{databaseName}/collections/{collectionName}/throughputSettings/default/migrateToManualThroughput'}  # type: ignore
def begin_migrate_mongo_db_collection_to_manual_throughput(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
collection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ThroughputSettingsGetResults"]
"""Migrate an Azure Cosmos DB MongoDB collection from autoscale to manual throughput.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account | |
'tool_dependencies.xml',
filepath=os.path.split( xml_filename )[0],
commit_message='Uploaded dependency on %s.' % ', '.join( repo.name for repo in depends_on[ 'repositories' ] ) )
def create_repository_dependency( self, repository=None, depends_on=None, filepath=None ):
    """Generate a repository_dependencies.xml declaring that *repository*
    depends on each repository in *depends_on*, then upload it.

    :param repository: the repository that will declare the dependencies
    :param depends_on: list of repository objects depended on (default: none)
    :param filepath: directory in which to generate and find the xml file
    """
    # 'depends_on=None' avoids the shared-mutable-default-argument pitfall
    # (the original used 'depends_on=[]').
    depends_on = depends_on or []
    dependency_names = ', '.join( repo.name for repo in depends_on )
    dependency_description = '%s depends on %s.' % ( repository.name, dependency_names )
    self.generate_repository_dependency_xml( depends_on,
                                             self.get_filename( 'repository_dependencies.xml', filepath=filepath ),
                                             dependency_description=dependency_description )
    self.upload_file( repository,
                      'repository_dependencies.xml',
                      filepath=filepath,
                      commit_message='Uploaded dependency on %s.' % dependency_names )
def create_repository_review( self, repository, review_contents_dict, changeset_revision=None, copy_from=None):
    """Create a review of a repository revision through the tool shed UI.

    :param repository: repository object to review
    :param review_contents_dict: maps review section labels to their contents
    :param changeset_revision: revision to review; defaults to the tip
    :param copy_from: optional ( old_changeset_revision, review_id ) pair used
        to seed the new review from a previously saved one
    """
    strings_displayed = []
    if not copy_from:
        strings_displayed.append( 'Begin your review' )
    strings_not_displayed = []
    kwd = dict()
    if not changeset_revision:
        changeset_revision = self.get_repository_tip( repository )
    url = '/repository_review/create_review?changeset_revision=%s&id=%s' % ( changeset_revision, self.security.encode_id( repository.id ) )
    self.visit_url( url )
    self.check_for_strings( strings_displayed, strings_not_displayed )
    strings_displayed = []
    if copy_from:
        old_changeset_revision, review_id = copy_from
        # When copying, the UI first asks which previous revision's review
        # to copy; confirm that page before re-requesting with the id.
        strings_displayed = [ 'You have elected to create a new review', 'Select previous revision', changeset_revision ]
        self.check_for_strings( strings_displayed )
        strings_displayed = []
        url = '/repository_review/create_review?changeset_revision=%s&id=%s&previous_review_id=%s' % \
            ( self.get_repository_tip( repository ), self.security.encode_id( repository.id ), self.security.encode_id( review_id ) )
        self.visit_url( url )
    self.fill_review_form( review_contents_dict, strings_displayed, strings_not_displayed )
def create_user_in_galaxy( self, cntrller='user', email='<EMAIL>', password='<PASSWORD>', username='admin-user', redirect='' ):
    """Submit Galaxy's user creation form and classify the outcome.

    :return: ( previously_created, username_taken, invalid_username ) booleans
        describing why creation did not happen; all False on success.
    """
    self.visit_galaxy_url( "/user/create?cntrller=%s&use_panels=False" % cntrller )
    tc.fv( '1', 'email', email )
    tc.fv( '1', 'redirect', redirect )
    tc.fv( '1', 'password', password )
    tc.fv( '1', 'confirm', password )
    tc.fv( '1', 'username', username )
    tc.submit( 'create_user_button' )
    previously_created = False
    username_taken = False
    invalid_username = False
    # check_page_for_string raises when the string is absent, so probe the
    # possible outcome messages in order.  Use 'except Exception' rather than
    # the original bare 'except:' so KeyboardInterrupt/SystemExit propagate.
    try:
        self.check_page_for_string( "Created new user account" )
    except Exception:
        try:
            # May have created the account in a previous test run...
            self.check_page_for_string( "User with that email already exists" )
            previously_created = True
        except Exception:
            try:
                self.check_page_for_string( 'Public name is taken; please choose another' )
                username_taken = True
            except Exception:
                try:
                    # Note that we're only checking if the usr name is >< 4 chars here...
                    self.check_page_for_string( 'Public name must be at least 4 characters in length' )
                    invalid_username = True
                except Exception:
                    pass
    return previously_created, username_taken, invalid_username
def delete_files_from_repository( self, repository, filenames=[], strings_displayed=[ 'were deleted from the repository' ], strings_not_displayed=[] ):
    """Delete the named files from a repository via the browse-repository form.

    :param repository: repository object to delete files from
    :param filenames: file names (relative) expected to exist in the repository
    :param strings_displayed: strings the result page must contain
    :param strings_not_displayed: strings the result page must not contain
    """
    files_to_delete = []
    basepath = self.get_repo_path( repository )
    repository_files = self.get_repository_file_list( base_path=basepath, current_path=None )
    # Verify that the files to delete actually exist in the repository.
    for filename in repository_files:
        if filename in filenames:
            files_to_delete.append( os.path.join( basepath, filename ) )
    self.browse_repository( repository )
    # Twill sets hidden form fields to read-only by default. We need to write to this field.
    form = tc.browser.get_form( 'select_files_to_delete' )
    form.find_control( "selected_files_to_delete" ).readonly = False
    tc.fv( "1", "selected_files_to_delete", ','.join( files_to_delete ) )
    tc.submit( 'select_files_to_delete_button' )
    self.check_for_strings( strings_displayed, strings_not_displayed )
def display_all_workflows( self, strings_displayed=[], strings_not_displayed=[] ):
    """Open Galaxy's workflow listing page and verify the expected strings."""
    self.visit_galaxy_url( '/workflow' )
    self.check_for_strings( strings_displayed, strings_not_displayed )
def display_galaxy_browse_repositories_page( self, strings_displayed=[], strings_not_displayed=[] ):
    """Open the Galaxy admin 'browse installed repositories' page and verify strings."""
    self.visit_galaxy_url( '/admin_toolshed/browse_repositories' )
    self.check_for_strings( strings_displayed, strings_not_displayed )
def display_installed_repository_manage_page( self, installed_repository, strings_displayed=None, strings_not_displayed=None ):
    """Visit the Galaxy admin manage page of an installed repository and
    verify that its identifying fields (plus any caller-supplied strings)
    are displayed.

    :param installed_repository: installed repository object
    :param strings_displayed: extra strings the page must contain
    :param strings_not_displayed: strings the page must not contain
    """
    url = '/admin_toolshed/manage_repository?id=%s' % self.security.encode_id( installed_repository.id )
    self.visit_galaxy_url( url )
    # Work on a copy: the original extended a shared mutable default
    # argument ('strings_displayed=[]'), so strings accumulated across
    # successive calls and leaked into later checks.
    strings_displayed = list( strings_displayed ) if strings_displayed else []
    strings_not_displayed = strings_not_displayed or []
    strings_displayed.extend( [ installed_repository.name,
                                installed_repository.description,
                                installed_repository.owner,
                                installed_repository.tool_shed,
                                installed_repository.installed_changeset_revision ] )
    self.check_for_strings( strings_displayed, strings_not_displayed )
def display_installed_workflow_image( self, repository, workflow_name, strings_displayed=[], strings_not_displayed=[] ):
    """Render the workflow image for a workflow of an installed repository."""
    params = ( self.security.encode_id( repository.id ), tool_shed_encode( workflow_name ) )
    self.visit_galaxy_url( '/admin_toolshed/generate_workflow_image?repository_id=%s&workflow_name=%s' % params )
    self.check_for_strings( strings_displayed, strings_not_displayed )
def display_manage_repository_page( self, repository, changeset_revision=None, strings_displayed=None, strings_not_displayed=None ):
    """Visit a repository's manage page and verify the expected strings,
    including any tool dependencies recorded in the revision metadata.

    :param repository: repository object to display
    :param changeset_revision: revision to display; defaults to the tip
    :param strings_displayed: extra strings the page must contain
    :param strings_not_displayed: strings the page must not contain
    """
    # Work on copies: the original appended to a shared mutable default
    # argument ('strings_displayed=[]'), so dependency strings accumulated
    # across successive calls.
    strings_displayed = list( strings_displayed ) if strings_displayed else []
    strings_not_displayed = strings_not_displayed or []
    base_url = '/repository/manage_repository?id=%s' % self.security.encode_id( repository.id )
    if changeset_revision:
        url = '%s&changeset_revision=%s' % ( base_url, changeset_revision )
    else:
        changeset_revision = self.get_repository_tip( repository )
        url = base_url
    self.visit_url( url )
    metadata = self.get_repository_metadata_by_changeset_revision( repository, changeset_revision )
    if metadata:
        if 'tool_dependencies' in metadata.metadata:
            strings_displayed.append( 'Tool dependencies' )
            for dependency in metadata.metadata[ 'tool_dependencies' ]:
                if dependency == 'set_environment':
                    for environment_dependency in metadata.metadata[ 'tool_dependencies' ][ dependency ]:
                        strings_displayed.append( environment_dependency[ 'name' ] )
                        strings_displayed.append( environment_dependency[ 'type' ] )
                else:
                    # Hoist the repeated lookup of the dependency record.
                    tool_dependency = metadata.metadata[ 'tool_dependencies' ][ dependency ]
                    strings_displayed.append( tool_dependency[ 'name' ] )
                    strings_displayed.append( tool_dependency[ 'version' ] )
                    strings_displayed.append( tool_dependency[ 'type' ] )
    self.check_for_strings( strings_displayed, strings_not_displayed )
def display_repository_clone_page( self, owner_name, repository_name, strings_displayed=[], strings_not_displayed=[] ):
    """Visit the hg clone page for owner/repository and verify strings."""
    self.visit_url( '/repos/%s/%s' % ( owner_name, repository_name ) )
    self.check_for_strings( strings_displayed, strings_not_displayed )
def display_repository_file_contents( self, repository, filename, filepath=None, strings_displayed=[], strings_not_displayed=[] ):
    '''Find a file in the repository and display the contents.'''
    basepath = self.get_repo_path( repository )
    # Narrow to a subdirectory of the repo only when a filepath was given.
    relative_path = os.path.join( basepath, filepath ) if filepath else basepath
    repository_file_list = self.get_repository_file_list( base_path=relative_path, current_path=None )
    assert filename in repository_file_list, 'File %s not found in the repository under %s.' % ( filename, relative_path )
    self.visit_url( '/repository/get_file_contents?file_path=%s' % os.path.join( relative_path, filename ) )
    self.check_for_strings( strings_displayed, strings_not_displayed )
def display_reviewed_repositories_owned_by_user( self, strings_displayed=[], strings_not_displayed=[] ):
    """Open the 'reviewed repositories I own' page and verify strings."""
    self.visit_url( '/repository_review/reviewed_repositories_i_own' )
    self.check_for_strings( strings_displayed, strings_not_displayed )
def edit_repository_categories( self, repository, categories_to_add=[], categories_to_remove=[], restore_original=True ):
    """Toggle a repository's category assignments via the manage page,
    optionally restoring the original assignment afterwards."""
    def submit_category_changes( to_add, to_remove ):
        # Fill the category multi-select ('+' selects, '-' deselects),
        # submit, and verify which categories are (not) selected after.
        expected = []
        unexpected = []
        for category in to_add:
            tc.fv( "2", "category_id", '+%s' % category)
            expected.append( "selected>%s" % category )
        for category in to_remove:
            tc.fv( "2", "category_id", '-%s' % category)
            unexpected.append( "selected>%s" % category )
        tc.submit( "manage_categories_button" )
        self.check_for_strings( expected, unexpected )
    self.visit_url( '/repository/manage_repository?id=%s' % self.security.encode_id( repository.id ) )
    submit_category_changes( categories_to_add, categories_to_remove )
    if restore_original:
        # Undo: re-add what was removed and remove what was added.
        submit_category_changes( categories_to_remove, categories_to_add )
def display_repository_reviews_by_user( self, user, strings_displayed=[], strings_not_displayed=[] ):
    """Open the page listing all repository reviews written by *user*."""
    self.visit_url( '/repository_review/repository_reviews_by_user?id=%s' % self.security.encode_id( user.id ) )
    self.check_for_strings( strings_displayed, strings_not_displayed )
def edit_repository_information( self, repository, **kwd ):
    """Edit the repository's name/description fields via the manage page,
    verify the new values are shown, then restore the original values."""
    editable_fields = [ 'repo_name', 'description', 'long_description' ]
    self.visit_url( '/repository/manage_repository?id=%s' % self.security.encode_id( repository.id ) )
    original_information = dict( repo_name=repository.name, description=repository.description, long_description=repository.long_description )
    # First pass: apply only the fields supplied by the caller.
    strings_displayed = []
    for field_name in editable_fields:
        if field_name in kwd:
            tc.fv( "1", field_name, kwd[ field_name ] )
            strings_displayed.append( self.escape_html( kwd[ field_name ] ) )
    tc.submit( "edit_repository_button" )
    self.check_for_strings( strings_displayed )
    # Second pass: reset every editable field to its original value.
    strings_displayed = []
    for field_name in editable_fields:
        tc.fv( "1", field_name, original_information[ field_name ] )
        strings_displayed.append( self.escape_html( original_information[ field_name ] ) )
    tc.submit( "edit_repository_button" )
    self.check_for_strings( strings_displayed )
def escape_html( self, string, unescape=False ):
    """Escape (or, with unescape=True, restore) the characters &, ' and "
    using the HTML entities the tool shed pages emit.

    :param string: text to transform
    :param unescape: when True, convert entities back to plain characters
    :return: the transformed string

    The source literals were garbled (e.g. ``( "'", ''' )`` is not valid
    Python); restored to the proper entity mapping.  '&' must be first in
    the list so that, when escaping, the '&' inside the generated entities
    is not itself re-escaped.
    """
    html_entities = [ ( '&', '&amp;' ), ( "'", '&#39;' ), ( '"', '&#34;' ) ]
    for character, replacement in html_entities:
        if unescape:
            string = string.replace( replacement, character )
        else:
            string = string.replace( character, replacement )
    return string
def fill_review_form( self, review_contents_dict, strings_displayed=None, strings_not_displayed=None ):
    """Populate and submit the repository review form.

    :param review_contents_dict: maps section labels to None (not applicable)
        or a dict with 'comment', 'rating', 'approved' and optional 'private'
    :param strings_displayed: extra strings the result page must contain
    :param strings_not_displayed: strings the result page must not contain
    """
    # Work on a copy: the original appended to a shared mutable default
    # argument ('strings_displayed=[]'), accumulating labels across calls,
    # and also mutated any list the caller passed in.
    strings_displayed = list( strings_displayed ) if strings_displayed else []
    strings_not_displayed = strings_not_displayed or []
    kwd = dict()
    for label, contents in review_contents_dict.items():
        strings_displayed.append( label )
        if contents:
            kwd[ '%s__ESEP__comment' % label ] = contents[ 'comment' ]
            kwd[ '%s__ESEP__rating' % label ] = contents[ 'rating' ]
            if 'private' in contents:
                kwd[ '%s__ESEP__private' % label ] = contents[ 'private' ]
            kwd[ '%s__ESEP__approved' % label ] = contents[ 'approved' ]
        else:
            kwd[ '%s__ESEP__approved' % label ] = 'not_applicable'
    self.submit_form( 1, 'Workflows__ESEP__review_button', **kwd )
    strings_displayed.append( 'Reviews were saved' )
    self.check_for_strings( strings_displayed, strings_not_displayed )
def galaxy_login( self, email='<EMAIL>', password='<PASSWORD>', username='admin-user', redirect='' ):
    """Ensure a Galaxy account exists for *email*; when the account was
    created by a previous run, log in through the login form instead."""
    previously_created, username_taken, invalid_username = \
        self.create_user_in_galaxy( email=email, password=password, username=username, redirect=redirect )
    if not previously_created:
        return
    self.visit_galaxy_url( "/user/login?use_panels=False" )
    for field_name, field_value in ( ( 'email', email ), ( 'redirect', redirect ), ( 'password', password ) ):
        tc.fv( '1', field_name, field_value )
    tc.submit( 'login_button' )
def galaxy_logout( self ):
    """Log the current user out of Galaxy and verify the confirmation page."""
    self.home()
    self.visit_galaxy_url( "/user/logout" )
    self.check_page_for_string( "You have been logged out" )
    self.home()
def generate_invalid_dependency_xml( self, xml_filename, url, name, owner, changeset_revision, complex=True, package=None, version=None, description=None ):
    """Write a dependency definition file pointing at the given (possibly
    invalid) repository revision.

    :param xml_filename: path of the xml file to write
    :param url: tool shed URL for the dependency entry
    :param name: dependent repository name
    :param owner: dependent repository owner
    :param changeset_revision: dependent changeset revision
    :param complex: when True emit a complex (package) dependency, otherwise
        a plain repository dependency
    :param package: package name for complex dependencies
    :param version: package version for complex dependencies
    :param description: optional description attribute for simple dependencies
    """
    file_path = os.path.split( xml_filename )[0]
    dependency_entries = []
    template = string.Template( common.new_repository_dependencies_line )
    dependency_entries.append( template.safe_substitute( toolshed_url=url,
                                                         owner=owner,
                                                         repository_name=name,
                                                         changeset_revision=changeset_revision ) )
    if not os.path.exists( file_path ):
        os.makedirs( file_path )
    if complex:
        dependency_template = string.Template( common.complex_repository_dependency_template )
        repository_dependency_xml = dependency_template.safe_substitute( package=package, version=version, dependency_lines='\n'.join( dependency_entries ) )
    else:
        # Always emit the description attribute, empty when none was given.
        if not description:
            description = ' description=""'
        else:
            description = ' description="%s"' % description
        template_parser = string.Template( common.new_repository_dependencies_xml )
        repository_dependency_xml = template_parser.safe_substitute( description=description, dependency_lines='\n'.join( dependency_entries ) )
    # Save the generated xml to the specified location.  Use a context
    # manager instead of the original bare file() call, which never closed
    # the handle.
    with open( xml_filename, 'w' ) as xml_file:
        xml_file.write( repository_dependency_xml )
def generate_repository_dependency_xml( self, repositories, xml_filename, dependency_description='', complex=False, package=None, version=None ):
file_path = os.path.split( xml_filename )[0]
if not os.path.exists( file_path ):
os.makedirs( file_path )
dependency_entries = []
for repository in repositories:
changeset_revision = self.get_repository_tip( repository )
template = string.Template( | |
<gh_stars>0
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\ResidenceAndProcessesBigData.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtWebEngineWidgets
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
import pyodbc
import logging
import plotly
import plotly.graph_objs as go
from plotly.subplots import make_subplots
import numbers
# Log-line format constant (only defined here; presumably consumed by a
# logging.basicConfig call elsewhere in the file — TODO confirm).
LOG_FORMAT = "%(levelname)s %(asctime)s - %(message)s"
# Chromium flag consumed by QtWebEngine: disables same-origin enforcement
# (presumably needed so generated pages can load the local plotly file —
# TODO confirm).
sys.argv.append("--disable-web-security")
# file:// URL of the bundled plotly library, for injection into the HTML
# rendered in the QWebEngineView.
path = QtCore.QDir.current().filePath('./plotly-latest.min.js')
local = QtCore.QUrl.fromLocalFile(path).toString()
# Row counts per (big-data item, process name) — top-level process nodes.
QUERYPROCESSES = r"""
SELECT CatalogoGranDato.NombreGranDato, CarLibrosyProcesos.NombreProcesoLibro, COUNT(*)
FROM CatalogoGranDato, MasterProcesos, UsoProcesos, CarLibrosyProcesos
WHERE CatalogoGranDato.NombreGranDato = '{0}'
AND MasterProcesos.ClaveGrandato = CatalogoGranDato.ClaveGracDato
AND MasterProcesos.ClaveDatoProceso = UsoProcesos.ClaveDatoProceso
AND MasterProcesos.ClaveProceso = CarLibrosyProcesos.ClaveProcesoLibro
GROUP BY CatalogoGranDato.NombreGranDato, CarLibrosyProcesos.NombreProcesoLibro
""" 
# Per-document counts beneath each process; child ids are built as
# "document<br>process" so they are unique within the hierarchy.
QUERYSUBPROCESSES = r"""
SELECT CarLibrosyProcesos.NombreProcesoLibro,
UsoProcesos.Documento + '<br>' + CarLibrosyProcesos.NombreProcesoLibro,
COUNT(*)
FROM CatalogoGranDato, MasterProcesos, UsoProcesos, CarLibrosyProcesos
WHERE CatalogoGranDato.NombreGranDato = '{0}'
AND MasterProcesos.ClaveGrandato = CatalogoGranDato.ClaveGracDato
AND MasterProcesos.ClaveDatoProceso = UsoProcesos.ClaveDatoProceso
AND MasterProcesos.ClaveProceso = CarLibrosyProcesos.ClaveProcesoLibro
GROUP BY CatalogoGranDato.NombreGranDato, CarLibrosyProcesos.NombreProcesoLibro, UsoProcesos.Documento
"""
# Row counts per (big-data item, book name) restricted to 'Residencia'
# lineage; the 'MOCPr' book is explicitly excluded.
QUERYBOOKS = r"""
SELECT GranDato, CarLibrosyProcesos.NombreProcesoLibro, COUNT(*)
FROM CarLibrosyProcesos, UsoLibros INNER JOIN MasterLibros
ON UsoLibros.IdDato = MasterLibros.IdDato
WHERE UsoLibros.Tipo_Linaje = 'Residencia'
AND GranDato = '{0}'
AND UsoLibros.Libro <> 'MOCPr'
AND UsoLibros.Libro = CarLibrosyProcesos.ClaveProcesoLibro
GROUP BY GranDato, CarLibrosyProcesos.NombreProcesoLibro;
"""
# Per-table counts beneath each book; NULL table names are shown as 'Nulo'.
QUERYTABLES = r"""
SELECT CarLibrosyProcesos.NombreProcesoLibro, IIF(ISNULL(Tabla), 'Nulo', Tabla) + '<br>' + CarLibrosyProcesos.NombreProcesoLibro, COUNT(*)
FROM CarLibrosyProcesos, UsoLibros INNER JOIN MasterLibros
ON UsoLibros.IdDato = MasterLibros.IdDato
WHERE UsoLibros.Tipo_Linaje = 'Residencia'
AND GranDato = '{0}'
AND UsoLibros.Libro <> 'MOCPr'
AND UsoLibros.Libro = CarLibrosyProcesos.ClaveProcesoLibro
GROUP BY GranDato, CarLibrosyProcesos.NombreProcesoLibro, Tabla;
"""
# Per-field counts beneath each table; ids chain field/table/book with <br>
# separators to stay unique.
QUERYFIELDS = r"""
SELECT IIF(ISNULL(Tabla), 'Nulo', Tabla) + '<br>' + CarLibrosyProcesos.NombreProcesoLibro,
IIF(ISNULL(Campo), 'Nulo', Campo) + '<br>'
+ IIF(ISNULL(Tabla), 'Nulo', Tabla) + '<br>'
+ CarLibrosyProcesos.NombreProcesoLibro, COUNT(*)
FROM CarLibrosyProcesos, UsoLibros INNER JOIN MasterLibros
ON UsoLibros.IdDato = MasterLibros.IdDato
WHERE UsoLibros.Tipo_Linaje = 'Residencia'
AND GranDato = '{0}'
AND UsoLibros.Libro <> 'MOCPr'
AND UsoLibros.Libro = CarLibrosyProcesos.ClaveProcesoLibro
GROUP BY GranDato, CarLibrosyProcesos.NombreProcesoLibro, Tabla, Campo;
"""
# TODO: Union entre Procesos y Libros
# Distinct big-data names used to fill the window's combo box.
QUERYDROPDOWN = r"""
SELECT DISTINCT CatalogoGranDato.NombreGranDato
FROM CatalogoGranDato, MasterProcesos, UsoProcesos
WHERE MasterProcesos.ClaveGrandato = CatalogoGranDato.ClaveGracDato
AND MasterProcesos.ClaveDatoProceso = UsoProcesos.ClaveDatoProceso
"""
class ResidenceAndProcessesBigData(object):
def setupUi(self, MainWindow):
    """Build the widget tree, menus, actions and signal wiring for the
    window, then connect to the database and populate the dropdown.

    Generated by pyuic5 with the navigation/DB wiring added by hand; the
    statement order matters (widgets must exist before they are wired).
    """
    MainWindow.setObjectName("MainWindow")
    MainWindow.resize(800, 600)
    self.centralwidget = QtWidgets.QWidget(MainWindow)
    self.centralwidget.setObjectName("centralwidget")
    self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
    self.gridLayout.setObjectName("gridLayout")
    # Expand/contract buttons for the processes half of the view.
    self.btnExpandProcesses = QtWidgets.QPushButton(self.centralwidget)
    self.btnExpandProcesses.setMinimumSize(QtCore.QSize(350, 0))
    self.btnExpandProcesses.setObjectName("btnExpandProcesses")
    self.gridLayout.addWidget(self.btnExpandProcesses, 2, 1, 1, 1)
    self.btnContractProcesses = QtWidgets.QPushButton(self.centralwidget)
    sizePolicy = QtWidgets.QSizePolicy(
        QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(
        self.btnContractProcesses.sizePolicy().hasHeightForWidth())
    self.btnContractProcesses.setSizePolicy(sizePolicy)
    self.btnContractProcesses.setObjectName("btnContractProcesses")
    self.gridLayout.addWidget(self.btnContractProcesses, 2, 2, 1, 1)
    self.label = QtWidgets.QLabel(self.centralwidget)
    sizePolicy = QtWidgets.QSizePolicy(
        QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(
        self.label.sizePolicy().hasHeightForWidth())
    self.label.setSizePolicy(sizePolicy)
    self.label.setObjectName("label")
    self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
    # Embedded browser that displays the generated chart HTML.
    self.graph = QtWebEngineWidgets.QWebEngineView(self.centralwidget)
    self.graph.setUrl(QtCore.QUrl("about:blank"))
    self.graph.setObjectName("graph")
    self.gridLayout.addWidget(self.graph, 1, 0, 1, 3)
    # Dropdown of big-data items driving the dashboard content.
    self.ddlBigData = QtWidgets.QComboBox(self.centralwidget)
    self.ddlBigData.setObjectName("ddlBigData")
    self.gridLayout.addWidget(self.ddlBigData, 0, 1, 1, 2)
    self.label_2 = QtWidgets.QLabel(self.centralwidget)
    self.label_2.setObjectName("label_2")
    self.gridLayout.addWidget(self.label_2, 2, 0, 1, 1)
    self.label_3 = QtWidgets.QLabel(self.centralwidget)
    self.label_3.setObjectName("label_3")
    self.gridLayout.addWidget(self.label_3, 3, 0, 1, 1)
    # Expand/contract buttons for the books half of the view.
    self.btnExpandBooks = QtWidgets.QPushButton(self.centralwidget)
    self.btnExpandBooks.setObjectName("btnExpandBooks")
    self.gridLayout.addWidget(self.btnExpandBooks, 3, 1, 1, 1)
    self.btnContractBooks = QtWidgets.QPushButton(self.centralwidget)
    self.btnContractBooks.setObjectName("btnContractBooks")
    self.gridLayout.addWidget(self.btnContractBooks, 3, 2, 1, 1)
    MainWindow.setCentralWidget(self.centralwidget)
    self.menubar = QtWidgets.QMenuBar(MainWindow)
    self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 26))
    self.menubar.setObjectName("menubar")
    MainWindow.setMenuBar(self.menubar)
    self.statusbar = QtWidgets.QStatusBar(MainWindow)
    self.statusbar.setObjectName("statusbar")
    MainWindow.setStatusBar(self.statusbar)
    self.menuMenu = QtWidgets.QMenu(self.menubar)
    self.menuMenu.setObjectName("menuMenu")
    MainWindow.setMenuBar(self.menubar)
    # Menu actions: each one navigates to another screen of the app.
    self.actionInicio = QtWidgets.QAction(MainWindow)
    self.actionInicio.setObjectName("actionInicio")
    self.actionInicio.triggered.connect(self.toMenu)
    self.actionConsulta = QtWidgets.QAction(MainWindow)
    self.actionConsulta.setObjectName("actionConsulta")
    self.actionConsulta.triggered.connect(self.toGraph)
    self.actionBusqueda = QtWidgets.QAction(MainWindow)
    self.actionBusqueda.setObjectName("actionBusqueda")
    self.actionBusqueda.triggered.connect(self.toSearchByWord)
    self.actionDatoRegulado = QtWidgets.QAction(MainWindow)
    self.actionDatoRegulado.setObjectName("actionDatoRegulado")
    self.actionDatoRegulado.triggered.connect(self.toRegulatedData)
    self.actionDashboard = QtWidgets.QAction(MainWindow)
    self.actionDashboard.setObjectName("actionDashboard")
    self.actionDashboard.triggered.connect(self.toDashboard)
    self.actionAllData = QtWidgets.QAction(MainWindow)
    self.actionAllData.setObjectName("actionAllData")
    self.actionAllData.triggered.connect(self.toAllData)
    self.actionResidenceBigData = QtWidgets.QAction(MainWindow)
    self.actionResidenceBigData.setObjectName("actionResidenceBigData")
    self.actionResidenceBigData.triggered.connect(self.toResidenceBigData)
    self.actionProcessesBigData = QtWidgets.QAction(MainWindow)
    self.actionProcessesBigData.setObjectName("actionProcessesBigData")
    self.actionProcessesBigData.triggered.connect(self.toProcessesBigData)
    self.actionResidenceAndProcessesBigData = QtWidgets.QAction(MainWindow)
    self.actionResidenceAndProcessesBigData.setObjectName(
        "actionResidenceAndProcessesBigData")
    self.actionResidenceAndProcessesBigData.triggered.connect(
        self.toResidenceAndProcessesBigData)
    self.actionDataInventary = QtWidgets.QAction(MainWindow)
    self.actionDataInventary.setObjectName("actionDataInventary")
    self.actionDataInventary.triggered.connect(self.toDataInventary)
    self.menuMenu.addAction(self.actionInicio)
    self.menuMenu.addSeparator()
    self.menuMenu.addAction(self.actionConsulta)
    self.menuMenu.addAction(self.actionBusqueda)
    self.menuMenu.addAction(self.actionDatoRegulado)
    self.menuMenu.addAction(self.actionDashboard)
    self.menuMenu.addAction(self.actionAllData)
    self.menuMenu.addAction(self.actionResidenceBigData)
    self.menuMenu.addAction(self.actionProcessesBigData)
    self.menuMenu.addAction(self.actionResidenceAndProcessesBigData)
    self.menuMenu.addAction(self.actionDataInventary)
    self.menubar.addAction(self.menuMenu.menuAction())
    # Hand-added wiring: DB connection, button handlers, dropdown data.
    self.connectToDb()
    self.btnContractProcesses.clicked.connect(self.contractProcesses)
    self.btnExpandProcesses.clicked.connect(self.expandProcesses)
    self.btnContractBooks.clicked.connect(self.contractBooks)
    self.btnExpandBooks.clicked.connect(self.expandBooks)
    self.populateDropDown()
    self.ddlBigData.currentTextChanged.connect(
        self.populateDashboard)
    self.show()
    # Counters used by the expand/contract handlers (presumably the current
    # expansion level of each chart — TODO confirm in those handlers).
    self.circleProcesses = 0
    self.circleBooks = 0
    self.retranslateUi(MainWindow)
    QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
    """Apply the (Spanish) captions to the window, menu and every widget/action."""
    _translate = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(_translate(
        "MainWindow", "Procesos y residencia de Gran Dato"))
    MainWindow.setWindowIcon(QtGui.QIcon('favicon.ico'))
    self.menuMenu.setTitle(_translate("MainWindow", "Menu"))
    # Widget/action -> caption pairs; a single table keeps all captions together.
    captions = (
        (self.btnExpandProcesses, "Expandir"),
        (self.btnContractProcesses, "Contraer"),
        (self.label, "Gran dato"),
        (self.label_2, "Procesos"),
        (self.label_3, "Libros"),
        (self.btnExpandBooks, "Expandir"),
        (self.btnContractBooks, "Contraer"),
        (self.actionInicio, "Inicio"),
        (self.actionConsulta,
         "Gráfica de grandes datos en procesos y libros"),
        (self.actionBusqueda, "Búsqueda"),
        (self.actionDatoRegulado, "Usos por dato regulado"),
        (self.actionDashboard, "Dashboard"),
        (self.actionAllData, "Linaje"),
        (self.actionResidenceBigData, "Residencia por Gran Dato"),
        (self.actionProcessesBigData, "Procesos por Gran dato"),
        (self.actionResidenceAndProcessesBigData,
         "Residencia y Procesos por Gran Dato"),
        (self.actionDataInventary, "Inventario de datos"),
    )
    for widget, caption in captions:
        widget.setText(_translate("MainWindow", caption))
def connectToDb(self):
    """Open the MS Access database used by the dashboard (autocommit enabled)."""
    self.conn = pyodbc.connect(
        driver=r'Microsoft Access Driver (*.mdb, *.accdb)',
        dbq=r'./AnaliticaData.accdb',
        autocommit=True)
def exec(self, query, values=None):
    """Run *query* against the open connection and return all result rows.

    Args:
        query: SQL text to execute.
        values: optional parameter sequence for a parameterized query.

    Returns:
        list: all rows fetched from the cursor.
    """
    cursor = self.conn.cursor()
    # try/finally guarantees the cursor is released even when execute() or
    # fetchall() raises — the original leaked the cursor on error.
    try:
        if values is not None:
            cursor.execute(query, values)
        else:
            cursor.execute(query)
        return cursor.fetchall()
    finally:
        cursor.close()
def show(self, fig=None, title=""):
    """Render the plotly figure *fig* (or an empty page) in the embedded view.

    When *title* is non-empty it is set as the figure title.
    """
    parts = ['<html><head><meta charset="utf-8" />',
             '<script src="{}"></script></head>'.format(local),
             '<body>']
    if fig is not None:
        margin = dict(t=25, l=0, r=0, b=0)
        if title != "":
            fig.update_layout(margin=margin, title_text=title)
        else:
            fig.update_layout(margin=margin)
        parts.append(plotly.offline.plot(
            fig, include_plotlyjs=False, output_type='div'))
    parts.append('</body></html>')
    self.graph.setHtml(''.join(parts))
    self.graph.show()
def getColumn(self, data, column):
    """Return the values at index *column* from every row of *data*.

    Args:
        data: iterable of indexable rows (e.g. DB result rows).
        column: integer index of the column to extract.

    Returns:
        list: one value per row, in input order.
    """
    # Comprehension replaces the manual append loop.
    return [row[column] for row in data]
def populateDashboard(self):
    """Reset both sunburst depth counters and redraw the dashboard charts."""
    self.circleProcesses = self.circleBooks = 0
    self.modifyCircles()
def modifyCircles(self):
    # Redraw the combined dashboard: two side-by-side sunburst charts for the
    # currently selected big-data item — processes (left) and books (right).
    # Depth is driven by self.circleProcesses / self.circleBooks:
    #   0  -> first ring only, 1 -> + children annotated with percentages,
    #   >1 -> + children with plain labels.
    bigdata = self.ddlBigData.currentText()
    if (bigdata != 'Selecciona un dato'):
        # Two 'domain'-type cells are required for sunburst traces.
        fig = make_subplots(rows=1, cols=2, specs=[
            [{'type': 'domain'}, {'type': 'domain'}]])
        title = 'Procesos y libros de {0}'.format(bigdata)
        if (self.circleProcesses >= 0):
            # NOTE(review): the counters never go below 0, so this guard is
            # always true; kept as in the original.
            # First ring: processes using the selected item. Row layout of the
            # query results is (parent, name, count).
            processes = self.exec(QUERYPROCESSES.format(bigdata))
            parents = [""] + self.getColumn(processes, 0)
            names = [bigdata] + self.getColumn(processes, 1)
            counts = self.getColumn(processes, 2)
            total = self.getTotal(counts)
            values = [total] + counts  # root value = sum of children (branchvalues="total")
            if (self.circleProcesses >= 1):
                # Second ring: sub-processes; at depth exactly 1 labels carry
                # their percentage of the total.
                tables = self.exec(QUERYSUBPROCESSES.format(bigdata))
                parents = parents + self.getColumn(tables, 0)
                newValues = self.getColumn(tables, 2)
                if (self.circleProcesses == 1):
                    names = names + \
                        self.getPercentages(self.getColumn(
                            tables, 1), newValues, total)
                else:
                    names = names + self.getColumn(tables, 1)
                values = values + newValues
            fig.add_trace(go.Sunburst(
                labels=names,
                parents=parents,
                values=values,
                branchvalues="total",
            ), 1, 1)
        bigdata = self.ddlBigData.currentText()
        if (self.circleBooks >= 0):
            # Right chart: books (and optionally tables/fields) holding the item.
            books = self.exec(QUERYBOOKS.format(bigdata))
            parents = [""] + self.getColumn(books, 0)
            names = [bigdata] + self.getColumn(books, 1)
            counts = self.getColumn(books, 2)
            total = self.getTotal(counts)
            values = [total] + counts
            if (self.circleBooks >= 1):
                tables = self.exec(QUERYTABLES.format(bigdata))
                parents = parents + self.getColumn(tables, 0)
                newValues = self.getColumn(tables, 2)
                if (self.circleBooks == 1):
                    names = names + \
                        self.getPercentages(self.getColumn(
                            tables, 1), newValues, total)
                else:
                    names = names + self.getColumn(tables, 1)
                values = values + newValues
            if (self.circleBooks >= 2):
                # Third ring: individual fields, always percentage-annotated.
                fields = self.exec(QUERYFIELDS.format(bigdata))
                parents = parents + self.getColumn(fields, 0)
                newValues = self.getColumn(fields, 2)
                names = names + \
                    self.getPercentages(self.getColumn(
                        fields, 1), newValues, total)
                values = values + newValues
            fig.add_trace(go.Sunburst(
                labels=names,
                parents=parents,
                values=values,
                branchvalues="total",
            ), 1, 2)
        self.show(fig, title)
def modifyCirclesProcesses(self):
    # Redraw only the processes sunburst for the selected big-data item.
    # self.circleProcesses controls depth: 0 = processes ring only,
    # 1 = + sub-processes with percentage labels, >1 = plain labels.
    bigdata = self.ddlBigData.currentText()
    if (bigdata != 'Selecciona un dato'):
        if (self.circleProcesses >= 0):
            # NOTE(review): counter never goes below 0, guard kept as-is.
            processes = self.exec(QUERYPROCESSES.format(bigdata))
            parents = [""] + self.getColumn(processes, 0)
            names = [bigdata] + self.getColumn(processes, 1)
            counts = self.getColumn(processes, 2)
            total = self.getTotal(counts)
            values = [total] + counts  # root value = children sum
            title = 'Procesos de {0}'.format(bigdata)
            if (self.circleProcesses >= 1):
                tables = self.exec(QUERYSUBPROCESSES.format(bigdata))
                parents = parents + self.getColumn(tables, 0)
                newValues = self.getColumn(tables, 2)
                if (self.circleProcesses == 1):
                    # Annotate each sub-process with its share of the total.
                    names = names + \
                        self.getPercentages(self.getColumn(
                            tables, 1), newValues, total)
                else:
                    names = names + self.getColumn(tables, 1)
                values = values + newValues
                title = 'Procesos y documentos de {0}'.format(bigdata)
            fig = go.Figure(go.Sunburst(
                labels=names,
                parents=parents,
                values=values,
                branchvalues="total",
            ))
            self.show(fig, title)
def modifyCirclesBooks(self):
    # Redraw only the residence (books) sunburst for the selected item.
    # self.circleBooks controls depth: 0 = books ring, 1 = + tables
    # (percentage labels at exactly 1), 2 = + fields (always percentages).
    bigdata = self.ddlBigData.currentText()
    if (bigdata != 'Selecciona un dato'):
        if (self.circleBooks >= 0):
            # NOTE(review): counter never goes below 0, guard kept as-is.
            books = self.exec(QUERYBOOKS.format(bigdata))
            parents = [""] + self.getColumn(books, 0)
            names = [bigdata] + self.getColumn(books, 1)
            counts = self.getColumn(books, 2)
            total = self.getTotal(counts)
            values = [total] + counts  # root value = children sum
            if (self.circleBooks >= 1):
                tables = self.exec(QUERYTABLES.format(bigdata))
                parents = parents + self.getColumn(tables, 0)
                newValues = self.getColumn(tables, 2)
                if (self.circleBooks == 1):
                    names = names + \
                        self.getPercentages(self.getColumn(
                            tables, 1), newValues, total)
                else:
                    names = names + self.getColumn(tables, 1)
                values = values + newValues
            if (self.circleBooks >= 2):
                # Deepest ring: individual fields, percentage-annotated.
                fields = self.exec(QUERYFIELDS.format(bigdata))
                parents = parents + self.getColumn(fields, 0)
                newValues = self.getColumn(fields, 2)
                names = names + \
                    self.getPercentages(self.getColumn(
                        fields, 1), newValues, total)
                values = values + newValues
            fig = go.Figure(go.Sunburst(
                labels=names,
                parents=parents,
                values=values,
                branchvalues="total",
            ))
            self.show(fig, 'Residencia de {0}'.format(bigdata))
def getTotal(self, a):
    """Return the sum of the numeric iterable *a* (0 for an empty iterable)."""
    # Built-in sum() replaces the hand-rolled accumulation loop.
    return sum(a)
def getPercentages(self, names, values, total):
    """Append each value's share of *total* (formatted 'xx.x %') to the
    matching entry of *names*.

    Mutates *names* in place and returns it (same contract as before).
    """
    for idx, value in enumerate(values):
        names[idx] += "<br>{:.1f} %".format(value / total * 100)
    return names
def populateDropDown(self):
    """Fill the big-data dropdown: a placeholder entry plus every name from the DB."""
    items = ['Selecciona un dato']
    items.extend(self.getColumn(self.exec(QUERYDROPDOWN), 0))
    self.ddlBigData.addItems(items)
def expandProcesses(self):
    """Add one depth level to the processes sunburst (capped at 1) and redraw."""
    if self.circleProcesses < 1:
        self.circleProcesses += 1
        # NOTE(review): source indentation was ambiguous; the redraw is kept
        # inside the guard so an unchanged chart is not re-queried.
        self.modifyCirclesProcesses()
def contractProcesses(self):
    """Remove one depth level from the processes sunburst (floored at 0) and redraw."""
    if self.circleProcesses > 0:
        self.circleProcesses -= 1
        # NOTE(review): source indentation was ambiguous; redraw kept inside
        # the guard to mirror expandProcesses.
        self.modifyCirclesProcesses()
def expandBooks(self):
    """Add one depth level to the books sunburst (capped at 2) and redraw."""
    if self.circleBooks < 2:
        self.circleBooks += 1
        # NOTE(review): source indentation was ambiguous; redraw kept inside
        # the guard so an unchanged chart is not re-queried.
        self.modifyCirclesBooks()
def contractBooks(self):
    """Remove one depth level from the books sunburst (floored at 0) and redraw."""
    if self.circleBooks > 0:
        self.circleBooks -= 1
        # NOTE(review): source indentation was ambiguous; redraw kept inside
        # the guard to mirror expandBooks.
        self.modifyCirclesBooks()
def setOptionsMenu(self, toMenu, toRegulatedData, toGraph, toSearchByWord,
toDashboard, toAllData, toResidenceBigData,
toProcessesBigData, toResidenceAndProcessesBigData,
toDataInventary):
self.toMenu = toMenu
self.toRegulatedData = toRegulatedData
self.toGraph = toGraph
self.toSearchByWord = toSearchByWord
self.toDashboard = toDashboard
self.toAllData = toAllData
self.toResidenceBigData = toResidenceBigData
| |
"yeh continue me phas gaya,lit me, fruit me, nahi hai")
continue
if feature.GetField("name") in dynadead:
print(feature.GetField("name"), "yeh continue me phas gaya,dynadead me nahi hai")
continue
if (feature.GetField("name")) == (id):
print(feature.GetField("name"), "end ho gaya")
s = "THE END"
return feature, (feature.GetGeometryRef().GetPoint(0)[0], feature.GetGeometryRef().GetPoint(0)[1]), s
print(feature.GetField("name"),"asdfghjklkjhgfdsasdfghjkjhgfdsdfghjkjhgfds")
a = featurelength(feature)
count = feature.GetGeometryRef().GetPointCount()
line_last_length, lastcord = generating_line_feature(fcord, (
feature.GetGeometryRef().GetPoint(count - 1)[0], feature.GetGeometryRef().GetPoint(count - 1)[1]))
line_start_length, startcord = generating_line_feature(fcord, (
feature.GetGeometryRef().GetPoint(0)[0], feature.GetGeometryRef().GetPoint(0)[1]))
b = min(line_last_length, line_start_length)
d = max(line_last_length, line_start_length)
kdic[line_start_length] = startcord
kdic[line_last_length] = lastcord
c, outcord = generating_line_feature((q, r), (kdic[d][0], kdic[d][1]))
start_end_cord[a + b + c] = (kdic[d][0], kdic[d][1])
fid[a + b + c] = (feature.GetField("name"))
route[a + b + c] = feature
liladhar.append(a + b + c)
# print(a, feature.GetField("name"))
#print(fid,"ROUTE LIST FROM NODE")
final_route, firstcord, s = check(liladhar, q, r, route, id, l, start_end_cord, lit,nodepath,networkpath)
return final_route, firstcord, s
def check(liladhar, q, r, route, id, l, start_end_cord, lit, nodepath, networkpath):
    """Select the shortest viable candidate road feature.

    Repeatedly takes the candidate with the smallest accumulated length from
    *liladhar*; if any road leaving the node nearest to its far end still heads
    toward the target link *id*, that candidate is accepted, otherwise it is
    discarded and the next-shortest is tried.

    Returns:
        (feature, (x, y), "CONTINUE") for an accepted candidate, or
        (None, (0, 0), "NOT OK FINISH LIST") when all candidates are exhausted.
    """
    tup = (q, r)
    s = "CONTINUE"
    # `while True` replaces the original `while x` whose flag was never set
    # False (the trailing `x = True` was dead code); the result of nodecheck
    # gets its own name instead of shadowing the parameter `r`.
    while True:
        if len(liladhar) == 0:
            return None, (0, 0), "NOT OK FINISH LIST"
        d = min(liladhar)            # shortest accumulated length so far
        w = start_end_cord[d]        # far-end coordinate of that candidate
        f = route[d]                 # the candidate road feature
        firstcord, pfeat = firstcord_1(w[0], w[1], nodepath, networkpath)
        verdict = nodecheck(firstcord[0], firstcord[1], id, l, tup[0], tup[1],
                            lit, nodepath, networkpath)
        if verdict == "ok":
            return f, (w[0], w[1]), s
        liladhar.remove(d)           # dead end: drop and try next-shortest
def nodecheck(x, y, id, l, q, r, lit, nodepath, networkpath):
    # Decide whether the node at (x, y) is a viable step toward the target
    # link *id*: returns "ok" when the target link itself, or at least one
    # road heading toward the crime location (q, r), leaves this node;
    # returns "not ok" otherwise.
    #   l   - link names already used on the route (excluded)
    #   lit - whitelist of link names that survived dead-end removal
    li = []  # links at this node confirmed to point toward the crime location
    s = "ok"
    t = "not ok"
    pointdatasource = open_File(path=nodepath)
    pointlayer = pointdatasource.GetLayerByIndex(0)
    datasource = open_File(path=networkpath)
    layer = datasource.GetLayerByIndex(0)
    for feature in pointlayer:
        # Exact float comparison is intentional here: the coordinates being
        # compared originate from the same node shapefile.
        if feature.GetGeometryRef().GetX() == x and feature.GetGeometryRef().GetY() == y:
            # Restrict the network layer to a small buffer around the node so
            # only roads actually touching it are iterated.
            layer.SetSpatialFilter(feature.GetGeometryRef().Buffer(0.00010))
            for feat in layer:
                if feat.GetField("name") not in lit:
                    continue
                if feat.GetField("name") not in l and feat.GetField("name") not in li:
                    o = (feat.GetField("name"))
                    if o == id:
                        # The target link itself emerges from this node.
                        return s
                    # Keep the road only if it heads toward the crime location.
                    num = mindist(o, q, r, x, y, lit, nodepath, networkpath)
                    if num is not None:
                        li.append(num)
    if len(li) != 0:
        return s
    else:
        return t
def mindist(b, q, r, x, y, lit, nodepath, networkpath):
    # Return the link name *b* if the road feature named *b* points toward the
    # crime location (q, r) — i.e. the endpoint of *b* closest to (q, r) is
    # also its far end as seen from the node (x, y) — otherwise None.
    dictionary = {}  # distance from (q, r) -> corresponding endpoint coordinate
    datasource = open_File(path=networkpath)
    for layer in datasource:
        for feat in layer:
            if feat.GetField("name") not in lit:
                continue
            if feat.GetField("name") == b:
                a = feat
    # NOTE(review): `a` is only bound when a feature named *b* was found in
    # the loop above — an unknown *b* raises NameError here; confirm callers
    # always pass names present in the network shapefile.
    count = a.GetGeometryRef().GetPointCount()
    # Distance from the crime point to the feature's last vertex...
    lengthend, tupcordend = generating_line_feature((q, r), (
        a.GetGeometryRef().GetPoint(count - 1)[0], a.GetGeometryRef().GetPoint(count - 1)[1]))
    dictionary[lengthend] = tupcordend
    # ...and to its first vertex.
    lengthstart, tupcordstart = generating_line_feature((q, r), (
        a.GetGeometryRef().GetPoint(0)[0], a.GetGeometryRef().GetPoint(0)[1]))
    dictionary[lengthstart] = tupcordstart
    # Far end of link *b* relative to the current node (x, y).
    w = farendcord((x, y), b, nodepath, networkpath)
    d = min(lengthend, lengthstart)
    if dictionary[d] == (w[0], w[1]):
        # The endpoint nearest the crime is the far end: link heads toward it.
        return b
    else:
        return None
def firstcord_1(c, d, nodepath, networkpath):
    """Return (coordinate, feature) of the node nearest to the point (c, d).

    Distance is the haversine distance from (c, d) to every node in the node
    shapefile. On exact distance ties the last node read wins, matching the
    original dict-keyed-by-length behaviour.
    """
    # Keep the datasource bound so the OGR features remain valid while used.
    pointdatasource = open_File(path=nodepath)
    pointlayer = pointdatasource.GetLayerByIndex(0)
    linelength = []
    cord = {}         # distance -> (x, y) of the node
    pointfeature = {}  # distance -> node feature
    for feature in pointlayer:
        # Hoist the geometry lookup; the original fetched it repeatedly.
        geom = feature.GetGeometryRef()
        distance = haversine((geom.GetX(), geom.GetY()), (c, d))
        linelength.append(distance)
        cord[distance] = (geom.GetX(), geom.GetY())
        pointfeature[distance] = feature
    # The unused `id` dict (which shadowed the builtin) and the pointless
    # pointfeature.clear() from the original have been dropped.
    nearest = min(linelength)
    return cord[nearest], pointfeature[nearest]
def nearest_policia(path, crimepath):
    """Return (x, y) of the police-station feature closest to the crime point.

    Args:
        path: shapefile of police stations.
        crimepath: shapefile whose first feature is the crime location.
    """
    crimedatasource = open_File(crimepath)
    clayer = crimedatasource.GetLayerByIndex(0)
    cfeat = clayer.GetFeature(0)
    # The crime point is loop-invariant: compute it once instead of per feature.
    pointB = (cfeat.GetGeometryRef().GetX(), cfeat.GetGeometryRef().GetY())
    datasource = open_File(path)
    layer = datasource.GetLayerByIndex(0)
    # Track the running minimum directly; the original built a dict plus a list
    # (and an unused name dict) and evaluated min() three times.
    best_length = None
    best_feat = None
    for feat in layer:
        pointA = (feat.GetGeometryRef().GetX(), feat.GetGeometryRef().GetY())
        length = haversine(pointA, pointB)
        # `<=` keeps the last feature on exact ties, matching the original
        # dict-overwrite behaviour.
        if best_length is None or length <= best_length:
            best_length = length
            best_feat = feat
    return best_feat.GetGeometryRef().GetX(), best_feat.GetGeometryRef().GetY()
def a(flink, t):
    # Return the two endpoints of the road feature named *flink*, ordered as
    # (closest to *t*, farthest from *t*) by haversine distance.
    # NOTE(review): shapefile paths are hard-coded to a local Windows machine;
    # consider threading nodepath/networkpath through as the other helpers do.
    l = {}  # haversine distance from t -> endpoint coordinate
    datasource = open_File(path="C:/Users\Hp\Desktop\DATA\ROADNETWORK\ROADNETWORK.shp")
    for layer in datasource:
        for feat in layer:
            if feat.GetField("name") == flink:
                a = feat
                count = a.GetGeometryRef().GetPointCount()
                # w = (last vertex, first vertex) of the matching line feature.
                w = ((a.GetGeometryRef().GetPoint(count - 1)[0], a.GetGeometryRef().GetPoint(count - 1)[1]),
                     (a.GetGeometryRef().GetPoint(0)[0], a.GetGeometryRef().GetPoint(0)[1]))
                l[haversine(w[0], t)] = w[0]
                l[haversine(w[1], t)] = w[1]
    # NOTE(review): `w` is only bound when a matching feature was found above;
    # an unknown *flink* raises NameError below — confirm callers guarantee it.
    pointdatasource = open_File(path="C:/Users\Hp\Desktop\DATA/NODES/NODES.shp")
    pointlayer = pointdatasource.GetLayerByIndex(0)
    '''for feature in pointlayer:
        if (feature.GetGeometryRef().GetX(), feature.GetGeometryRef().GetY()) == w[0]:
            #print(feature.GetField("name"))
        elif (feature.GetGeometryRef().GetX(), feature.GetGeometryRef().GetY()) == w[1]:
            # print(feature.GetField("name"))'''
    xmin = l[min(haversine(w[0], t), haversine(w[1], t))]
    ymax = l[max(haversine(w[0], t), haversine(w[1], t))]
    return xmin, ymax
def shortest_path_forward(q, r, c, d, cimelink, plink, policenodefeat, policenodecord, cimenodecord, dynadead, fruit, nodepath, networkpath):
    """Greedy forward walk (police node -> crime link).

    Repeatedly asks roadlink() for the next viable road feature and advances to
    the node nearest its far end, until the crime link is reached ("THE END")
    or no candidates remain ("NOT OK FINISH LIST").

    Returns:
        list: names of the traversed road links, in order.
    """
    l, lit = deadendremoval(q, r, c, d, cimelink, plink, cimenodecord,
                            policenodecord, nodepath, networkpath)
    cordlist = [policenodecord]
    finalroute = []
    roadId = []
    while True:
        final_route, firstcord, s = roadlink(
            policenodefeat, policenodecord, float(cimenodecord[0]),
            float(cimenodecord[1]), cimelink, l, roadId, lit, dynadead, fruit,
            nodepath, networkpath)
        if s == "NOT OK FINISH LIST":
            break
        # Advance the walking position to the node nearest the far end.
        policenodecord, policenodefeat = firstcord_1(
            firstcord[0], firstcord[1], nodepath, networkpath)
        cordlist.append(firstcord)
        finalroute.append(final_route)
        roadId.append(final_route.GetField("name"))
        if s == "THE END":
            break
    return roadId
def shortest_path_reverse(q, r, c, d, cimelink, plink, policenodefeat, policenodecord, cimenodecord, dynadead, fruit, nodepath, networkpath):
    """Greedy reverse walk (crime node -> police link); mirror of
    shortest_path_forward with the endpoints swapped by the caller.

    Returns:
        list: names of the traversed road links, in order.
    """
    l, lit = deadendremoval(q, r, c, d, cimelink, plink, cimenodecord,
                            policenodecord, nodepath, networkpath)
    cordlist = [policenodecord]
    finalroute = []
    revroadId = []
    while True:
        final_route, firstcord, s = roadlink(
            policenodefeat, policenodecord, float(cimenodecord[0]),
            float(cimenodecord[1]), cimelink, l, revroadId, lit, dynadead,
            fruit, nodepath, networkpath)
        if s == "NOT OK FINISH LIST":
            break
        # Advance the walking position to the node nearest the far end.
        policenodecord, policenodefeat = firstcord_1(
            firstcord[0], firstcord[1], nodepath, networkpath)
        cordlist.append(firstcord)
        finalroute.append(final_route)
        revroadId.append(final_route.GetField("name"))
        if s == "THE END":
            break
    return revroadId
def commonid(forwardroute, reverseroute):
    """Collect index groups for links shared by both routes.

    For every element of *forwardroute* that also occurs in *reverseroute*,
    appends one group containing all of that link's indices in forwardroute
    followed by all of its indices in reverseroute.

    Returns:
        list[list[int]]: one group per matching forward element (duplicated
        forward links therefore produce duplicated groups, as before).
    """
    print(forwardroute, "forwardroute", reverseroute, "reverseroute")
    # Iterate elements directly / via enumerate instead of range(len(...));
    # the original's unused `x = 0` has been removed.
    l = []
    for link in forwardroute:
        if link in reverseroute:
            a = []
            for m, n in enumerate(forwardroute):
                if n == link:
                    a.append(m)
                    print(m, link)
            for i, j in enumerate(reverseroute):
                if j == link:
                    a.append(i)
                    print(i, link)
            l.append(a)
    print(l)
    return l
def list_of_possible_path(forwardroute, reverseroute, l):
    """Build every candidate end-to-end path from the index pairs in *l*.

    Each pair (fwd_idx, rev_idx) marks a link common to both routes; the
    candidate is the forward prefix before that link plus the reverse route
    walked backwards from that link to its start.

    Returns:
        list[list]: one candidate path per index pair.
    """
    final_route = []
    for pair in l:
        # forwardroute[:pair[0]] is the prefix before the shared link;
        # reverseroute[pair[1]::-1] walks back from the shared link to index 0.
        # Slicing replaces the original per-index append loops.
        final_route.append(forwardroute[:pair[0]] + reverseroute[pair[1]::-1])
    print(final_route)
    return final_route
def route_id_list(possible_route, nodepath, networkpath):
    """Pick the shortest of the candidate routes.

    Args:
        possible_route: list of routes, each a list of link names.
        nodepath: node shapefile path (unused here, kept for signature parity).
        networkpath: road-network shapefile path.

    Returns:
        (route, length): the candidate with the smallest summed feature length
        and that length. On ties the last candidate with the minimal length
        wins, matching the original dict-overwrite behaviour.
    """
    # Read the network ONCE and accumulate each link name's total length.
    # The original re-opened the shapefile for every link of every route
    # (O(routes x links) full file scans).
    link_length = {}
    datasource = open_File(path=networkpath)
    for layer in datasource:
        for f in layer:
            name = f.GetField("name")
            link_length[name] = link_length.get(name, 0) + featurelength(f)
    lengthd = {}
    lengthl = []
    for i in range(len(possible_route)):
        # Links absent from the network contribute 0, as in the original.
        a = 0
        for link in possible_route[i]:
            a = a + link_length.get(link, 0)
        lengthd[a] = i
        lengthl.append(a)
    print(lengthl, lengthd)
    x = lengthd[min(lengthl)]
    return possible_route[x], min(lengthl)
def wktlist(finalroute, cli, pli, nodepath, networkpath):
    """Collect the WKT geometry of every route link, excluding the crime
    link (*cli*) and the police link (*pli*) themselves."""
    geometries = []
    datasource = open_File(path=networkpath)
    for layer in datasource:
        for feat in layer:
            name = feat.GetField("name")
            if name in finalroute and name != cli and name != pli:
                wkt = feat.GetGeometryRef().ExportToWkt()
                print(wkt, type(wkt))
                geometries.append(wkt)
    return geometries
def wktsingle(froute, networkpath):
    """Return the WKT of the first network feature whose name appears in
    *froute* (implicitly None when nothing matches)."""
    datasource = open_File(path=networkpath)
    for layer in datasource:
        for feat in layer:
            if feat.GetField("name") not in froute:
                continue
            return feat.GetGeometryRef().ExportToWkt()
def route(li, path, name):
    """Write the WKT line strings in *li* into a new ESRI shapefile at
    <path>/<name>/<name>.shp, georeferenced to WGS84."""
    srs = osgeo.osr.SpatialReference()
    srs.SetWellKnownGeogCS("WGS84")
    shp_driver = osgeo.ogr.GetDriverByName("ESRI Shapefile")
    folderLocation, folderName = creating_directory(path, name)
    dstPath = os.path.join(folderLocation, "%s.shp" % (name))
    out_source = shp_driver.CreateDataSource("%s" % dstPath)
    out_layer = out_source.CreateLayer("resulting layer", srs)
    for wkt in li:
        feature = osgeo.ogr.Feature(out_layer.GetLayerDefn())
        feature.SetGeometry(ogr.CreateGeometryFromWkt(wkt))
        out_layer.CreateFeature(feature)
        feature.Destroy()
    # Destroy() flushes and closes the datasource, committing the file.
    out_source.Destroy()
def shortroute(q, r, c, d, cimelink, plink, policenodefeat, policenodecord, crimenodefeat, crimenodecord, cimenodecord):
    # Combine the greedy forward (police -> crime) and reverse (crime ->
    # police) walks, splice them at their common links, and keep the shortest
    # spliced path; also writes that path out as a shapefile.
    # NOTE(review): the helpers called here are defined with additional
    # trailing parameters (dynadead, fruit, nodepath, networkpath) that this
    # caller does not pass; route_id_list/wktlist/route are likewise invoked
    # with fewer arguments than their definitions require, and route_id_list
    # returns a (route, length) tuple while *froute* is used as a plain id
    # list below. Confirm which generation of those helpers this caller
    # belongs to before relying on it.
    forwardroute = shortest_path_forward(q, r, c, d, cimelink, plink, policenodefeat, policenodecord, cimenodecord)
    print("THE FORWARD ROUTE IS AS FOLLOWS", forwardroute)
    reverseroute = shortest_path_reverse(c, d, q, r, plink, cimelink, crimenodefeat, crimenodecord, policenodecord)
    print("THE REVERSE ROUTE IS AS FOLLOWS", reverseroute)
    l = commonid(forwardroute, reverseroute)
    print("COMMON ID Are AS FOLLOWS", l)
    possible_route = list_of_possible_path(forwardroute, reverseroute, l)
    print("THE POSSIBLE ROUTES ARE AS FOLLOWS", possible_route)
    froute = route_id_list(possible_route)
    print(froute,
          "result@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&")
    lk = wktlist(froute)
    route(lk)
    return froute
def bufgeo(point, dist):
    """Return a circular polygon: the (x, y) *point* buffered by *dist*
    (in the layer's coordinate units, here degrees)."""
    geom = ogr.Geometry(ogr.wkbPoint)
    geom.AddPoint(point[0], point[1])
    return geom.Buffer(dist)
def listofnode(flink, plink,nodepath,networkpath):
policenodelist = {}
crimenodelist = {}
datasource = open_File(path=networkpath)
for layer in datasource:
for feat in layer:
if feat.GetField("name") == flink:
count = feat.GetGeometryRef().GetPointCount()
p = bufgeo((feat.GetGeometryRef().GetPoint(count - 1)[0], feat.GetGeometryRef().GetPoint(count - 1)[1]),
0.00010)
qd = bufgeo((feat.GetGeometryRef().GetPoint(0)[0], feat.GetGeometryRef().GetPoint(0)[1]), 0.00010)
nodedatasource = open_File(path=nodepath)
for k in nodedatasource:
k.SetSpatialFilter(p)
for w in k:
crimenodelist[w] = (w.GetField("name"))
k.SetSpatialFilter(qd)
for e in k:
crimenodelist[e] = (e.GetField("name"))
if feat.GetField("name") == plink:
count = feat.GetGeometryRef().GetPointCount()
p = bufgeo((feat.GetGeometryRef().GetPoint(count - 1)[0], feat.GetGeometryRef().GetPoint(count - 1)[1]),
0.00010)
qd = bufgeo((feat.GetGeometryRef().GetPoint(0)[0], feat.GetGeometryRef().GetPoint(0)[1]), 0.00010)
nodedatasource = open_File(path=nodepath)
for k in | |
# filename: tti/utils/trading_simulation.py
"""
Trading-Technical-Indicators (tti) python library
File name: trading_simulation.py
Trading simulation class implementation defined under the tti.utils
package.
"""
import pandas as pd
import numpy as np
from ..utils.data_validation import validateInputData
from ..utils.exceptions import WrongTypeForInputParameter, \
NotValidInputDataForSimulation, WrongValueForInputParameter
class TradingSimulation:
"""
Trading Simulation class implementation. Provides utilities methods for
the getTiSimulation method of the tti.indicators package.
Args:
input_data_index (pandas.DateTimeIndex): The indicator's input data
index. Is used for validating that the close values DataFrame
includes data for the whole simulation period.
close_values (pandas.DataFrame): The close prices of the stock, for
the whole simulation period. Index is of type DateTimeIndex
with same values as the input to the indicator data. It
contains one column ``close``.
max_exposure(float, default=None): Maximum allowed exposure for all the
opened positions (``short`` and ``long``). If the exposure reaches
this threshold, no further positions are being opened. A new
position can be opened again only when exposure reduces through a
position close. If set to None, then there is no upper limit for
the opened positions (exposure). When a new ``long`` position is
opened, exposure is increased by the ``stock_price``. When a
``short`` position is opened, exposure is increased by the
``short_exposure_factor * stock_price``. Values >0.0 or None are
supported.
short_exposure_factor (float, default=1.5): The exposure factor when
a new ``short`` position is opened. Usually is above 1.0 and it
is used as security when a short position is opened. Values >=1.0
are supported.
Attributes:
_input_data_index (pandas.DateTimeIndex): The indicator's input data
index. Is used for validating that the close values DataFrame
includes data for the whole simulation period.
_close_values (numpy.ndarray): The close prices of the stock, for
the whole simulation period.
_max_exposure(float, default=None): Maximum allowed exposure for all
the opened positions (``short`` and ``long``). If the exposure
reaches this threshold, no further positions are being opened. A
new position can be opened again only when exposure reduces through
a position close. If set to None, then there is no upper limit for
the opened positions (exposure). When a new ``long`` position is
opened, exposure is increased by the ``stock_price``. When a
``short`` position is opened, exposure is increased by the
``short_exposure_factor * stock_price``. Values >0.0 or None are
supported.
_short_exposure_factor (float, default=1.5): The exposure factor when
a new ``short`` position is opened. Usually is above 1.0 and it
is used as security when a short position is opened. Values >=1.0
are supported.
_portfolio (pandas.DataFrame): Simulation portfolio, keeps a track of
the entered positions during the simulation. Position: ``long``,
``short`` or ``none``. Status: ``open``, ``close`` or none.
Exposure: ``stock_price`` when position is ``long``, and
``short_exposure_factor * stock_price`` when position is ``short``.
_simulation_data (pandas.DataFrame): Dataframe which holds details and
about the simulation. The index of the dataframe is the whole
trading period(DateTimeIndex). Columns are:
``signal``: the signal produced at each day of the simulation
period.
``open_trading_action``: the open trading action applied. Possible
values are ``long``, ``short`` and ``none``.
``stock_value``: The value of the stock during the simulation
period.
``exposure``: The accumulated exposure during the simulation
period. Increased by ``stock_price`` when a ``long`` position is
opened, and by ``short_exposure_factor * stock_price`` when a
``short`` position is opened. Reduced by the same amounts when
relevant positions are being closed.
``portfolio_value``: The portfolio value during the simulation
period, ``current_stock_price * (opened_long - opened_short)``.
``earnings``: The accumulated earnings during the simulation
period. Increased by the ``current_price - opened_position_price``
when a ``long`` position is closed. Increased by the
``opened_position_price - current_price`` when a ``short`` position
is closed.
``balance``: The balance during the simulation period. It is the
``earnings + portfolio_value``.
_statistics (dict): Statistics about the simulation. contains the below
keys:
``number_of_trading_days``: the number of trading days in the
simulation round.
``number_of_buy_signals``: the number of ``buy`` signals produced
during the simulation period.
``number_of_ignored_buy_signals``: the number of ``buy`` signals
ignored because of the ``max_exposure`` limitation.
``number_of_sell_signals``: the number of ``sell`` signals produced
during the simulation period.
``number_of_ignored_sell_signals``: the number of ``sell`` signals
ignored because of the ``max_exposure`` limitation.
``last_stock_value``: The value of the stock at the end of the
simulation.
``last_exposure``: The ``exposure`` value at the end of the
simulation period.
``last_open_long_positions``: The number of the still opened
``long`` positions at the end of the simulation period.
``last_open_short_positions``: The number of the still opened
``short`` positions at the end of the simulation period.
``last_portfolio_value``: The ``portfolio_value`` at the end of the
simulation period.
``last_earnings``: The ``earnings`` at the end of the simulation
period.
``final_balance``: The ``balance`` at the end of the simulation
period.
Raises:
WrongTypeForInputParameter: Input argument has wrong type.
WrongValueForInputParameter: Unsupported value for input argument.
        NotValidInputDataForSimulation: Invalid ``close_values`` passed for
            the simulation.
"""
def __init__(self, input_data_index, close_values, max_exposure=None,
             short_exposure_factor=1.5):
    """Initialize the simulation state and validate the input arguments.

    See the class docstring for the full description of each argument.
    """
    self._input_data_index = input_data_index
    self._close_values = close_values
    self._max_exposure = max_exposure
    self._short_exposure_factor = short_exposure_factor
    # Validate input arguments (raises on invalid input, see class docstring)
    self._validateSimulationArguments()
    # Simulation portfolio, keeps a track of the entered positions during
    # the simulation. Position: `long`, `short` or `none`. Status: `open`,
    # `close` or none. Exposure: stock_price when position is long, and
    # short_exposure_factor * stock_price when position is short. Use
    # numpy array improved performance.
    # Columns are:
    # position: 0.0 is None, 1.0 is short, 2.0 is long
    # status: 0.0 is None, 1.0 is open, 2.0 is closed
    # exposure: float indicating the exposure value
    self._portfolio = np.zeros(shape=(len(self._input_data_index), 3),
                               dtype=np.float64)
    # Change type to numpy array for better performance
    self._close_values = self._close_values.to_numpy(dtype=np.float64,
                                                     copy=True)
    # Initialize simulation data structure (DataFrame); one row per trading
    # day, filled in incrementally as the simulation runs.
    self._simulation_data = pd.DataFrame(
        index=self._input_data_index,
        columns=['signal', 'open_trading_action', 'stock_value',
                 'exposure', 'portfolio_value', 'earnings', 'balance'
                 ],
        data=None)
    # Initialize statistics data structure (dict); recomputed at the end of
    # the simulation by _calculateSimulationStatistics().
    self._statistics = {
        'number_of_trading_days': 0,
        'number_of_buy_signals': 0,
        'number_of_ignored_buy_signals': 0,
        'number_of_sell_signals': 0,
        'number_of_ignored_sell_signals': 0,
        'last_stock_value': 0.0,
        'last_exposure': 0.0,
        'last_open_long_positions': 0,
        'last_open_short_positions': 0,
        'last_portfolio_value': 0.0,
        'last_earnings': 0.0,
        'final_balance': 0.0}
def _validateSimulationArguments(self):
    """
    Validates the input arguments passed in the constructor. input_data and
    ti_data are already validated by the tti.indicators package.
    Raises:
        WrongTypeForInputParameter: Input argument has wrong type.
        WrongValueForInputParameter: Unsupported value for input argument.
        NotValidInputDataForSimulation: Invalid ``close_values`` passed
            for the simulation.
    """
    # Validate input_data_index that is an index
    if not isinstance(self._input_data_index, pd.DatetimeIndex):
        raise NotValidInputDataForSimulation(
            'input_data_index', 'input_data_index should be of type ' +
            'pandas.DatetimeIndex but type ' +
            str(type(self._input_data_index)) +
            ' found.')
    # Sort data (ascending chronological order is assumed downstream)
    self._input_data_index = self._input_data_index.sort_values(
        ascending=True)
    # Validate close_values pandas.DataFrame (column presence, fill gaps)
    try:
        self._close_values = validateInputData(
            input_data=self._close_values, required_columns=['close'],
            indicator_name='TradingSimulation',
            fill_missing_values=True)
    except Exception as e:
        # Re-raise with the argument name that the caller actually passed.
        raise NotValidInputDataForSimulation(
            'close_values', str(e).replace('input_data', 'close_values'))
    if not self._close_values.index.equals(self._input_data_index):
        raise NotValidInputDataForSimulation(
            'close_values', 'Index of the `close_values` DataFrame ' +
            'should be the same as the index of the ' +
            '`input_data` argument in the indicator\'s ' +
            'constructor.')
    # Validate max_exposure: positive number or None (None = no limit).
    # NOTE(review): isinstance(..., int) also accepts bool values here.
    if isinstance(self._max_exposure, (int, float)):
        if self._max_exposure <= 0:
            raise WrongValueForInputParameter(
                self._max_exposure, 'max_exposure', '>0 or None')
    elif self._max_exposure is None:
        pass
    else:
        raise WrongTypeForInputParameter(
            type(self._max_exposure), 'max_exposure',
            'int or float or None')
    # Validate short_exposure_factor: number >= 1.0
    if isinstance(self._short_exposure_factor, (int, float)):
        if self._short_exposure_factor < 1.0:
            raise WrongValueForInputParameter(
                self._short_exposure_factor, 'short_exposure_factor',
                '>=1.0')
    else:
        raise WrongTypeForInputParameter(
            type(self._short_exposure_factor), 'short_exposure_factor',
            'int or float')
def _calculateSimulationStatistics(self):
    """
    Calculate simulation statistics, at the end of the simulation.
    """
    # Simulation rounds which have been executed till now: rows whose
    # 'signal' column has been filled in.
    executed_simulation_rounds = len(
        self._simulation_data.dropna(subset=['signal'],
                                     inplace=False).index)
    self._statistics = {
        'number_of_trading_days': executed_simulation_rounds,
        'number_of_buy_signals':
            len(self._simulation_data[
                self._simulation_data['signal'] == 'buy'].index),
        # 'Ignored' = a signal was produced but no position was opened
        # (open_trading_action stayed 'none'), e.g. due to max_exposure.
        'number_of_ignored_buy_signals':
            len(self._simulation_data[
                (self._simulation_data['signal'] == 'buy') &
                (self._simulation_data['open_trading_action'] == 'none')].
                index),
        'number_of_sell_signals':
            len(self._simulation_data[
                self._simulation_data['signal'] == 'sell'].index),
        'number_of_ignored_sell_signals':
            len(self._simulation_data[
                (self._simulation_data['signal'] == 'sell') &
                (self._simulation_data['open_trading_action'] == 'none')]
                .index),
        # 'last_*' values are read from the final executed row; all default
        # to 0.0 when no round has been executed.
        'last_stock_value': 0.0 if executed_simulation_rounds == 0
        else self._simulation_data['stock_value'].iat[
            executed_simulation_rounds - 1].round(2),
        'last_exposure': 0.0 if executed_simulation_rounds == 0
        else round(self._simulation_data['exposure'].iat[
            executed_simulation_rounds - 1], 2),
        # Portfolio columns: 0 = position (1.0 short, 2.0 long),
        # 1 = status (1.0 open); count positions still open at the end.
        'last_open_long_positions': np.count_nonzero(
            self._portfolio[
                (self._portfolio[:, 0] == 2.0) &
                (self._portfolio[:, 1] == 1.0), 0]),
        'last_open_short_positions': np.count_nonzero(
            self._portfolio[
                (self._portfolio[:, 0] == 1.0) &
                (self._portfolio[:, 1] == 1.0), 0]),
        'last_portfolio_value': 0.0 if executed_simulation_rounds == 0
        else round(self._simulation_data['portfolio_value'].iat[
            executed_simulation_rounds - 1], 2),
        'last_earnings': 0.0 if executed_simulation_rounds == 0
        else round(self._simulation_data['earnings'].iat[
            executed_simulation_rounds - 1], 2),
        'final_balance': 0.0 if executed_simulation_rounds == 0
        else round(self._simulation_data['balance'].iat[
            executed_simulation_rounds - 1], 2)
    }
def _calculatePortfolioValue(self, i_index):
"""
Calculate the portfolio value (for the opened positions).
Args:
i_index (int): The integer index of the current simulation round.
Refers to the | |
= GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')
pointings_radec = NP.hstack(((lst-pointings_hadec[:,0]).reshape(-1,1), pointings_hadec[:,1].reshape(-1,1)))
pointings_radec[:,0] = pointings_radec[:,0] % 360.0
pointings_ha = pointings_hadec[:,0]
pointings_ha[pointings_ha > 180.0] = pointings_ha[pointings_ha > 180.0] - 360.0
pointings_ra = pointings_radec[:,0]
pointings_ra[pointings_ra > 180.0] = pointings_ra[pointings_ra > 180.0] - 360.0
pointings_dec = pointings_radec[:,1]
infile = '/data3/t_nithyanandan/'+project_dir+'/'+telescope_str+'multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_'+fg_str+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'.fits'
hdulist = fits.open(infile)
lst_select = hdulist['POINTING AND PHASE CENTER INFO'].data['LST']
hdulist.close()
lst_select[lst_select > 180.0] -= 360.0
fig = PLT.figure(figsize=(6,6))
ax1a = fig.add_subplot(111)
ax1a.set_xlabel('Local Sidereal Time [hours]', fontsize=18, weight='medium')
ax1a.set_ylabel('Longitude [degrees]', fontsize=18, weight='medium')
ax1a.set_xlim((lst_wrapped.min()-1)/15.0, (lst_wrapped.max()-1)/15.0)
ax1a.set_ylim(pointings_ha.min()-15.0, pointings_ha.max()+15.0)
ax1a.plot(lst_wrapped/15.0, pointings_ha, 'k--', lw=2, label='HA')
ax1a.plot(lst_wrapped/15.0, pointings_ra, 'k-', lw=2, label='RA')
for i in xrange(lst_select.size):
if i == 0:
ax1a.axvline(x=lst_select[i]/15.0, color='gray', ls='-.', lw=2, label='Selected LST')
else:
ax1a.axvline(x=lst_select[i]/15.0, color='gray', ls='-.', lw=2)
ax1a.tick_params(which='major', length=18, labelsize=12)
ax1a.tick_params(which='minor', length=12, labelsize=12)
# legend1a = ax1a.legend(loc='lower right')
# legend1a.draw_frame(False)
for axis in ['top','bottom','left','right']:
ax1a.spines[axis].set_linewidth(2)
xticklabels = PLT.getp(ax1a, 'xticklabels')
yticklabels = PLT.getp(ax1a, 'yticklabels')
PLT.setp(xticklabels, fontsize=15, weight='medium')
PLT.setp(yticklabels, fontsize=15, weight='medium')
ax1b = ax1a.twinx()
ax1b.set_ylabel('Declination [degrees]', fontsize=18, weight='medium')
ax1b.set_ylim(pointings_dec.min()-5.0, pointings_dec.max()+5.0)
ax1b.plot(lst_wrapped/15.0, pointings_dec, 'k:', lw=2, label='Dec')
ax1b.tick_params(which='major', length=12, labelsize=12)
# legend1b = ax1b.legend(loc='upper right')
# legend1b.draw_frame(False)
yticklabels = PLT.getp(ax1b, 'yticklabels')
PLT.setp(yticklabels, fontsize=15, weight='medium')
decline = PLT.Line2D(range(1), range(0), color='k', ls=':', lw=2)
haline = PLT.Line2D(range(1), range(0), color='k', ls='--', lw=2)
raline = PLT.Line2D(range(1), range(0), color='k', ls='-', lw=2)
lstline = PLT.Line2D(range(1), range(0), color='gray', ls='-.', lw=2)
legend = PLT.legend((haline, raline, decline, lstline), ('HA', 'RA', 'Dec', 'Chosen LST'), loc='lower right', frameon=False)
fig.subplots_adjust(right=0.85)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+obs_mode+'_pointings.eps', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+obs_mode+'_pointings.png', bbox_inches=0)
#############################################################################
if plot_02 or plot_03 or plot_04 or plot_12:
infile = '/data3/t_nithyanandan/'+project_dir+'/'+telescope_str+'multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_'+fg_str+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'.fits'
hdulist = fits.open(infile)
n_snaps = hdulist[0].header['n_acc']
lst = hdulist['POINTING AND PHASE CENTER INFO'].data['LST']
hdulist.close()
backdrop_xsize = 100
xmin = -180.0
xmax = 180.0
ymin = -90.0
ymax = 90.0
xgrid, ygrid = NP.meshgrid(NP.linspace(xmax, xmin, backdrop_xsize), NP.linspace(ymin, ymax, backdrop_xsize/2))
xvect = xgrid.ravel()
yvect = ygrid.ravel()
pb_snapshots = []
pbx_MWA_snapshots = []
pby_MWA_snapshots = []
src_ind_csm_snapshots = []
src_ind_gsm_snapshots = []
dsm_snapshots = []
if plot_03 or plot_12:
freq_SUMSS = 0.843 # in GHz
SUMSS_file = '/data3/t_nithyanandan/project_MWA/foregrounds/sumsscat.Mar-11-2008.txt'
catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))
ra_deg_SUMSS = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)
dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype="|S3")
sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])
sgn_dec = 1.0*NP.ones(dec_dd.size)
sgn_dec[sgn_dec_str == '-'] = -1.0
dec_deg_SUMSS = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)
fmajax = catalog[:,7]
fminax = catalog[:,8]
fpa = catalog[:,9]
dmajax = catalog[:,10]
dminax = catalog[:,11]
PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[PS_ind]
dec_deg_SUMSS = dec_deg_SUMSS[PS_ind]
fint = catalog[PS_ind,6] * 1e-3
if spindex_seed is None:
spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
else:
NP.random.seed(spindex_seed)
spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
fmajax = fmajax[PS_ind]
fminax = fminax[PS_ind]
fpa = fpa[PS_ind]
dmajax = dmajax[PS_ind]
dminax = dminax[PS_ind]
bright_source_ind = fint >= 10.0 * (freq_SUMSS*1e9/freq)**spindex_SUMSS
ra_deg_SUMSS = ra_deg_SUMSS[bright_source_ind]
dec_deg_SUMSS = dec_deg_SUMSS[bright_source_ind]
fint = fint[bright_source_ind]
fmajax = fmajax[bright_source_ind]
fminax = fminax[bright_source_ind]
fpa = fpa[bright_source_ind]
dmajax = dmajax[bright_source_ind]
dminax = dminax[bright_source_ind]
spindex_SUMSS = spindex_SUMSS[bright_source_ind]
valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[valid_ind]
dec_deg_SUMSS = dec_deg_SUMSS[valid_ind]
fint = fint[valid_ind]
fmajax = fmajax[valid_ind]
fminax = fminax[valid_ind]
fpa = fpa[valid_ind]
spindex_SUMSS = spindex_SUMSS[valid_ind]
freq_catalog = freq_SUMSS*1e9 + NP.zeros(fint.size)
catlabel = NP.repeat('SUMSS', fint.size)
ra_deg = ra_deg_SUMSS + 0.0
dec_deg = dec_deg_SUMSS
spindex = spindex_SUMSS
majax = fmajax/3.6e3
minax = fminax/3.6e3
fluxes = NP.copy(fint)
nvss_file = '/data3/t_nithyanandan/project_MWA/foregrounds/NVSS_catalog.fits'
freq_NVSS = 1.4 # in GHz
hdulist = fits.open(nvss_file)
ra_deg_NVSS = hdulist[1].data['RA(2000)']
dec_deg_NVSS = hdulist[1].data['DEC(2000)']
nvss_fpeak = hdulist[1].data['PEAK INT']
nvss_majax = hdulist[1].data['MAJOR AX']
nvss_minax = hdulist[1].data['MINOR AX']
hdulist.close()
if spindex_seed is None:
spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
else:
NP.random.seed(2*spindex_seed)
spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
not_in_SUMSS_ind = NP.logical_and(dec_deg_NVSS > -30.0, dec_deg_NVSS <= min(90.0, latitude+90.0))
bright_source_ind = nvss_fpeak >= 10.0 * (freq_NVSS*1e9/freq)**(spindex_NVSS)
PS_ind = NP.sqrt(nvss_majax**2-(0.75/60.0)**2) < 14.0/3.6e3
count_valid = NP.sum(NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind))
nvss_fpeak = nvss_fpeak[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]
freq_catalog = NP.concatenate((freq_catalog, freq_NVSS*1e9 + NP.zeros(count_valid)))
catlabel = NP.concatenate((catlabel, NP.repeat('NVSS',count_valid)))
ra_deg = NP.concatenate((ra_deg, ra_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
dec_deg = NP.concatenate((dec_deg, dec_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
spindex = NP.concatenate((spindex, spindex_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
majax = NP.concatenate((majax, nvss_majax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
minax = NP.concatenate((minax, nvss_minax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
fluxes = NP.concatenate((fluxes, nvss_fpeak))
ra_deg_wrapped = ra_deg.ravel()
ra_deg_wrapped[ra_deg_wrapped > 180.0] -= 360.0
# csmctlg = SM.Catalog(catlabel, freq_catalog, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), fluxes, spectral_index=spindex, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
csmskymod = SM.SkyModel(catlabel, freq, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
dsm_file = '/data3/t_nithyanandan/project_MWA/foregrounds/gsmdata_{0:.1f}_MHz_nside_{1:0d}.fits'.format(freq/1e6,nside)
hdulist = fits.open(dsm_file)
dsm_table = hdulist[1].data
dsm_ra_deg = dsm_table['RA']
dsm_dec_deg = dsm_table['DEC']
dsm_temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]
dsm = HP.cartview(dsm_temperatures.ravel(), coord=['G','C'], rot=[0,0,0], xsize=backdrop_xsize, return_projected_map=True)
dsm = dsm.ravel()
for i in xrange(n_snaps):
havect = lst[i] - xvect
altaz = GEOM.hadec2altaz(NP.hstack((havect.reshape(-1,1),yvect.reshape(-1,1))), latitude, units='degrees')
dircos = GEOM.altaz2dircos(altaz, units='degrees')
roi_altaz = NP.asarray(NP.where(altaz[:,0] >= 0.0)).ravel()
az = altaz[:,1] + 0.0
az[az > 360.0 - 0.5*180.0/n_sky_sectors] -= 360.0
roi_sector_altaz = NP.asarray(NP.where(NP.logical_or(NP.logical_and(az[roi_altaz] >= -0.5*180.0/n_sky_sectors + sky_sector*180.0/n_sky_sectors, az[roi_altaz] < -0.5*180.0/n_sky_sectors + (sky_sector+1)*180.0/n_sky_sectors), NP.logical_and(az[roi_altaz] >= 180.0 - 0.5*180.0/n_sky_sectors + sky_sector*180.0/n_sky_sectors, az[roi_altaz] < 180.0 - 0.5*180.0/n_sky_sectors + (sky_sector+1)*180.0/n_sky_sectors)))).ravel()
pb = NP.empty(xvect.size)
pb.fill(NP.nan)
pbx_MWA_vect = NP.empty(xvect.size)
pbx_MWA_vect.fill(NP.nan)
pby_MWA_vect = NP.empty(xvect.size)
pby_MWA_vect.fill(NP.nan)
pb[roi_altaz] = PB.primary_beam_generator(altaz[roi_altaz,:], freq, telescope=telescope, skyunits='altaz', freq_scale='Hz', pointing_info=roi.pinfo[i])
if (telescope_id == 'mwa') or (phased_array):
pbx_MWA, pby_MWA = MWAPB.MWA_Tile_advanced(NP.radians(90.0-altaz[roi_altaz,0]).reshape(-1,1), NP.radians(altaz[roi_altaz,1]).reshape(-1,1), freq=185e6, delays=roi.pinfo[i]['delays']/435e-12)
pbx_MWA_vect[roi_altaz] = pbx_MWA.ravel()
pby_MWA_vect[roi_altaz] = pby_MWA.ravel()
pb_snapshots += [pb]
pbx_MWA_snapshots += [pbx_MWA_vect]
pby_MWA_snapshots += [pby_MWA_vect]
if plot_03 or plot_12:
# csm_hadec = NP.hstack(((lst[i]-csmctlg.location[:,0]).reshape(-1,1), csmctlg.location[:,1].reshape(-1,1)))
csm_hadec = NP.hstack(((lst[i]-csmskymod.location[:,0]).reshape(-1,1), csmskymod.location[:,1].reshape(-1,1)))
csm_altaz = GEOM.hadec2altaz(csm_hadec, latitude, units='degrees')
roi_csm_altaz = NP.asarray(NP.where(csm_altaz[:,0] >= 0.0)).ravel()
src_ind_csm_snapshots += [roi_csm_altaz]
dsm_snapshot = NP.empty(xvect.size)
dsm_snapshot.fill(NP.nan)
dsm_snapshot[roi_altaz] = dsm[roi_altaz]
dsm_snapshots += [dsm_snapshot]
if plot_02:
descriptor_str = ['off-zenith', 'zenith']
fig, axs = PLT.subplots(n_snaps, sharex=True, sharey=True, figsize=(6,6))
for j in xrange(n_snaps):
pbsky = axs[j].imshow(pb_snapshots[j].reshape(-1,backdrop_xsize), origin='lower', extent=(NP.amax(xvect), NP.amin(xvect), NP.amin(yvect), NP.amax(yvect)), norm=PLTC.LogNorm(vmin=1e-3, vmax=1.0), cmap=CM.jet)
axs[j].set_xlim(xvect.max(), xvect.min())
axs[j].set_ylim(yvect.min(), yvect.max())
axs[j].grid(True, which='both')
axs[j].set_aspect('auto')
axs[j].tick_params(which='major', length=12, labelsize=12)
axs[j].tick_params(which='minor', length=6)
axs[j].locator_params(axis='x', nbins=5)
axs[j].text(0.5, 0.9, descriptor_str[j], transform=axs[j].transAxes, fontsize=14, weight='semibold', ha='center', color='black')
cbax = fig.add_axes([0.9, 0.122, 0.02, 0.84])
cbar = fig.colorbar(pbsky, cax=cbax, orientation='vertical')
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r'$\delta$ [degrees]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_xlabel(r'$\alpha$ [degrees]', fontsize=16, weight='medium', labelpad=20)
# PLT.tight_layout()
fig.subplots_adjust(right=0.9)
fig.subplots_adjust(top=0.98)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'powerpattern_'+ground_plane_str+snapshot_type_str+obs_mode+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'powerpattern_'+ground_plane_str+snapshot_type_str+obs_mode+'.eps', bbox_inches=0)
# Plot each snapshot separately
for j in xrange(n_snaps):
fig = PLT.figure(figsize=(6,4))
ax = fig.add_subplot(111)
pbsky = ax.imshow(pb_snapshots[j].reshape(-1,backdrop_xsize), origin='lower', extent=(NP.amax(xvect), NP.amin(xvect), NP.amin(yvect), NP.amax(yvect)), norm=PLTC.LogNorm(vmin=1e-5, vmax=1.0), cmap=CM.jet)
ax.set_xlim(xvect.max(), xvect.min())
ax.set_ylim(yvect.min(), yvect.max())
ax.grid(True, which='both')
ax.set_aspect('auto')
ax.set_ylabel(r'$\delta$ [degrees]', fontsize=16, weight='medium')
ax.set_xlabel(r'$\alpha$ [degrees]', fontsize=16, weight='medium')
# ax.tick_params(which='major', length=12, labelsize=12)
# ax.tick_params(which='minor', length=6)
# ax.locator_params(axis='x', nbins=5)
# ax.text(0.5, 0.9, descriptor_str[j], transform=ax.transAxes, fontsize=14, weight='semibold', ha='center', color='black')
cbax = fig.add_axes([0.9, 0.15, 0.02, 0.81])
cbar = fig.colorbar(pbsky, cax=cbax, orientation='vertical')
# PLT.tight_layout()
fig.subplots_adjust(right=0.89)
fig.subplots_adjust(top=0.96)
fig.subplots_adjust(bottom=0.15)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'powerpattern_'+ground_plane_str+snapshot_type_str+obs_mode+'_snapshot_{0:1d}.png'.format(j), bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'powerpattern_'+ground_plane_str+snapshot_type_str+obs_mode+'_snapshot_{0:1d}.eps'.format(j), bbox_inches=0)
if plot_03 or plot_12:
# csm_fluxes = csmctlg.flux_density * (freq/csmctlg.frequency)**csmctlg.spectral_index
csm_fluxes = csmskymod.spec_parms['flux-scale'] * (freq/csmskymod.spec_parms['freq-ref'])**csmskymod.spec_parms['power-law-index']
if plot_03:
# 03) Plot foreground models with power pattern contours for snapshots
descriptor_str = ['off-zenith', 'zenith']
n_fg_ticks = 5
fg_ticks = NP.round(NP.logspace(NP.log10(dsm.min()), NP.log10(dsm.max()), n_fg_ticks)).astype(NP.int)
fig, axs = PLT.subplots(n_snaps, sharex=True, sharey=True, figsize=(6,6))
for j in xrange(n_snaps):
dsmsky = axs[j].imshow(dsm_snapshots[j].reshape(-1,backdrop_xsize), origin='lower', extent=(NP.amax(xvect), NP.amin(xvect), NP.amin(yvect), NP.amax(yvect)), norm=PLTC.LogNorm(vmin=dsm.min(), vmax=dsm.max()), cmap=CM.jet)
pbskyc = axs[j].contour(xgrid[0,:], ygrid[:,0], pb_snapshots[j].reshape(-1,backdrop_xsize), levels=[0.001953125, 0.0078125, 0.03125, 0.125, 0.5], colors='k', linewidths=1.5)
axs[j].set_xlim(xvect.max(), xvect.min())
axs[j].set_ylim(yvect.min(), yvect.max())
axs[j].grid(True, which='both')
axs[j].set_aspect('auto')
axs[j].tick_params(which='major', length=12, labelsize=12)
axs[j].tick_params(which='minor', length=6)
axs[j].locator_params(axis='x', nbins=5)
axs[j].text(0.5, 0.9, descriptor_str[j], transform=axs[j].transAxes, fontsize=14, weight='semibold', ha='center', color='black')
cbax = fig.add_axes([0.85, 0.125, 0.02, 0.84])
cbar = fig.colorbar(dsmsky, cax=cbax, orientation='vertical')
cbar.set_ticks(fg_ticks.tolist())
cbar.set_ticklabels(fg_ticks.tolist())
cbax.set_ylabel('Temperature [K]', labelpad=0, fontsize=14)
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r'$\delta$ [degrees]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_xlabel(r'$\alpha$ [degrees]', fontsize=16, weight='medium', labelpad=20)
# PLT.tight_layout()
fig.subplots_adjust(right=0.85)
fig.subplots_adjust(top=0.98)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/dsm.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/dsm.eps', bbox_inches=0)
n_fg_ticks = 5
fg_ticks = NP.round(NP.logspace(NP.log10(csm_fluxes.min()), NP.log10(csm_fluxes.max()), n_fg_ticks)).astype(NP.int)
fig, axs = PLT.subplots(n_snaps, sharex=True, sharey=True, figsize=(6,6))
for j in xrange(n_snaps):
csmsky = axs[j].scatter(ra_deg_wrapped[src_ind_csm_snapshots[j]], dec_deg[src_ind_csm_snapshots[j]], c=csm_fluxes[src_ind_csm_snapshots[j]], norm=PLTC.LogNorm(vmin=csm_fluxes.min(), vmax=csm_fluxes.max()), cmap=CM.jet, edgecolor='none', s=20)
pbskyc = axs[j].contour(xgrid[0,:], ygrid[:,0], pb_snapshots[j].reshape(-1,backdrop_xsize), levels=[0.001953125, 0.0078125, 0.03125, 0.125, 0.5], colors='k', linewidths=1.5)
axs[j].set_xlim(xvect.max(), xvect.min())
axs[j].set_ylim(yvect.min(), yvect.max())
axs[j].grid(True, which='both')
axs[j].set_aspect('auto')
axs[j].tick_params(which='major', length=12, labelsize=12)
axs[j].tick_params(which='minor', length=6)
axs[j].locator_params(axis='x', nbins=5)
axs[j].text(0.5, 0.9, descriptor_str[j], transform=axs[j].transAxes, fontsize=14, weight='semibold', ha='center', color='black')
cbax = fig.add_axes([0.88, 0.125, 0.02, 0.84])
cbar = fig.colorbar(csmsky, | |
velocity along x-axis.
vinf_lims: limit of velocity at large distance along x-axis.
"""
# check input
sig_x_lims = np.array(sig_x_lims)
assert np.all(sig_x_lims > 0.)
sig_y_lims = np.array(sig_y_lims)
assert np.all(sig_y_lims > 0.)
#alpha_lims = np.array(alpha_lims)
#assert np.all(alpha_lims >= 1.)
vmax_lims = np.array(vmax_lims)
vinf_lims = np.array(vinf_lims)
sign_vmax = np.sign(vmax_lims)
sign_vinf = np.sign(vinf_lims)
# check vmax's and v0's have consistent directions and magnitudes
all_positive = np.isin(sign_vmax, [0,1])
all_negative = np.isin(sign_vmax, [0,-1])
assert np.all(all_positive) or np.all(all_negative)
all_positive = np.isin(sign_vinf, [0,1])
all_negative = np.isin(sign_vinf, [0,-1])
assert np.all(all_positive) or np.all(all_negative)
assert np.all(np.abs(vmax_lims) >= np.abs(vinf_lims))
# linearly interpolate and reshape inputs
sig_xp = self.linear_interpolate_t(*sig_x_lims)
sig_yp = self.linear_interpolate_t(*sig_y_lims)
rmax = self.linear_interpolate_t(*rmax_lims)
alpha = (rmax/sig_xp+1.)/(rmax/sig_xp)
vmax = self.linear_interpolate_t(*vmax_lims)
vinf = self.linear_interpolate_t(*vinf_lims)
sig_xp = sig_xp[:, np.newaxis, np.newaxis]
sig_yp = sig_yp[:, np.newaxis, np.newaxis]
alpha = alpha[:, np.newaxis, np.newaxis]
vmax = vmax[:, np.newaxis, np.newaxis]
vinf = vinf[:, np.newaxis, np.newaxis]
# make mu_v maps
th = np.arctan(self.xxp/sig_xp/self.yyp*sig_yp)
idx = np.where(self.yyp==0)
th[:, idx[0], idx[1]] = np.pi/2.
th = np.abs(th)
th = th/np.pi*2.
self.th = th
rr2 = (self.xxp/sig_xp)**2 + (self.yyp/sig_yp)**2
rr = rr2**0.5
alpha_m1 = alpha - 1.
k = (vmax-vinf) * alpha**alpha / alpha_m1**alpha_m1
mu_v = vinf + k * rr / (rr+1.)**alpha
mu_v *= th
mu_v *= np.sign(self.xxp)
self.mu_v_pars = dict(sig_x_lims=sig_x_lims,
sig_y_lims=sig_y_lims,
rmax_lims=rmax_lims,
vmax_lims=vmax_lims,
vinf_lims=vinf_lims)
self.mu_v = mu_v
def set_sig_v(self,
              sig_x_lims=(0.5, 0.5),
              sig_y_lims=(0.5, 0.1),
              alpha_lims=(1.5, 2.5),
              sig_v_in_lims=(50., 250.),
              sig_v_out_lims=(10., 50)):
    """Set age-and-space dependent velocity dispersion maps.

    Dispersion is constant on ellipses with axis lengths sig_x, sig_y and
    interpolates (in log space) between the central value sig_v_in and the
    outer value sig_v_out with power-law slope alpha. Each parameter varies
    linearly with time between its (end, start) limits. Stores the result
    in ``self.sig_v`` and the inputs in ``self.sig_v_pars``.

    Args:
        sig_x_lims: (end,start) value of the x-extent of equicontours.
        sig_y_lims: (end,start) value of the y-extent of equicontours.
        alpha_lims: (end,start) value of power-law slope.
        sig_v_in_lims: (end,start) value of central dispersion.
        sig_v_out_lims: (end,start) value of outer dispersion.
    """
    # validate limits
    sig_x_lims = np.array(sig_x_lims)
    sig_y_lims = np.array(sig_y_lims)
    alpha_lims = np.array(alpha_lims)
    sig_v_in_lims = np.array(sig_v_in_lims)
    sig_v_out_lims = np.array(sig_v_out_lims)
    assert np.all(sig_x_lims > 0.)
    assert np.all(sig_y_lims > 0.)
    assert np.all(alpha_lims >= 1.)
    assert np.all(sig_v_in_lims > 0.)
    assert np.all(sig_v_out_lims > 0.)

    # interpolate each parameter in time and add trailing axes so it
    # broadcasts against the (x, y) spatial grid
    def _interp_map(lims):
        # one value per time step, shaped (t, 1, 1)
        values = self.linear_interpolate_t(*lims)
        return values[:, np.newaxis, np.newaxis]

    ell_x = _interp_map(sig_x_lims)
    ell_y = _interp_map(sig_y_lims)
    slope = _interp_map(alpha_lims)
    disp_in = _interp_map(sig_v_in_lims)
    disp_out = _interp_map(sig_v_out_lims)

    # elliptical radius of every pixel, per time step
    ell_r = np.sqrt((self.xxp/ell_x)**2 + (self.yyp/ell_y)**2)
    # log-space interpolation: center -> sig_v_in, large radius -> sig_v_out
    log_disp = np.log(disp_out) \
        + (np.log(disp_in) - np.log(disp_out)) * slope**-ell_r
    self.sig_v_pars = dict(sig_x_lims=sig_x_lims,
                           sig_y_lims=sig_y_lims,
                           alpha_lims=alpha_lims,
                           sig_v_in_lims=sig_v_in_lims,
                           sig_v_out_lims=sig_v_out_lims)
    self.sig_v = np.exp(log_disp)
def get_p_t(self, density=True, light_weighted=False):
    """Get p(t)

    Args:
        density (bool): return a probability density (True) or the
            volume-element weighted probability (False)
        light_weighted (bool): return the light-weighted (True) or
            mass-weighted (False) quantity

    Returns:
        array
    """
    if light_weighted is not False:
        # marginalise metallicity out of the light-weighted p(t,z)
        joint_tz = self.get_p_tz(density=density, light_weighted=True)
        if density is True:
            joint_tz = joint_tz * self.cube.ssps.delta_z
        return np.sum(joint_tz, 1)
    result = self.p_t.copy()
    if density is False:
        result *= self.cube.ssps.delta_t
    return result
def get_p_x_t(self, density=True, light_weighted=False):
    """Get p(x|t)

    Args:
        density (bool): return a probability density (True) or the
            volume-element weighted probability (False)
        light_weighted (bool): return the light-weighted (True) or
            mass-weighted (False) quantity

    Returns:
        array
    """
    if light_weighted is not False:
        # marginalise z from the light-weighted p(t,x,z), condition on t
        joint_txz = self.get_p_txz(density=density, light_weighted=True)
        if density is True:
            joint_txz = joint_txz * self.cube.ssps.delta_z
        joint_tx = np.sum(joint_txz, -1)
        marg_t = self.get_p_t(density=density, light_weighted=True)
        conditional = (joint_tx.T/marg_t).T
        return np.einsum('txy->xyt', conditional)
    result = self.p_x_t.copy()
    if density is False:
        result *= (self.cube.dx * self.cube.dy)
    return result
def get_p_tx(self, density=True, light_weighted=False):
    """Get p(t,x)

    Args:
        density (bool): return a probability density (True) or the
            volume-element weighted probability (False)
        light_weighted (bool): return the light-weighted (True) or
            mass-weighted (False) quantity

    Returns:
        array
    """
    kwargs = dict(density=density, light_weighted=light_weighted)
    # chain rule: p(t,x) = p(x|t) p(t), evaluated on (x,y,t) axes
    joint_xyt = self.get_p_x_t(**kwargs) * self.get_p_t(**kwargs)
    return np.einsum('xyt->txy', joint_xyt)
def get_p_z_tx(self, density=True, light_weighted=False):
    """Get p(z|t,x)

    Args:
        density (bool): return a probability density (True) or the
            volume-element weighted probability (False)
        light_weighted (bool): return the light-weighted (True) or
            mass-weighted (False) quantity

    Returns:
        array
    """
    if light_weighted is False:
        result = self.p_z_tx.copy()
    else:
        # condition the light-weighted joint density on (t,x)
        dens_txz = self.get_p_txz(density=True, light_weighted=True)
        dens_tx = self.get_p_tx(density=True, light_weighted=True)
        result = np.einsum('txyz->ztxy', (dens_txz.T/dens_tx.T).T)
    if density is False:
        # weight by the z bin widths to get per-bin probabilities
        result *= self.cube.ssps.delta_z[:, np.newaxis, np.newaxis, np.newaxis]
    return result
def get_p_txz(self, density=True, light_weighted=False):
    """Get p(t,x,z)

    Args:
        density (bool): return a probability density (True) or the
            volume-element weighted probability (False)
        light_weighted (bool): return the light-weighted (True) or
            mass-weighted (False) quantity

    Returns:
        array
    """
    na = np.newaxis
    # chain rule: p(t,x,z) = p(t) p(x|t) p(z|t,x), aligned on (t,x,y,z)
    marg_t = self.get_p_t(density=density)[:, na, na, na]
    cond_x = np.rollaxis(self.get_p_x_t(density=density), 2, 0)[:, :, :, na]
    cond_z = np.rollaxis(self.get_p_z_tx(density=density), 0, 4)
    joint = marg_t * cond_x * cond_z
    if light_weighted:
        # reweight by the SSP light weights and renormalise
        weights = self.cube.ssps.light_weights[:, na, na, :]
        norm = np.sum(self.get_p_txz(density=False) * weights)
        joint = joint * weights / norm
    return joint
def get_p_x(self, density=True, light_weighted=False):
    """Get p(x)

    Args:
        density (bool): return a probability density (True) or the
            volume-element weighted probability (False)
        light_weighted (bool): return the light-weighted (True) or
            mass-weighted (False) quantity

    Returns:
        array
    """
    if light_weighted is False:
        # marginalise t: sum_t p(x|t) P(t); P(t) must be a probability
        return np.sum(
            self.get_p_x_t(density=density) * self.get_p_t(density=False), -1)
    na = np.newaxis
    ssps = self.cube.ssps
    p_txz = self.get_p_txz(density=density, light_weighted=True)
    if density is False:
        return np.sum(p_txz, (0, 3))
    # densities need the (t,z) bin sizes before marginalising
    bin_sizes = ssps.delta_t[:, na, na, na] * ssps.delta_z[na, na, na, :]
    return np.sum(p_txz * bin_sizes, (0, 3))
def get_p_z(self, density=True, light_weighted=False):
    """Get p(z)

    Args:
        density (bool): return a probability density (True) or the
            volume-element weighted probability (False)
        light_weighted (bool): return the light-weighted (True) or
            mass-weighted (False) quantity

    Returns:
        array
    """
    na = np.newaxis
    if light_weighted is False:
        cond_z = self.get_p_z_tx(density=density)
        # marginalising over (t,x) needs probabilities, not densities
        P_x_t = np.einsum('xyt->txy', self.get_p_x_t(density=False))[na, :, :, :]
        P_t = self.get_p_t(density=False)[na, :, na, na]
        return np.sum(cond_z * P_x_t * P_t, (1, 2, 3))
    p_tz = self.get_p_tz(density=density, light_weighted=True)
    if density is False:
        return np.sum(p_tz, 0)
    # weight the density by the t bin widths before summing out t
    return np.sum(p_tz * self.cube.ssps.delta_t[:, na], 0)
def get_p_tz_x(self, density=True, light_weighted=False):
    """Get p(t,z|x)

    Args:
        density (bool): return a probability density (True) or the
            volume-element weighted probability (False)
        light_weighted (bool): return the light-weighted (True) or
            mass-weighted (False) quantity

    Returns:
        array
    """
    na = np.newaxis
    # evaluate joint and marginal with the same density setting: the x
    # volume element appears in numerator and denominator, so it cancels
    joint = self.get_p_txz(density=density)
    marg_x = self.get_p_x(density=density)
    result = np.rollaxis(joint / marg_x[na, :, :, na], 3, 1)  # txz -> tzx
    if light_weighted:
        weights = self.cube.ssps.light_weights[:, :, na, na]
        norm = np.sum(self.get_p_tz_x(density=False) * weights, (0, 1))
        result = result * weights / norm
    return result
def get_p_tz(self, density=True, light_weighted=False):
    """Get p(t,z)

    Args:
        density (bool): return a probability density (True) or the
            volume-element weighted probability (False)
        light_weighted (bool): return the light-weighted (True) or
            mass-weighted (False) quantity

    Returns:
        array
    """
    # marginalise x: sum_x p(t,z|x) P(x), with P(x) a probability
    cond_tz = self.get_p_tz_x(density=density)
    P_x = self.get_p_x(density=False)
    result = np.sum(cond_tz * P_x, (2, 3))
    if light_weighted:
        weights = self.cube.ssps.light_weights
        norm = np.sum(self.get_p_tz(density=False) * weights)
        result = result * weights / norm
    return result
def get_p_v_tx(self, v_edg, density=True, light_weighted=False):
"""Get p(v|t,x)
Args:
v_edg : array of velocity-bin edges to evaluate the quantity
density (bool): whether to return probabilty density (True) or the
volume-element weighted probabilty (False)
light_weighted (bool): whether to return light-weighted (True) or
mass-weighted (False) quantity
Returns:
array
"""
na = np.newaxis
if light_weighted is False:
v_edg = v_edg[:, na, na, na]
norm = stats.norm(loc=self.mu_v, scale=self.sig_v)
p_v_tx = norm.cdf(v_edg[1:]) - norm.cdf(v_edg[:-1])
if density is True:
dv = v_edg[1:] - v_edg[:-1]
p_v_tx /= dv
else:
p_tvxz = self.get_p_tvxz(v_edg, density=True, light_weighted=True)
if density is False:
dv = v_edg[1:] - v_edg[:-1]
dv = dv[na, :, na, na, na]
p_tvxz = p_tvxz*dv
ssps = self.cube.ssps
p_tvx = np.sum(p_tvxz*ssps.delta_z, -1)
p_x_t = self.get_p_x_t(density=True, light_weighted=True)
p_t = self.get_p_t(density=True, light_weighted=True)
p_xt = | |
<reponame>Zer0Credibility/sympy<gh_stars>0
from sympy import symbols, IndexedBase, Identity, cos, Inverse
from sympy.codegen.array_utils import (CodegenArrayContraction,
CodegenArrayTensorProduct, CodegenArrayDiagonal,
CodegenArrayPermuteDims, CodegenArrayElementwiseAdd,
_codegen_array_parse, _recognize_matrix_expression, _RecognizeMatOp,
_RecognizeMatMulLines, _unfold_recognized_expr,
parse_indexed_expression, recognize_matrix_expression,
parse_matrix_expression)
from sympy import MatrixSymbol, Sum
from sympy.combinatorics import Permutation
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.matrices import Trace, MatAdd, MatMul, Transpose
from sympy.utilities.pytest import raises
from sympy.tensor.array import permutedims, tensorproduct, tensorcontraction
from sympy.matrices.expressions.diagonal import DiagMatrix
# Shared fixtures for the codegen array tests below.
# A, B are indexed (array) bases; i..n are symbolic indices/dimensions.
A, B = symbols("A B", cls=IndexedBase)
i, j, k, l, m, n = symbols("i j k l m n")
# Square k-by-k matrix symbols used to exercise matrix-expression parsing.
M = MatrixSymbol("M", k, k)
N = MatrixSymbol("N", k, k)
P = MatrixSymbol("P", k, k)
Q = MatrixSymbol("Q", k, k)
def test_codegen_array_contraction_construction():
    """Construction and canonicalisation of CodegenArrayContraction."""
    # Contracting with no indices is the identity.
    assert CodegenArrayContraction(A) == A

    # A sum over a shared index parses to a tensor-product contraction.
    summed = Sum(A[i]*B[i], (i, 0, 3))
    expected = CodegenArrayContraction(CodegenArrayTensorProduct(A, B), (0, 1))
    assert parse_indexed_expression(summed) == expected

    # Contraction indices are canonically sorted: (1, 0) == (0, 1).
    assert CodegenArrayContraction(
        CodegenArrayTensorProduct(A, B), (1, 0)) == expected

    # Matrix product M*N contracts the inner index pair.
    matexpr = M*N
    expected = CodegenArrayContraction(CodegenArrayTensorProduct(M, N), (1, 2))
    assert parse_matrix_expression(matexpr) == expected
    assert parse_indexed_expression(matexpr[i, j]) == expected

    # Chained product M*N*M contracts two inner index pairs.
    matexpr = M*N*M
    expected = CodegenArrayContraction(
        CodegenArrayTensorProduct(M, N, M), (1, 2), (3, 4))
    assert parse_matrix_expression(matexpr) == expected

    # Sorting arguments by name remaps the contraction indices.
    sorted_cg = parse_indexed_expression(matexpr[i, j]).sort_args_by_name()
    assert sorted_cg == CodegenArrayContraction(
        CodegenArrayTensorProduct(M, M, N), (1, 4), (2, 5))
def test_codegen_array_contraction_indices_types():
    """Round-trip between flat contraction indices and (arg, pos) tuples."""
    cases = [
        (CodegenArrayContraction(CodegenArrayTensorProduct(M, N), (0, 1)),
         [[(0, 0), (0, 1)]], [(0, 1)]),
        (CodegenArrayContraction(CodegenArrayTensorProduct(M, N), (1, 2)),
         [[(0, 1), (1, 0)]], [(1, 2)]),
        (CodegenArrayContraction(CodegenArrayTensorProduct(M, M, N), (1, 4), (2, 5)),
         [[(0, 1), (2, 0)], [(1, 0), (2, 1)]], [(1, 4), (2, 5)]),
    ]
    for contraction, expected_tuples, expected_indices in cases:
        tuples = contraction._get_contraction_tuples()
        assert tuples == expected_tuples
        # Converting back must recover the original flat indices:
        recovered = contraction._contraction_tuples_to_contraction_indices(
            contraction.expr, tuples)
        assert recovered == expected_indices
def test_codegen_array_recognize_matrix_mul_lines():
    """Recognition of traces and matrix-multiplication lines from contractions."""
    # Contracting a single matrix over both of its axes is a trace:
    cg = CodegenArrayContraction(CodegenArrayTensorProduct(M), (0, 1))
    assert recognize_matrix_expression(cg) == Trace(M)
    # Two independent full contractions give a product of traces:
    cg = CodegenArrayContraction(CodegenArrayTensorProduct(M, N), (0, 1), (2, 3))
    assert recognize_matrix_expression(cg) == Trace(M)*Trace(N)
    cg = CodegenArrayContraction(CodegenArrayTensorProduct(M, N), (0, 3), (1, 2))
    assert recognize_matrix_expression(cg) == Trace(M*N)
    cg = CodegenArrayContraction(CodegenArrayTensorProduct(M, N), (0, 2), (1, 3))
    assert recognize_matrix_expression(cg) == Trace(M*N.T)
    # Chains of products round-trip through both parsers:
    cg = parse_indexed_expression((M*N*P)[i,j])
    assert recognize_matrix_expression(cg) == M*N*P
    cg = parse_matrix_expression(M*N*P)
    assert recognize_matrix_expression(cg) == M*N*P
    cg = parse_indexed_expression((M*N.T*P)[i,j])
    assert recognize_matrix_expression(cg) == M*N.T*P
    cg = parse_matrix_expression(M*N.T*P)
    assert recognize_matrix_expression(cg) == M*N.T*P
    # Two disconnected multiplication lines are returned as a list:
    cg = CodegenArrayContraction(CodegenArrayTensorProduct(M,N,P,Q), (1, 2), (5, 6))
    assert recognize_matrix_expression(cg) == [M*N, P*Q]
    # Scalar coefficients are preserved through parsing and recognition:
    expr = -2*M*N
    elem = expr[i, j]
    cg = parse_indexed_expression(elem)
    assert recognize_matrix_expression(cg) == -2*M*N
def test_codegen_array_flatten():
    """Nested codegen array wrappers flatten to a canonical single level."""
    # Flatten nested CodegenArrayTensorProduct objects:
    expr1 = CodegenArrayTensorProduct(M, N)
    expr2 = CodegenArrayTensorProduct(P, Q)
    expr = CodegenArrayTensorProduct(expr1, expr2)
    assert expr == CodegenArrayTensorProduct(M, N, P, Q)
    assert expr.args == (M, N, P, Q)
    # Flatten mixed CodegenArrayTensorProduct and CodegenArrayContraction objects:
    cg1 = CodegenArrayContraction(expr1, (1, 2))
    cg2 = CodegenArrayContraction(expr2, (0, 3))
    expr = CodegenArrayTensorProduct(cg1, cg2)
    assert expr == CodegenArrayContraction(CodegenArrayTensorProduct(M, N, P, Q), (1, 2), (4, 7))
    expr = CodegenArrayTensorProduct(M, cg1)
    assert expr == CodegenArrayContraction(CodegenArrayTensorProduct(M, M, N), (3, 4))
    # Flatten nested CodegenArrayContraction objects:
    cgnested = CodegenArrayContraction(cg1, (0, 1))
    assert cgnested == CodegenArrayContraction(CodegenArrayTensorProduct(M, N), (0, 3), (1, 2))
    cgnested = CodegenArrayContraction(CodegenArrayTensorProduct(cg1, cg2), (0, 3))
    assert cgnested == CodegenArrayContraction(CodegenArrayTensorProduct(M, N, P, Q), (0, 6), (1, 2), (4, 7))
    cg3 = CodegenArrayContraction(CodegenArrayTensorProduct(M, N, P, Q), (1, 3), (2, 4))
    cgnested = CodegenArrayContraction(cg3, (0, 1))
    assert cgnested == CodegenArrayContraction(CodegenArrayTensorProduct(M, N, P, Q), (0, 5), (1, 3), (2, 4))
    cgnested = CodegenArrayContraction(cg3, (0, 3), (1, 2))
    assert cgnested == CodegenArrayContraction(CodegenArrayTensorProduct(M, N, P, Q), (0, 7), (1, 3), (2, 4), (5, 6))
    cg4 = CodegenArrayContraction(CodegenArrayTensorProduct(M, N, P, Q), (1, 5), (3, 7))
    cgnested = CodegenArrayContraction(cg4, (0, 1))
    assert cgnested == CodegenArrayContraction(CodegenArrayTensorProduct(M, N, P, Q), (0, 2), (1, 5), (3, 7))
    cgnested = CodegenArrayContraction(cg4, (0, 1), (2, 3))
    assert cgnested == CodegenArrayContraction(CodegenArrayTensorProduct(M, N, P, Q), (0, 2), (1, 5), (3, 7), (4, 6))
    # A CodegenArrayDiagonal with no diagonal axes is a no-op wrapper:
    cg = CodegenArrayDiagonal(cg4)
    assert cg == cg4
    assert isinstance(cg, type(cg4))
    # Flatten nested CodegenArrayDiagonal objects:
    cg1 = CodegenArrayDiagonal(expr1, (1, 2))
    cg2 = CodegenArrayDiagonal(expr2, (0, 3))
    cg3 = CodegenArrayDiagonal(CodegenArrayTensorProduct(M, N, P, Q), (1, 3), (2, 4))
    cg4 = CodegenArrayDiagonal(CodegenArrayTensorProduct(M, N, P, Q), (1, 5), (3, 7))
    cgnested = CodegenArrayDiagonal(cg1, (0, 1))
    assert cgnested == CodegenArrayDiagonal(CodegenArrayTensorProduct(M, N), (1, 2), (0, 3))
    cgnested = CodegenArrayDiagonal(cg3, (1, 2))
    assert cgnested == CodegenArrayDiagonal(CodegenArrayTensorProduct(M, N, P, Q), (1, 3), (2, 4), (5, 6))
    cgnested = CodegenArrayDiagonal(cg4, (1, 2))
    assert cgnested == CodegenArrayDiagonal(CodegenArrayTensorProduct(M, N, P, Q), (1, 5), (3, 7), (2, 4))
def test_codegen_array_parse():
    """_codegen_array_parse: indexed expressions -> (array expr, index tuple)."""
    expr = M[i, j]
    assert _codegen_array_parse(expr) == (M, (i, j))
    # Independent indices give a plain tensor product:
    expr = M[i, j]*N[k, l]
    assert _codegen_array_parse(expr) == (CodegenArrayTensorProduct(M, N), (i, j, k, l))
    # A repeated (but not summed) index gives a diagonal:
    expr = M[i, j]*N[j, k]
    assert _codegen_array_parse(expr) == (CodegenArrayDiagonal(CodegenArrayTensorProduct(M, N), (1, 2)), (i, k, j))
    # A summed repeated index gives a contraction:
    expr = Sum(M[i, j]*N[j, k], (j, 0, k-1))
    assert _codegen_array_parse(expr) == (CodegenArrayContraction(CodegenArrayTensorProduct(M, N), (1, 2)), (i, k))
    expr = M[i, j] + N[i, j]
    assert _codegen_array_parse(expr) == (CodegenArrayElementwiseAdd(M, N), (i, j))
    # Transposed index order produces a permutation of the second addend:
    expr = M[i, j] + N[j, i]
    assert _codegen_array_parse(expr) == (CodegenArrayElementwiseAdd(M, CodegenArrayPermuteDims(N, Permutation([1,0]))), (i, j))
    expr = M[i, j] + M[j, i]
    assert _codegen_array_parse(expr) == (CodegenArrayElementwiseAdd(M, CodegenArrayPermuteDims(M, Permutation([1,0]))), (i, j))
    expr = (M*N*P)[i, j]
    assert _codegen_array_parse(expr) == (CodegenArrayContraction(CodegenArrayTensorProduct(M, N, P), (1, 2), (3, 4)), (i, j))
    expr = expr.function  # Disregard summation in previous expression
    ret1, ret2 = _codegen_array_parse(expr)
    assert ret1 == CodegenArrayDiagonal(CodegenArrayTensorProduct(M, N, P), (1, 2), (3, 4))
    assert str(ret2) == "(i, j, _i_1, _i_2)"
    # KroneckerDelta factors merge indices into index sets:
    expr = KroneckerDelta(i, j)*M[i, k]
    assert _codegen_array_parse(expr) == (M, ({i, j}, k))
    expr = KroneckerDelta(i, j)*KroneckerDelta(j, k)*M[i, l]
    assert _codegen_array_parse(expr) == (M, ({i, j, k}, l))
    expr = KroneckerDelta(j, k)*(M[i, j]*N[k, l] + N[i, j]*M[k, l])
    assert _codegen_array_parse(expr) == (CodegenArrayDiagonal(CodegenArrayElementwiseAdd(
            CodegenArrayTensorProduct(M, N),
            CodegenArrayPermuteDims(CodegenArrayTensorProduct(M, N), Permutation(0, 2)(1, 3))
        ), (1, 2)), (i, l, frozenset({j, k})))
    expr = KroneckerDelta(j, m)*KroneckerDelta(m, k)*(M[i, j]*N[k, l] + N[i, j]*M[k, l])
    assert _codegen_array_parse(expr) == (CodegenArrayDiagonal(CodegenArrayElementwiseAdd(
            CodegenArrayTensorProduct(M, N),
            CodegenArrayPermuteDims(CodegenArrayTensorProduct(M, N), Permutation(0, 2)(1, 3))
        ), (1, 2)), (i, l, frozenset({j, m, k})))
    expr = KroneckerDelta(i, j)*KroneckerDelta(j, k)*KroneckerDelta(k,m)*M[i, 0]*KroneckerDelta(m, n)
    assert _codegen_array_parse(expr) == (M, ({i,j,k,m,n}, 0))
    # A diagonal element of a single matrix:
    expr = M[i, i]
    assert _codegen_array_parse(expr) == (CodegenArrayDiagonal(M, (0, 1)), (i,))
def test_codegen_array_diagonal():
    """Diagonal axes are canonically sorted inside each axis tuple."""
    unsorted_axes = CodegenArrayDiagonal(M, (1, 0))
    assert unsorted_axes == CodegenArrayDiagonal(M, (0, 1))
    tp = CodegenArrayTensorProduct(M, N, P)
    assert CodegenArrayDiagonal(tp, (4, 1), (2, 0)) == \
        CodegenArrayDiagonal(tp, (1, 4), (0, 2))
def test_codegen_recognize_matrix_expression():
    """_recognize_matrix_expression builds the intermediate _Recognize* tree
    and _unfold_recognized_expr converts it back to a matrix expression."""
    expr = CodegenArrayElementwiseAdd(M, CodegenArrayPermuteDims(M, [1, 0]))
    rec = _recognize_matrix_expression(expr)
    assert rec == _RecognizeMatOp(MatAdd, [M, _RecognizeMatOp(Transpose, [M])])
    assert _unfold_recognized_expr(rec) == M + Transpose(M)
    expr = M[i,j] + N[i,j]
    p1, p2 = _codegen_array_parse(expr)
    rec = _recognize_matrix_expression(p1)
    assert rec == _RecognizeMatOp(MatAdd, [M, N])
    assert _unfold_recognized_expr(rec) == M + N
    # Transposed index order is recognized as a transpose:
    expr = M[i,j] + N[j,i]
    p1, p2 = _codegen_array_parse(expr)
    rec = _recognize_matrix_expression(p1)
    assert rec == _RecognizeMatOp(MatAdd, [M, _RecognizeMatOp(Transpose, [N])])
    assert _unfold_recognized_expr(rec) == M + N.T
    # Disconnected products become parallel multiplication lines:
    expr = M[i,j]*N[k,l] + N[i,j]*M[k,l]
    p1, p2 = _codegen_array_parse(expr)
    rec = _recognize_matrix_expression(p1)
    assert rec == _RecognizeMatOp(MatAdd, [_RecognizeMatMulLines([M, N]), _RecognizeMatMulLines([N, M])])
    #assert _unfold_recognized_expr(rec) == TensorProduct(M, N) + TensorProduct(N, M) maybe?
    expr = (M*N*P)[i, j]
    p1, p2 = _codegen_array_parse(expr)
    rec = _recognize_matrix_expression(p1)
    assert rec == _RecognizeMatMulLines([_RecognizeMatOp(MatMul, [M, N, P])])
    assert _unfold_recognized_expr(rec) == M*N*P
    expr = Sum(M[i,j]*(N*P)[j,m], (j, 0, k-1))
    p1, p2 = _codegen_array_parse(expr)
    rec = _recognize_matrix_expression(p1)
    assert rec == _RecognizeMatOp(MatMul, [M, N, P])
    assert _unfold_recognized_expr(rec) == M*N*P
    # Nested sums of products recognize to nested MatMul/MatAdd trees:
    expr = Sum((P[j, m] + P[m, j])*(M[i,j]*N[m,n] + N[i,j]*M[m,n]), (j, 0, k-1), (m, 0, k-1))
    p1, p2 = _codegen_array_parse(expr)
    rec = _recognize_matrix_expression(p1)
    assert rec == _RecognizeMatOp(MatAdd, [
        _RecognizeMatOp(MatMul, [M, _RecognizeMatOp(MatAdd, [P, _RecognizeMatOp(Transpose, [P])]), N]),
        _RecognizeMatOp(MatMul, [N, _RecognizeMatOp(MatAdd, [P, _RecognizeMatOp(Transpose, [P])]), M])
    ])
    assert _unfold_recognized_expr(rec) == M*(P + P.T)*N + N*(P + P.T)*M
def test_codegen_array_shape():
    """Shape propagation through the codegen array wrappers."""
    expr = CodegenArrayTensorProduct(M, N, P, Q)
    assert expr.shape == (k, k, k, k, k, k, k, k)
    Z = MatrixSymbol("Z", m, n)
    expr = CodegenArrayTensorProduct(M, Z)
    assert expr.shape == (k, k, m, n)
    # Contraction removes both contracted axes from the shape:
    expr2 = CodegenArrayContraction(expr, (0, 1))
    assert expr2.shape == (m, n)
    # Diagonal removes the paired axes and appends one diagonal axis:
    expr2 = CodegenArrayDiagonal(expr, (0, 1))
    assert expr2.shape == (m, n, k)
    exprp = CodegenArrayPermuteDims(expr, [2, 1, 3, 0])
    assert exprp.shape == (m, k, n, k)
    expr3 = CodegenArrayTensorProduct(N, Z)
    expr2 = CodegenArrayElementwiseAdd(expr, expr3)
    assert expr2.shape == (k, k, m, n)
    # Contraction along axes with discordant dimensions:
    raises(ValueError, lambda: CodegenArrayContraction(expr, (1, 2)))
    # Also diagonal needs the same dimensions:
    raises(ValueError, lambda: CodegenArrayDiagonal(expr, (1, 2)))
def test_codegen_array_parse_out_of_bounds():
expr = Sum(M[i, i], (i, 0, 4))
raises(ValueError, lambda: parse_indexed_expression(expr))
expr = Sum(M[i, i], (i, 0, k))
raises(ValueError, lambda: parse_indexed_expression(expr))
expr | |
Data", body="station did not got ip Test failed.")
assert False
else:
if float(str(kpi_val[0])[1:-1]) > float(500):
print("Test passed successfully")
allure.attach(name="Kpi Data", body=str(kpi_val))
assert True
else:
print(" valueTest faled due to lesser")
allure.attach(name="Kpi Data", body=str(kpi_val))
assert False
else:
print("test failed due to no station ip")
assert False
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-5065", name="WIFI-5065")
@pytest.mark.wpa2_personal
@pytest.mark.fiveg
@pytest.mark.degree60_nss1_10db
def test_nss1_wpa2_personal_5g_10db_60degree(self, setup_profiles, lf_tools, lf_test, station_names_fiveg,
create_lanforge_chamberview_dut, get_configuration):
profile_data = setup_params_general["ssid_modes"]["wpa2_personal"][1]
ssid_name = profile_data["ssid_name"]
security_key = profile_data["security_key"]
security = "wpa2"
mode = "BRIDGE"
band = "fiveg"
vlan = 1
dut_name = create_lanforge_chamberview_dut
station = lf_test.Client_Connect(ssid=ssid_name, security=security,
passkey=security_key, mode=mode, band=band,
station_name=station_names_fiveg, vlan_id=vlan)
print("station", station)
ser_no = lf_test.attenuator_serial()
print(ser_no)
val = [['modes: Auto'], ['pkts: MTU'], ['directions: DUT Transmit'], ['traffic_types:UDP'],
['bandw_options: AUTO'], ['spatial_streams: 1'], ['attenuator: ' + str(ser_no[0])], ['attenuator2: ' + str(ser_no[1])],
['attenuations: 100'], ['attenuations2: 100'], ['chamber: DUT-Chamber'], ['tt_deg: 60']]
if station:
time.sleep(3)
rvr_o = lf_test.ratevsrange(station_name=station_names_fiveg, mode=mode,
instance_name="SPATIAL_NSS1_RVR1_Degree60_fiveg",
vlan_id=vlan, dut_name=dut_name, raw_lines=val)
report_name = rvr_o.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
print("report name ", report_name)
entries = os.listdir("../reports/" + report_name + '/')
print("entries", entries)
lf_tools.attach_report_graphs(report_name=report_name,
pdf_name="Rate vs Range Test - UDP 2.4G")
kpi = False
for i in entries:
if "kpi.csv" in i:
kpi = i
if kpi:
allure.attach.file(source="../reports/" + report_name + "/" + kpi,
name="kpi.csv")
print("Test Completed... Cleaning up Stations")
lf_test.Client_disconnect(station_name=station_names_fiveg)
kpi_val = lf_tools.read_kpi_file(column_name=["numeric-score"], dir_name=report_name)
print(type(kpi_val))
print(kpi_val)
print(str(kpi_val[0])[1:-1])
if str(kpi_val) == "empty":
print("kpi is empty, station did not got ip, Test failed")
allure.attach(name="Kpi Data", body="station did not got ip Test failed.")
assert False
else:
if float(str(kpi_val[0])[1:-1]) > float(250):
print("Test passed successfully")
allure.attach(name="Kpi Data", body=str(kpi_val))
assert True
else:
print(" valueTest faled due to lesser")
allure.attach(name="Kpi Data", body=str(kpi_val))
assert False
else:
print("test failed due to no station ip")
assert False
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-5064", name="WIFI-5064")
@pytest.mark.wpa2_personal
@pytest.mark.fiveg
@pytest.mark.degree60_nss2_10db
def test_nss2_wpa2_personal_5g_10db_60degree(self, setup_profiles, lf_tools, lf_test, station_names_fiveg,
create_lanforge_chamberview_dut, get_configuration):
profile_data = setup_params_general["ssid_modes"]["wpa2_personal"][1]
ssid_name = profile_data["ssid_name"]
security_key = profile_data["security_key"]
security = "wpa2"
mode = "BRIDGE"
band = "fiveg"
vlan = 1
dut_name = create_lanforge_chamberview_dut
station = lf_test.Client_Connect(ssid=ssid_name, security=security,
passkey=security_key, mode=mode, band=band,
station_name=station_names_fiveg, vlan_id=vlan)
print("station", station)
ser_no = lf_test.attenuator_serial()
print(ser_no)
val = [['modes: Auto'], ['pkts: MTU'], ['directions: DUT Transmit'], ['traffic_types:UDP'],
['bandw_options: AUTO'], ['spatial_streams: 2'], ['attenuator: ' + str(ser_no[0])], ['attenuator2: ' + str(ser_no[1])],
['attenuations: 100'], ['attenuations2: 100'], ['chamber: DUT-Chamber'], ['tt_deg: 60']]
if station:
time.sleep(3)
rvr_o = lf_test.ratevsrange(station_name=station_names_fiveg, mode=mode,
instance_name="SPATIAL_NSS2_RVR1_Degree60_fiveg_10db",
vlan_id=vlan, dut_name=dut_name, raw_lines=val)
report_name = rvr_o.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
print("report name ", report_name)
entries = os.listdir("../reports/" + report_name + '/')
print("entries", entries)
lf_tools.attach_report_graphs(report_name=report_name,
pdf_name="Rate vs Range Test - UDP 2.4G")
kpi = False
for i in entries:
if "kpi.csv" in i:
kpi = i
if kpi:
allure.attach.file(source="../reports/" + report_name + "/" + kpi,
name="kpi.csv")
print("Test Completed... Cleaning up Stations")
lf_test.Client_disconnect(station_name=station_names_fiveg)
kpi_val = lf_tools.read_kpi_file(column_name=["numeric-score"], dir_name=report_name)
print(type(kpi_val))
print(kpi_val)
print(str(kpi_val[0])[1:-1])
if str(kpi_val) == "empty":
print("kpi is empty, station did not got ip, Test failed")
allure.attach(name="Kpi Data", body="station did not got ip Test failed.")
assert False
else:
if float(str(kpi_val[0])[1:-1]) > float(500):
print("Test passed successfully")
allure.attach(name="Kpi Data", body=str(kpi_val))
assert True
else:
print(" valueTest faled due to lesser")
allure.attach(name="Kpi Data", body=str(kpi_val))
assert False
else:
print("test failed due to no station ip")
assert False
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-5063", name="WIFI-5063")
@pytest.mark.wpa2_personal
@pytest.mark.twog
@pytest.mark.degree120_nss1_10db
def test_nss1_wpa2_personal_2g_10db_120degree(self, setup_profiles, lf_tools, lf_test, station_names_twog,
create_lanforge_chamberview_dut, get_configuration):
profile_data = setup_params_general["ssid_modes"]["wpa2_personal"][0]
ssid_name = profile_data["ssid_name"]
security_key = profile_data["security_key"]
security = "wpa2"
mode = "BRIDGE"
band = "twog"
vlan = 1
dut_name = create_lanforge_chamberview_dut
station = lf_test.Client_Connect(ssid=ssid_name, security=security,
passkey=security_key, mode=mode, band=band,
station_name=station_names_twog, vlan_id=vlan)
print("station", station)
ser_no = lf_test.attenuator_serial()
print(ser_no)
val = [['modes: Auto'], ['pkts: MTU'], ['directions: DUT Transmit'], ['traffic_types:UDP'],
['bandw_options: AUTO'], ['spatial_streams: 1'], ['attenuator: ' + str(ser_no[0])], ['attenuator2: ' + str(ser_no[1])],
['attenuations: 100'], ['attenuations2: 100'], ['chamber: DUT-Chamber'], ['tt_deg: 120']]
if station:
time.sleep(3)
rvr_o = lf_test.ratevsrange(station_name=station_names_twog, mode=mode,
instance_name="SPATIAL_NSS1_RVR1_Degree120_twog_10db",
vlan_id=vlan, dut_name=dut_name, raw_lines=val)
report_name = rvr_o.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
print("report name ", report_name)
entries = os.listdir("../reports/" + report_name + '/')
print("entries", entries)
lf_tools.attach_report_graphs(report_name=report_name,
pdf_name="Rate vs Range Test - UDP 2.4G")
kpi = False
for i in entries:
if "kpi.csv" in i:
kpi = i
if kpi:
allure.attach.file(source="../reports/" + report_name + "/" + kpi,
name="kpi.csv")
print("Test Completed... Cleaning up Stations")
lf_test.Client_disconnect(station_name=station_names_twog)
kpi_val = lf_tools.read_kpi_file(column_name=["numeric-score"], dir_name=report_name)
print(type(kpi_val))
print(kpi_val)
print(str(kpi_val[0])[1:-1])
if str(kpi_val) == "empty":
print("kpi is empty, station did not got ip, Test failed")
allure.attach(name="Kpi Data", body="station did not got ip Test failed.")
assert False
else:
if float(str(kpi_val[0])[1:-1]) > float(45):
print("Test passed successfully")
allure.attach(name="Kpi Data", body=str(kpi_val))
assert True
else:
print(" valueTest faled due to lesser")
allure.attach(name="Kpi Data", body=str(kpi_val))
assert False
else:
print("test failed due to no station ip")
assert False
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-5062", name="WIFI-5062")
@pytest.mark.wpa2_personal
@pytest.mark.twog
@pytest.mark.degree120_nss2_10db
def test_nss2_wpa2_personal_2g_10db_120degree(self, setup_profiles, lf_tools, lf_test, station_names_twog,
create_lanforge_chamberview_dut, get_configuration):
profile_data = setup_params_general["ssid_modes"]["wpa2_personal"][0]
ssid_name = profile_data["ssid_name"]
security_key = profile_data["security_key"]
security = "wpa2"
mode = "BRIDGE"
band = "twog"
vlan = 1
dut_name = create_lanforge_chamberview_dut
station = lf_test.Client_Connect(ssid=ssid_name, security=security,
passkey=security_key, mode=mode, band=band,
station_name=station_names_twog, vlan_id=vlan)
print("station", station)
ser_no = lf_test.attenuator_serial()
print(ser_no)
val = [['modes: Auto'], ['pkts: MTU'], ['directions: DUT Transmit'], ['traffic_types:UDP'],
['bandw_options: AUTO'], ['spatial_streams: 2'], ['attenuator: ' + str(ser_no[0])], ['attenuator2: ' + str(ser_no[1])],
['attenuations: 100'], ['attenuations2: 100'], ['chamber: DUT-Chamber'], ['tt_deg: 120']]
if station:
time.sleep(3)
rvr_o = lf_test.ratevsrange(station_name=station_names_twog, mode=mode,
instance_name="SPATIAL_NSS2_RVR1_Degree120_twog_10db",
vlan_id=vlan, dut_name=dut_name, raw_lines=val)
report_name = rvr_o.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
print("report name ", report_name)
entries = os.listdir("../reports/" + report_name + '/')
print("entries", entries)
lf_tools.attach_report_graphs(report_name=report_name,
pdf_name="Rate vs Range Test - UDP 2.4G")
kpi = False
for i in entries:
if "kpi.csv" in i:
kpi = i
if kpi:
allure.attach.file(source="../reports/" + report_name + "/" + kpi,
name="kpi.csv")
print("Test Completed... Cleaning up Stations")
lf_test.Client_disconnect(station_name=station_names_twog)
kpi_val = lf_tools.read_kpi_file(column_name=["numeric-score"], dir_name=report_name)
print(type(kpi_val))
print(kpi_val)
print(str(kpi_val[0])[1:-1])
if str(kpi_val) == "empty":
print("kpi is empty, station did not got ip, Test failed")
allure.attach(name="Kpi Data", body="station did not got ip Test failed.")
assert False
else:
if float(str(kpi_val[0])[1:-1]) > float(90):
print("Test passed successfully")
allure.attach(name="Kpi Data", body=str(kpi_val))
assert True
else:
print(" valueTest faled due to lesser")
allure.attach(name="Kpi Data", body=str(kpi_val))
assert False
else:
print("test failed due to no station ip")
assert False
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-5061", name="WIFI-5061")
@pytest.mark.wpa2_personal
@pytest.mark.twog
@pytest.mark.degree240_nss1_10db
def test_nss1_wpa2_personal_2g_10db_240degree(self, setup_profiles, lf_tools, lf_test, station_names_twog,
create_lanforge_chamberview_dut, get_configuration):
profile_data = setup_params_general["ssid_modes"]["wpa2_personal"][0]
ssid_name = profile_data["ssid_name"]
security_key = profile_data["security_key"]
security = "wpa2"
mode = "BRIDGE"
band = "twog"
vlan = 1
dut_name = create_lanforge_chamberview_dut
station = lf_test.Client_Connect(ssid=ssid_name, security=security,
passkey=security_key, mode=mode, band=band,
station_name=station_names_twog, vlan_id=vlan)
print("station", station)
ser_no = lf_test.attenuator_serial()
print(ser_no)
val = [['modes: Auto'], ['pkts: MTU'], ['directions: DUT Transmit'], ['traffic_types:UDP'],
['bandw_options: AUTO'], ['spatial_streams: 1'], ['attenuator: ' + str(ser_no[0])], ['attenuator2: ' + str(ser_no[1])],
['attenuations: 100'], ['attenuations2: 100'], ['chamber: DUT-Chamber'], ['tt_deg: 240']]
if station:
time.sleep(3)
rvr_o = lf_test.ratevsrange(station_name=station_names_twog, mode=mode,
instance_name="SPATIAL_NSS1_RVR1_Degree240_twog_10db",
vlan_id=vlan, dut_name=dut_name, raw_lines=val)
report_name = rvr_o.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
print("report name ", report_name)
entries = os.listdir("../reports/" + report_name + '/')
print("entries", entries)
lf_tools.attach_report_graphs(report_name=report_name,
pdf_name="Rate vs Range Test - UDP 2.4G")
kpi = False
for i in entries:
if "kpi.csv" in i:
kpi = i
if kpi:
allure.attach.file(source="../reports/" + report_name + "/" + kpi,
name="kpi.csv")
print("Test Completed... Cleaning up Stations")
lf_test.Client_disconnect(station_name=station_names_twog)
kpi_val = lf_tools.read_kpi_file(column_name=["numeric-score"], dir_name=report_name)
print(type(kpi_val))
print(kpi_val)
print(str(kpi_val[0])[1:-1])
if str(kpi_val) == "empty":
print("kpi is empty, station did not got ip, Test failed")
allure.attach(name="Kpi Data", body="station did not got ip Test failed.")
assert False
else:
if float(str(kpi_val[0])[1:-1]) > float(45):
print("Test passed successfully")
allure.attach(name="Kpi Data", body=str(kpi_val))
assert True
else:
print(" valueTest faled due to lesser")
allure.attach(name="Kpi Data", body=str(kpi_val))
assert False
else:
print("test failed due to no station ip")
assert False
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-5060", name="WIFI-5060")
@pytest.mark.wpa2_personal
@pytest.mark.twog
@pytest.mark.degree240_nss2_10db
def test_nss1_wpa2_personal_2g_10db_240degree(self, setup_profiles, lf_tools, lf_test, station_names_twog,
create_lanforge_chamberview_dut, get_configuration):
profile_data = setup_params_general["ssid_modes"]["wpa2_personal"][0]
ssid_name = profile_data["ssid_name"]
security_key = profile_data["security_key"]
security = "wpa2"
mode = "BRIDGE"
band = "twog"
vlan = 1
dut_name = create_lanforge_chamberview_dut
station = lf_test.Client_Connect(ssid=ssid_name, security=security,
passkey=security_key, mode=mode, band=band,
station_name=station_names_twog, vlan_id=vlan)
print("station", station)
ser_no = lf_test.attenuator_serial()
print(ser_no)
val = [['modes: Auto'], ['pkts: MTU'], ['directions: DUT Transmit'], ['traffic_types:UDP'],
['bandw_options: AUTO'], ['spatial_streams: 2'],['attenuator: ' + str(ser_no[0])], ['attenuator2: ' + str(ser_no[1])],
['attenuations: 100'], ['attenuations2: 100'], ['chamber: DUT-Chamber'], ['tt_deg: 240']]
if station:
time.sleep(3)
rvr_o = lf_test.ratevsrange(station_name=station_names_twog, mode=mode,
instance_name="SPATIAL_NSS2_RVR1_Degree240_twog_10db",
vlan_id=vlan, dut_name=dut_name, raw_lines=val)
report_name = rvr_o.report_name[0]['LAST']["response"].split(":::")[1].split("/")[-1]
print("report name ", report_name)
entries = os.listdir("../reports/" + report_name + '/')
print("entries", entries)
lf_tools.attach_report_graphs(report_name=report_name,
pdf_name="Rate vs Range Test - UDP 2.4G")
kpi = False
for i in entries:
if "kpi.csv" in i:
kpi = i
if kpi:
allure.attach.file(source="../reports/" + report_name + "/" + kpi,
name="kpi.csv")
print("Test Completed... Cleaning up Stations")
lf_test.Client_disconnect(station_name=station_names_twog)
kpi_val = lf_tools.read_kpi_file(column_name=["numeric-score"], dir_name=report_name)
print(type(kpi_val))
print(kpi_val)
print(str(kpi_val[0])[1:-1])
if str(kpi_val) == "empty":
print("kpi is empty, station did not got ip, | |
<filename>service/tasks/driver.py<gh_stars>100-1000
"""
Tasks for driver operations.
NOTE: At this point create options do not have a hard-set requirement for 'CoreIdentity'
Delete/remove operations do. This should be investigated further..
"""
from operator import attrgetter
import time
from django.conf import settings
from django.utils.timezone import datetime, timedelta
from celery.decorators import task
from celery.task import current
from rtwo.exceptions import LibcloudInvalidCredsError, LibcloudBadResponseError
#TODO: Internalize exception into RTwo
from rtwo.exceptions import NonZeroDeploymentException, NeutronBadRequest
from neutronclient.common.exceptions import IpAddressGenerationFailureClient
from threepio import celery_logger, status_logger, logger
from celery import current_app as app
from core.email import send_instance_email
from core.models.instance import Instance
from core.models.identity import Identity
from core.models.profile import UserProfile
from service.deploy import (
user_deploy, build_host_name, ready_to_deploy as ansible_ready_to_deploy,
run_utility_playbooks, execution_has_failures, execution_has_unreachable
)
from service.driver import get_driver, get_account_driver
from service.exceptions import AnsibleDeployException
from service.instance import _update_instance_metadata
from service.mock import MockInstance
def _update_status_log(instance, status_update):
    """
    Append a CSV-style line describing `status_update` for `instance`
    to the status log.  Mock instances are skipped entirely.
    """
    # Idiom fix: use isinstance instead of an exact type comparison so
    # subclasses of MockInstance are also skipped.
    if isinstance(instance, MockInstance):
        return
    now_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    try:
        user = instance._node.extra['metadata']['creator']
    except KeyError:
        # Creator metadata is optional on the node; fall back to a marker.
        user = "Unknown -- Metadata missing"
    size_alias = instance._node.extra['flavorId']
    machine_alias = instance._node.extra['imageId']
    status_logger.debug(
        "%s,%s,%s,%s,%s,%s" % (
            now_time, user, instance.alias, machine_alias, size_alias,
            status_update
        )
    )
@task(name="print_debug")
def print_debug():
    """Emit a timestamped message to stdout and the celery log (worker liveness check)."""
    log_str = "print_debug task finished at %s." % datetime.now()
    # Parenthesized print is valid on both Python 2 and Python 3
    # (the old statement form was Python-2-only).
    print(log_str)
    celery_logger.debug(log_str)
@task(name="complete_resize", max_retries=2, default_retry_delay=15)
def complete_resize(
    driverCls, provider, identity, instance_alias, core_provider_uuid,
    core_identity_uuid, user
):
    """
    Confirm the resize of 'instance_alias'

    Returns (True, result) on success, or (False, None) if the instance no
    longer exists.  Any other failure is logged and the task is retried
    (max_retries=2, 15s apart).
    """
    # Imported lazily to avoid a circular import at module load time.
    from service import instance as instance_service
    try:
        celery_logger.debug(
            "complete_resize task started at %s." % datetime.now()
        )
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_alias)
        if not instance:
            # Instance disappeared between scheduling and execution; nothing to confirm.
            celery_logger.debug(
                "Instance has been teminated: %s." % instance_alias
            )
            return False, None
        result = instance_service.confirm_resize(
            driver, instance, core_provider_uuid, core_identity_uuid, user
        )
        celery_logger.debug(
            "complete_resize task finished at %s." % datetime.now()
        )
        return True, result
    except Exception as exc:
        celery_logger.exception(exc)
        complete_resize.retry(exc=exc)
@task(name="wait_for_instance", max_retries=250, default_retry_delay=15)
def wait_for_instance(
    instance_alias,
    driverCls,
    provider,
    identity,
    status_query,
    tasks_allowed=False,
    test_tmp_status=False,
    return_id=False,
    **task_kwargs
):
    """
    #Task makes 250 attempts to 'look at' the instance, waiting 15sec each try
    Cumulative time == 1 hour 2 minutes 30 seconds before FAILURE
    status_query = "active" Match only one value, active
    status_query = ["active","suspended"] or match multiple values.
    """
    try:
        celery_logger.debug("wait_for task started at %s." % datetime.now())
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_alias)
        if not instance:
            # Instance disappeared; report failure without retrying.
            celery_logger.debug(
                "Instance has been terminated: %s." % instance_alias
            )
            return False
        # Raises when the instance does not (yet) satisfy status_query,
        # which triggers the retry below.
        result = _is_instance_ready(
            instance, status_query, tasks_allowed, test_tmp_status, return_id
        )
        return result
    except Exception as exc:
        # NOTE(review): suppression relies on the raised message containing
        # the exact substring "Not Ready" -- verify _is_instance_ready's
        # message casing matches, otherwise every poll is logged here.
        if "Not Ready" not in str(exc):
            # Ignore 'normal' errors.
            celery_logger.exception(exc)
        wait_for_instance.retry(exc=exc)
def _is_instance_ready(
    instance,
    status_query,
    tasks_allowed=False,
    test_tmp_status=False,
    return_id=False
):
    """
    Check whether `instance` matches the requested status criteria.

    Raises an Exception (message prefixed with "Not Ready") when the
    instance is not yet in the desired state; callers use that marker to
    suppress logging of the expected retry path.  Returns instance.id when
    return_id is True, otherwise True.
    """
    # TODO: Refactor so that terminal states can be found. IE if waiting for
    # 'active' and in status: Suspended - none - GIVE up!!
    i_status = instance._node.extra['status'].lower()
    i_task = instance._node.extra.get('task', None)
    i_tmp_status = instance._node.extra.get('metadata',
                                            {}).get('tmp_status', '')
    celery_logger.debug(
        "Instance %s: Status: (%s - %s) Tmp status: %s " %
        (instance.id, i_status, i_task, i_tmp_status)
    )
    status_not_ready = (
        i_status not in status_query
    )    # Ex: status 'build' is not in 'active'
    tasks_not_ready = (
        not tasks_allowed and i_task is not None
    )    # Ex: Task name: 'scheduling', tasks_allowed=False
    tmp_status_not_ready = (
        test_tmp_status and i_tmp_status != ""
    )    # Ex: tmp_status: 'initializing'
    celery_logger.debug(
        "Status not ready: %s tasks not ready: %s Tmp status_not_ready: %s" %
        (status_not_ready, tasks_not_ready, tmp_status_not_ready)
    )
    if status_not_ready or tasks_not_ready or tmp_status_not_ready:
        # BUGFIX: callers suppress expected retries by testing for the
        # substring "Not Ready" in the exception text; the previous message
        # ("Status not ready...") never contained it, so every normal poll
        # was logged as an exception.
        raise Exception(
            "Not Ready: Instance: %s: Status: (%s - %s - %s) Produced:"
            "Status not ready: %s tasks not ready: %s Tmp status_not_ready: %s"
            % (
                instance.id, i_status, i_task, i_tmp_status, status_not_ready,
                tasks_not_ready, tmp_status_not_ready
            )
        )
    celery_logger.debug(
        "Instance %s: Status: (%s - %s - %s) - Ready" %
        (instance.id, i_status, i_task, i_tmp_status)
    )
    if return_id:
        return instance.id
    return True
@task(
    name="add_fixed_ip",
    ignore_result=True,
    default_retry_delay=15,
    max_retries=15
)
def add_fixed_ip(
    driverCls, provider, identity, instance_id, core_identity_uuid=None
):
    """
    Ensure the instance has a fixed IP on its network.

    When the instance's single port was created in a different availability
    zone than the instance, the port is deleted and a fresh interface is
    attached (see the launchpad bug linked below); otherwise a fixed IP is
    added only if the instance has no private IP yet.  Retries up to 15
    times, 15s apart.
    """
    # Imported lazily to avoid a circular import at module load time.
    from service.instance import _to_network_driver, _get_network_id
    try:
        celery_logger.debug("add_fixed_ip task started at %s." % datetime.now())
        core_identity = Identity.objects.get(uuid=core_identity_uuid)
        network_driver = _to_network_driver(core_identity)
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        if not instance:
            celery_logger.debug(
                "Instance has been teminated: %s." % instance_id
            )
            return None
        ports = network_driver.list_ports(device_id=instance.id)
        # Catch a common scenario that breaks networking
        assert len(ports) == 1, "Attaching a fixed ip requires a single port"
        port = ports[0]
        port_zone = instance_zone = None
        try:
            # device_owner looks like "compute:<zone>"; the zone part is optional.
            port_zone = port['device_owner'].split(":")[1]
            instance_zone = instance.extra['availability_zone']
        except:
            # NOTE(review): deliberately best-effort -- zone info may be
            # absent; both values stay None and the zone check is skipped.
            pass
        network_id = _get_network_id(driver, instance)
        if port_zone and instance_zone and port_zone != instance_zone:
            # If the port and instance are in different zones, delete the old
            # port and attach a new one, this only occurs in narrow scenarios
            # documented in the following ticket:
            # https://bugs.launchpad.net/nova/+bug/1759924
            network_driver.delete_port(port)
            driver._connection.ex_attach_interface(
                instance.id, network_id=network_id
            )
        elif not instance._node.private_ips:
            # Only add fixed ip if the instance doesn't already have one
            driver._connection.ex_add_fixed_ip(instance, network_id)
        celery_logger.debug(
            "add_fixed_ip task finished at %s." % datetime.now()
        )
    except Exception as exc:
        if "Not Ready" not in str(exc):
            # Ignore 'normal' errors.
            celery_logger.exception(exc)
        add_fixed_ip.retry(exc=exc)
def current_openstack_identities():
    """Return active-OpenStack identities, sorted by provider type then username.

    BUG FIX: the previous key function called ``attrgetter(...)`` with the
    attribute *values* (building unorderable attrgetter objects, which raises
    TypeError under sorted() on Python 3); sort on a plain tuple instead.
    """
    identities = Identity.objects.filter(
        provider__type__name__iexact='openstack', provider__active=True
    )
    key_sorter = lambda ident: (
        ident.provider.type.name,
        ident.created_by.username
    )
    identities = sorted(identities, key=key_sorter)
    return identities
def _remove_extra_floating_ips(driver, tenant_name):
num_ips_removed = driver._clean_floating_ip()
if num_ips_removed:
celery_logger.debug(
"Removed %s ips from OpenStack Tenant %s" %
(num_ips_removed, tenant_name)
)
return num_ips_removed
def _remove_ips_from_inactive_instances(driver, instances, core_identity):
    """Strip the floating IP from every instance the driver reports inactive.

    :param driver: Cloud driver exposing ``_is_inactive_instance``
    :param instances: Iterable of instances to inspect
    :param core_identity: Identity whose UUID scopes the removal
    :return: ``True`` once every instance has been checked
    """
    from service import instance as instance_service
    for inst in instances:
        # DOUBLE-CHECK: only act when the instance is inactive AND is still
        # holding a floating IP.
        if not (driver._is_inactive_instance(inst) and inst.ip):
            continue
        instance_service.remove_floating_ip(
            driver, inst, str(core_identity.uuid)
        )
    return True
@task(name="clear_empty_ips_for")
def clear_empty_ips_for(username, core_provider_id, core_identity_uuid):
    """
    RETURN: number_ips_removed
    on Failure:
    -404, driver creation failure (Verify credentials are accurate)
    -401, authorization failure (Change the password of the driver)
    -500, cloud failure (Operational support required)
    """
    from service.driver import get_esh_driver
    from rtwo.driver import OSDriver
    # Initialize the drivers
    core_identity = Identity.objects.get(uuid=core_identity_uuid)
    driver = get_esh_driver(core_identity)
    if not isinstance(driver, OSDriver):
        return -404
    # Get useful info
    creds = core_identity.get_credentials()
    tenant_name = creds['ex_tenant_name']
    celery_logger.info("Checking Identity %s" % tenant_name)
    # Attempt to clean floating IPs
    num_ips_removed = _remove_extra_floating_ips(driver, tenant_name)
    # Test for active/inactive_instances instances
    try:
        instances = driver.list_instances()
    except LibcloudInvalidCredsError:
        logger.exception(
            "InvalidCredentials provided for Identity %s" % core_identity
        )
        return -401
    except LibcloudBadResponseError:
        logger.exception(
            "Driver returned unexpected response for Identity %s" %
            core_identity
        )
        return -500
    _remove_ips_from_inactive_instances(driver, instances, core_identity)
    # BUG FIX: the docstring promises number_ips_removed, but the count was
    # previously discarded and the task always returned None on success.
    return num_ips_removed
@task(name="clear_empty_ips")
def clear_empty_ips():
    """Fan out a clear_empty_ips_for task for every active OpenStack identity.

    Skipped entirely (after logging) when settings.DEBUG is enabled.
    """
    celery_logger.debug("clear_empty_ips task started at %s." % datetime.now())
    if settings.DEBUG:
        celery_logger.debug(
            "clear_empty_ips task SKIPPED at %s." % datetime.now()
        )
        return
    for core_identity in current_openstack_identities():
        try:
            clear_empty_ips_for.apply_async(
                args=[
                    core_identity.created_by.username,
                    core_identity.provider.id,
                    str(core_identity.uuid)
                ]
            )
        except Exception as exc:
            # One bad identity must not stop the sweep over the rest.
            celery_logger.exception(exc)
    celery_logger.debug("clear_empty_ips task finished at %s." % datetime.now())
@task(name="_send_instance_email", default_retry_delay=10, max_retries=2)
def _send_instance_email(driverCls, provider, identity, instance_id):
    """Email the owner of a newly launched instance, honoring their opt-out.

    Looks up the user's profile; if ``send_emails`` is set, sends the
    new-instance email with the instance's creation timestamp. Retries
    (up to max_retries) on failure.
    """
    try:
        celery_logger.debug(
            "_send_instance_email task started at %s." % datetime.now()
        )
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        # Breakout if instance has been deleted at this point
        if not instance:
            celery_logger.debug(
                "Instance has been teminated: %s." % instance_id
            )
            return
        #FIXME: this is not a safe way to retrieve username. this is not a CoreIdentity.
        username = identity.user.username
        profile = UserProfile.objects.get(user__username=username)
        if profile.send_emails:
            # Only send emails if allowed by profile setting
            created = datetime.strptime(
                instance.extra['created'], "%Y-%m-%dT%H:%M:%SZ"
            )
            send_instance_email(
                username, instance.id, instance.name, instance.ip, created,
                username
            )
        else:
            celery_logger.debug(
                "User %s elected NOT to receive new instance emails" % username
            )
        celery_logger.debug(
            "_send_instance_email task finished at %s." % datetime.now()
        )
    except Exception as exc:
        # BUG FIX: previously caught (BaseException, Exception), which also
        # swallowed SystemExit/KeyboardInterrupt; catch Exception only.
        celery_logger.warn(exc)
        _send_instance_email.retry(exc=exc)
def _send_instance_email_with_failure(
    driverCls, provider, identity, instance_id, username, error_message
):
    """Email the user that their instance deployment failed.

    :param username: Recipient username (also used as instance owner)
    :param error_message: Failure text included in the email body
    """
    driver = get_driver(driverCls, provider, identity)
    instance = driver.get_instance(instance_id)
    # Breakout if instance has been deleted at this point
    if not instance:
        celery_logger.debug("Instance has been teminated: %s." % instance_id)
        return
    # BUG FIX: 'created' was previously computed from instance.extra BEFORE
    # the None-guard above, raising AttributeError for terminated instances.
    created = datetime.strptime(instance.extra['created'], "%Y-%m-%dT%H:%M:%SZ")
    #FIXME: this is not a safe way to retrieve username. this is not a CoreIdentity.
    send_instance_email(
        username,
        instance.id,
        instance.name,
        instance.ip,
        created,
        username,
        user_failure=True,
        user_failure_message=error_message
    )
# Deploy and Destroy tasks
@task(name="user_deploy_failed")
def user_deploy_failed(
    context,
    exception_msg,
    traceback,
    driverCls,
    provider,
    identity,
    instance_id,
    user,
    message=None,
    **celery_task_args
):
    """Report a user-caused deployment failure.

    Logs the failure context, emails the instance owner, and stamps the
    instance metadata with tmp_status 'user_deploy_error' plus the fault
    message and the tail of the traceback.

    :return: The formatted traceback string that was logged and emailed
    """
    try:
        celery_logger.debug(
            "user_deploy_failed task started at %s." % datetime.now()
        )
        celery_logger.info("failed task context=%s" % (context, ))
        celery_logger.info("exception_msg=%s" % (exception_msg, ))
        error_report = "Error Traceback:%s" % (traceback, )
        celery_logger.error(error_report)
        # Send deploy email
        _send_instance_email_with_failure(
            driverCls, provider, identity, instance_id, user.username,
            error_report
        )
        # Update metadata on the instance -- keep only the last 255 chars of
        # the traceback (metadata values are size-limited)
        trace_tail = str(traceback)[-255:]
        update_metadata.s(
            driverCls,
            provider,
            identity,
            instance_id,
            {
                'tmp_status': 'user_deploy_error',
                'fault_message': str(exception_msg),
                'fault_trace': trace_tail
            },
            replace_metadata=False
        ).apply_async()
        celery_logger.debug(
            "user_deploy_failed task finished at %s." % datetime.now()
        )
        return error_report
    except Exception as exc:
        celery_logger.warn(exc)
        user_deploy_failed.retry(exc=exc)
@task(name="deploy_failed")
def deploy_failed(
context, exception_msg, traceback, driverCls, provider, identity,
instance_id, **celery_task_args
):
try:
celery_logger.debug(
"deploy_failed task started at %s." % | |
dx,
y=self._view_rect.bottom + dy)
else:
raise ValueError('unknown position, only west, east, north, and'
'south are allowed')
if pos in (POSITION_NORTH, POSITION_SOUTH):
if self.get_hidden_width() != 0:
sbar.set_length(self._view_rect.width + d_size)
sbar.set_maximum(self.get_hidden_width())
sbar.set_page_step(self._view_rect.width * self.get_hidden_width() /
(self._view_rect.width + self.get_hidden_width()))
sbar.show()
else:
sbar.hide()
elif pos in (POSITION_EAST, POSITION_WEST):
if self.get_hidden_height() != 0:
sbar.set_length(self._view_rect.height + d_size)
sbar.set_maximum(self.get_hidden_height())
sbar.set_page_step(self._view_rect.height * self.get_hidden_height() /
(self._view_rect.height + self.get_hidden_height()))
sbar.show()
else:
sbar.hide()
def draw(self, surface: 'pygame.Surface') -> 'ScrollArea':
    """
    Draw the ScrollArea: background, world surface, visible scrollbars,
    and finally the post decorator.

    :param surface: Surface to render the area
    :return: Self reference
    """
    if not self._world:
        return self
    # Background surface already has previous decorators
    if self._area_color is not None:
        self._make_background_surface()
        bg_position = (self._rect.x - self._extend_x, self._rect.y - self._extend_y)
        surface.blit(self._bg_surface, bg_position)
    # Draw world surface
    # noinspection PyTypeChecker
    surface.blit(self._world, self._view_rect.topleft,
                 (self.get_offsets(), self._view_rect.size))
    # Then draw scrollbars; a bar is rendered only if its axis overflows
    for sbar in self._scrollbars:
        if not sbar.is_visible():
            continue
        if sbar.get_orientation() == ORIENTATION_HORIZONTAL:
            overflow = self.get_hidden_width()
        else:
            overflow = self.get_hidden_height()
        if overflow:
            sbar.draw(surface)
    # Draw post decorator
    self._decorator.draw_post(surface)
    return self
def get_hidden_width(self) -> int:
    """
    Return the total width out of the bounds of the viewable area.
    Zero is returned if the world width is lower than the viewable area.

    :return: Hidden width in px
    """
    if not self._world:
        return 0
    overflow = self._world.get_width() - self._view_rect.width
    return int(overflow) if overflow > 0 else 0
def get_hidden_height(self) -> int:
    """
    Return the total height out of the bounds of the viewable area.
    Zero is returned if the world height is lower than the viewable area.

    :return: Hidden height in px
    """
    if not self._world:
        return 0
    overflow = self._world.get_height() - self._view_rect.height
    return int(overflow) if overflow > 0 else 0
def get_offsets(self) -> Tuple2IntType:
    """
    Return the offset introduced by the scrollbars in the world.

    :return: ScrollArea offset on x-axis and y-axis (x, y)
    """
    offset_x, offset_y = 0, 0
    for sbar in self._scrollbars:
        if not sbar.is_visible():
            continue
        # Each scrollbar can only affect a single axis, so assignment
        # (not accumulation) is correct here.
        if sbar.get_orientation() == ORIENTATION_HORIZONTAL:
            if self.get_hidden_width():
                offset_x = sbar.get_value()
        elif self.get_hidden_height():
            offset_y = sbar.get_value()
    return offset_x, offset_y
def get_rect(self, to_real_position: bool = False) -> 'pygame.Rect':
    """
    Return the :py:class:`pygame.Rect` object of the ScrollArea.

    :param to_real_position: Get real position of the scroll area
    :return: Pygame.Rect object
    """
    rect = self._rect.copy()
    return self.to_real_position(rect) if to_real_position else rect
def get_scrollbar_thickness(self, orientation: str, visible: bool = True) -> int:
    """
    Return the scroll thickness of the area. If it's hidden return zero.

    :param orientation: Orientation of the scroll. See :py:mod:`pygame_menu.locals`
    :param visible: If ``True`` returns the real thickness depending on if it is visible or not
    :return: Thickness in px
    """
    assert_orientation(orientation)
    assert isinstance(visible, bool)
    if visible:
        # Sum the thickness of every visible bar with this orientation
        return sum(
            sbar.get_thickness()
            for sbar in self._scrollbars
            if sbar.get_orientation() == orientation and sbar.is_visible()
        )
    if orientation == ORIENTATION_HORIZONTAL:
        return int(self._rect.height - self._view_rect.height)
    elif orientation == ORIENTATION_VERTICAL:
        return int(self._rect.width - self._view_rect.width)
def get_world_rect(self, absolute: bool = False) -> 'pygame.Rect':
    """
    Return the world rect.

    :param absolute: To absolute position
    :return: World rect object
    """
    world_rect = self._world.get_rect()
    if absolute:
        world_rect = self.to_absolute_position(world_rect)
    return world_rect
def get_view_rect(self) -> 'pygame.Rect':
    """
    Subtract width of scrollbars from area with the given size and return
    the viewable area.
    The viewable area depends on the world size, because scroll bars may or
    may not be displayed.

    :return: View rect object
    """
    rect = pygame.Rect(self._rect)
    # No scrollbar: area is large enough to display world
    if not self._world or (self._world.get_width() <= self._rect.width
                           and self._world.get_height() <= self._rect.height):
        return rect
    # All scrollbars: the world is too large in both axes, so every
    # configured scrollbar is shown; shrink (and shift) the view by one
    # scrollbar thickness per configured position.
    if self._world.get_height() > self._rect.height \
            and self._world.get_width() > self._rect.width:
        if POSITION_WEST in self._scrollbar_positions:
            rect.left += self._scrollbar_thick
            rect.width -= self._scrollbar_thick
        if POSITION_EAST in self._scrollbar_positions:
            rect.width -= self._scrollbar_thick
        if POSITION_NORTH in self._scrollbar_positions:
            rect.top += self._scrollbar_thick
            rect.height -= self._scrollbar_thick
        if POSITION_SOUTH in self._scrollbar_positions:
            rect.height -= self._scrollbar_thick
        return rect
    # Calculate the maximum variations introduced by the scrollbars
    bars_total_width = 0
    bars_total_height = 0
    if POSITION_NORTH in self._scrollbar_positions:
        bars_total_height += self._scrollbar_thick
    if POSITION_SOUTH in self._scrollbar_positions:
        bars_total_height += self._scrollbar_thick
    if POSITION_WEST in self._scrollbar_positions:
        bars_total_width += self._scrollbar_thick
    if POSITION_EAST in self._scrollbar_positions:
        bars_total_width += self._scrollbar_thick
    # World taller than the view: vertical bars appear on W/E sides...
    if self._world.get_height() > self._rect.height:
        if POSITION_WEST in self._scrollbar_positions:
            rect.left += self._scrollbar_thick
            rect.width -= self._scrollbar_thick
        if POSITION_EAST in self._scrollbar_positions:
            rect.width -= self._scrollbar_thick
        # ...and if the narrowed view no longer fits the world width,
        # horizontal bars are also required
        if self._world.get_width() > self._rect.width - bars_total_width:
            if POSITION_NORTH in self._scrollbar_positions:
                rect.top += self._scrollbar_thick
                rect.height -= self._scrollbar_thick
            if POSITION_SOUTH in self._scrollbar_positions:
                rect.height -= self._scrollbar_thick
    # World wider than the view: horizontal bars appear on N/S sides...
    if self._world.get_width() > self._rect.width:
        if POSITION_NORTH in self._scrollbar_positions:
            rect.top += self._scrollbar_thick
            rect.height -= self._scrollbar_thick
        if POSITION_SOUTH in self._scrollbar_positions:
            rect.height -= self._scrollbar_thick
        # ...and if the shortened view no longer fits the world height,
        # vertical bars are also required
        if self._world.get_height() > self._rect.height - bars_total_height:
            if POSITION_WEST in self._scrollbar_positions:
                rect.left += self._scrollbar_thick
                rect.width -= self._scrollbar_thick
            if POSITION_EAST in self._scrollbar_positions:
                rect.width -= self._scrollbar_thick
    return rect
def hide_scrollbars(self, orientation: str) -> 'ScrollArea':
    """
    Hide every scrollbar with the given orientation.

    :param orientation: Orientation. See :py:mod:`pygame_menu.locals`
    :return: Self reference
    """
    assert_orientation(orientation)
    matching = (s for s in self._scrollbars if s.get_orientation() == orientation)
    for sbar in matching:
        sbar.hide()
    return self
def show_scrollbars(self, orientation: str) -> 'ScrollArea':
    """
    Show scrollbar from given orientation.

    (Docstring previously said "Hide" -- copy/paste error from
    ``hide_scrollbars``; the method shows the bars.)

    :param orientation: Orientation. See :py:mod:`pygame_menu.locals`
    :return: Self reference
    """
    assert_orientation(orientation)
    for sbar in self._scrollbars:
        if sbar.get_orientation() == orientation:
            sbar.show()
    return self
def get_world_size(self) -> Tuple2IntType:
    """
    Return the world size.

    :return: Width, height in pixels
    """
    world = self._world
    if world is None:
        return 0, 0
    return world.get_width(), world.get_height()
def get_size(self, inner: bool = False) -> Tuple2IntType:
    """
    Return the area size.

    :param inner: If ``True`` returns the rect view area
    :return: Width, height in pixels
    """
    target = self._view_rect if inner else self._rect
    return target.width, target.height
def mouse_is_over(self, view: bool = False) -> bool:
    """
    Return ``True`` if the mouse is placed over the ScrollArea.

    :param view: If ``True`` uses "view rect" instead of "rect"
    :return: ``True`` if the mouse is over the object
    """
    target = self._view_rect if view else self._rect
    absolute = self.to_absolute_position(target)
    return bool(absolute.collidepoint(*pygame.mouse.get_pos()))
def _on_horizontal_scroll(self, value: NumberType) -> None:
    """
    Called when a horizontal scroll bar changes, to synchronize the
    position of the opposite one if it exists.

    :param value: New position of the slider
    """
    for sbar in self._scrollbars:
        is_horizontal = sbar.get_orientation() == ORIENTATION_HORIZONTAL
        if is_horizontal and self.get_hidden_width() != 0 and sbar.get_value() != value:
            sbar.set_value(value)
def _on_vertical_scroll(self, value: NumberType) -> None:
    """
    Called when a vertical scroll bar changes, to synchronize the
    position of the opposite one if it exists.

    :param value: New position of the slider
    """
    for sbar in self._scrollbars:
        is_vertical = sbar.get_orientation() == ORIENTATION_VERTICAL
        if is_vertical and self.get_hidden_height() != 0 and sbar.get_value() != value:
            sbar.set_value(value)
def get_parent_scroll_value_percentage(self, orientation: str) -> Tuple[float, ...]:
    """
    Get percentage scroll values of scroll and parents; if ``0`` the scroll
    is at top/left, ``1`` bottom/right.

    :param orientation: Orientation. See :py:mod:`pygame_menu.locals`
    :return: Value from ``0`` to ``1`` as a tuple; first item is the current scrollarea
    """
    values = [self.get_scroll_value_percentage(orientation)]
    # Walk up the parent chain. (The previous "if parent is not None:
    # while True: if parent is None: break" construct was flattened, and
    # the return annotation fixed from Tuple[float] to Tuple[float, ...].)
    parent = self._parent_scrollarea
    while parent is not None:
        values.append(parent.get_scroll_value_percentage(orientation))
        parent = parent._parent_scrollarea
    return tuple(values)
def get_scroll_value_percentage(self, orientation: str) -> float:
    """
    Get the scroll value in percentage; if ``0`` the scroll is at top/left,
    ``1`` bottom/right.

    .. note::

        If ScrollArea does not contain such orientation scroll, ``-1`` is returned.

    :param orientation: Orientation. See :py:mod:`pygame_menu.locals`
    :return: Value from ``0`` to ``1``
    """
    assert_orientation(orientation)
    for sbar in self._scrollbars:
        if sbar.is_visible() and sbar.get_orientation() == orientation:
            return sbar.get_value_percentage()
    return -1
def scroll_to(self, orientation: str, value: NumberType) -> 'ScrollArea':
    """
    Scroll to position in terms of the percentage.

    :param orientation: Orientation. See :py:mod:`pygame_menu.locals`
    :param value: If ``0`` scrolls to top/left, ``1`` to bottom/right
    :return: Self reference
    """
    assert_orientation(orientation)
    assert isinstance(value, NumberInstance) and 0 <= value <= 1
    for sbar in self._scrollbars:
        # Only the first visible bar with the requested orientation moves
        if not (sbar.is_visible() and sbar.get_orientation() == orientation):
            continue
        v_min, v_max = sbar.get_minmax()
        span = v_max - v_min
        sbar.set_value(int(min(v_min + span * float(value), v_max)))
        break
    return self
# noinspection PyTypeChecker
def scroll_to_rect(
self,
rect: 'pygame.Rect',
margin: Tuple2NumberType = (0, 0),
scroll_parent: bool = True
) -> bool:
"""
Ensure that the given rect is in the viewable area.
:param rect: Rect in the world surface reference
:param margin: Extra margin around the rect on x-axis and y-axis in px
:param scroll_parent: If ``True`` parent scroll also | |
# coding: utf-8
"""Mapping of production and consumption mixes in Europe and their effect on
the carbon footprint of electric vehicles
This code performs the following:
- Import data from ENTSO-E (production quantities, trades relationships)
- Calculates the production and consumption electricity mixes for European countries
- Calculates the carbon footprint (CF) for the above electricity mixes
- Calculates the production, use-phase and end-of-life emissions for battery electric vehicles (BEVs) under
  the following assumptions:
- Production in Korea (with electricity intensity 684 g CO2-eq/kWh)
- Use phase uses country-specific production and consumption mix
- End-of-life emissions static for all countries
Requires the following files for input:
- ENTSO_production_volumes.csv (from hybridized_impact_factors.py)
- final_emission_factors.csv (from hybridized_impact_factors.py)
- trades.csv (from hybridized_impact_factors.py)
- trade_ef_hv.csv (from hybridized_impact_factors.py)
- API_EG.ELC.LOSS.ZS_DS2_en_csv_v2_673578.csv (transmission losses, from OECD)
- car_specifications.xlsx
"""
import os
from datetime import datetime
import numpy as np
import pandas as pd
import country_converter as coco
import logging
#%% Main function
def run_calcs(run_id, year, no_ef_countries, export_data=True, include_TD_losses=True, BEV_lifetime=180000, ICEV_lifetime=180000, flowtrace_el=True, allocation=True, production_el_intensity=679, incl_ei=False, energy_sens=False):
    """Run all electricity mix and vehicle calculations and exports results.

    :param run_id: Identifier used to label exported files and pickles
    :param year: Year of ENTSO-E electricity data to load
    :param no_ef_countries: Countries lacking emission factors (forwarded to the SI export)
    :param export_data: If True, export intermediate calculation results
    :param include_TD_losses: Include transmission/distribution losses (bool, or a float used as a constant loss factor)
    :param BEV_lifetime: BEV lifetime in km
    :param ICEV_lifetime: ICEV lifetime in km
    :param flowtrace_el: Use the flow-tracing (Leontief) trade approach instead of the grid-average assumption
    :param allocation: Allocation flag forwarded to BEV_calcs -- see that function for semantics
    :param production_el_intensity: Vehicle-production electricity intensity in g CO2/kWh
        (default 679: Korean el-mix, from ecoinvent)
    :param incl_ei: Use ecoinvent proxy factors for countries with missing consumption mixes
    :param energy_sens: Sensitivity flag forwarded to BEV_calcs -- presumably an
        energy-use sensitivity run; confirm against BEV_calcs
    :return: (BEV footprint with consumption mix, ICEV production+EOL impacts,
        ICEV operating intensity, SI file path)
    """
    fp = os.path.curdir
    # Load ENTSO-E production volumes, trade flows and emission factors
    production, trades, trade_ef, country_total_prod_disagg, country_total_cons_disagg, g_raw, C = load_prep_el_data(fp, year)
    # Leontief electricity calculations (production/consumption mixes + CFs)
    codecheck_file, elmixes, trade_only, country_el, CFEL, CFCI = el_calcs(flowtrace_el, run_id, fp, C, production, country_total_prod_disagg, country_total_cons_disagg, g_raw, trades, trade_ef, include_TD_losses, incl_ei, export_data)
    # Vehicle life-cycle calculations (BEV vs ICEV)
    results_toSI, ICEV_total_impacts, ICEV_prodEOL_impacts, ICEV_op_int = BEV_calcs(fp, country_el, production, elmixes, BEV_lifetime, ICEV_lifetime, production_el_intensity, CFCI, allocation, energy_sens)
    # Export supporting information and pickle the result tables
    SI_fp = export_SI(run_id, results_toSI, production, trades, C, CFEL, no_ef_countries)
    pickle_results(run_id, results_toSI, CFEL, ICEV_total_impacts, codecheck_file, export_data)
    return results_toSI['BEV footprint'].xs('Consumption mix', level=1, axis=1), ICEV_prodEOL_impacts, ICEV_op_int, SI_fp
#%% Load and format data for calculations
def load_prep_el_data(fp, year):
    """Load electricity data and emissions factors.

    Reads ENTSO-E production volumes, trade flows and hybridized emission
    factors for the given year, plus ecoinvent proxy factors for countries
    that trade electricity but report no production. Cyprus is dropped
    manually from all tables.

    :param fp: Base path containing the 'output' directory
    :param year: Year of the data files to read
    :return: (production, trades, trade_ef, country_total_prod_disagg,
              country_total_cons_disagg, g_raw, C)
    """
    fp_output = os.path.join(fp, 'output')
    # Output from bentso.py
    filepath_production = os.path.join(fp_output, 'entsoe', 'ENTSO_production_volumes_'+ str(year) +'.csv')
    filepath_intensities = os.path.join(fp_output, 'final_emission_factors_'+ str(year) +'.csv')
    filepath_trades = os.path.join(fp_output, 'entsoe', 'trades_'+ str(year) +'.csv')
    filepath_tradeonly_ef = os.path.join(fp_output, 'ecoinvent_ef_hv.csv')
    # read in production mixes (annual average)
    production = pd.read_csv(filepath_production, index_col=0)
    production.rename_axis(index='', inplace=True)
    # matrix of total imports/exports of electricity between regions; aka Z matrix
    trades = pd.read_csv(filepath_trades, index_col=0)
    trades.fillna(0, inplace=True)  # replace np.nan with 0 for matrix math, below
    # manually remove Cyprus for now
    production.drop(index='CY', inplace=True)
    trades = trades.drop(columns='CY').drop(index='CY')
    imports = trades.sum(axis=0)
    exports = trades.sum(axis=1)
    # Total production, and consumption = production + imports - exports
    country_total_prod_disagg = production.sum(axis=1)
    country_total_cons_disagg = country_total_prod_disagg + imports - exports
    # NOTE: the unused waste-share diagnostics (waste, waste_min, waste_max)
    # were removed here; they were computed but never read.
    g_raw = production.sum(axis=1)  # Vector of total electricity production (regionalized)
    # Read power plant CO2 intensities [tech averages]
    # average technology CO2 intensities (i.e., non-regionalized)
    all_C = pd.read_csv(filepath_intensities, index_col=0)
    all_C.drop(index='CY', inplace=True)
    # use ecoinvent factors for these countries as a proxy to calculate consumption mixes for receiving countries
    trade_ef = pd.read_csv(filepath_tradeonly_ef, index_col=[0, 1, 2, 3], header=[0])
    trade_ef.index = trade_ef.index.droplevel([0, 1, 3])  # remove DSID, activityName and productName (leaving geography)
    trade_ef.index.rename('geo', inplace=True)
    trade_ef.columns = ['emission factor']
    # Generate regionalized tech generation matrix
    C = all_C.T
    C.sort_index(axis=1, inplace=True)
    C.sort_index(axis=0, inplace=True)
    return production, trades, trade_ef, country_total_prod_disagg, country_total_cons_disagg, g_raw, C
#%% el_calcs
def el_calcs(flowtrace_el, run_id, fp, C, production, country_total_prod_disagg, country_total_cons_disagg, g_raw, trades, trade_ef, include_TD_losses, incl_ei, export_data):
fp_data = os.path.join(fp, 'data')
# Make list of full-country resolution
original_countries = list(production.index)
# Make list of aggregated countries (affects Nordic countries + GB (UK+NI))
# read 3-letter ISO codes
countries = list(trades.index)
""" Calculates national production mixes and consumption mixes using Leontief assumption """
# Start electricity calculations (ELFP.m)
# Calculate production and consumption mixes
# Carbon intensity of production mix
CFPI_no_TD = pd.DataFrame(production.multiply(C.T).sum(axis=1) / production.sum(axis=1), columns=['Production mix intensity']) # production mix intensity without losses
CFPI_no_TD.fillna(0, inplace=True)
# List of countries that have trade relationships, but no production data
trade_only = list(set(trades.index) - set(production.loc[production.sum(axis=1) > 0].index))
# Add ecoinvent proxy emission factors for trade-only countries
logging.info('Replacing missing production mix intensities with values from ecoinvent:')
for country in trade_only:
if CFPI_no_TD.loc[country, 'Production mix intensity'] == 0:
logging.info(country)
CFPI_no_TD.loc[country] = trade_ef.loc[country].values
i = country_total_cons_disagg.size # Number of European regions
g = g_raw
g = g.sort_index() # total generation vector (local production for each country)
total_imported = trades.sum(axis=0) # sum rows for total imports
total_exported = trades.sum(axis=1) # sum columns for total exports
y = total_imported + g - total_exported # total final demand (consumption) of electricity
q = g + total_imported # vector of total consumption
q.replace(np.nan, 0, inplace=True)
if flowtrace_el:
# For flow tracing approach: make Leontief production functions (normalize columns of A)
# normalized trade matrix quadrant
Atmx = pd.DataFrame(np.matmul(trades, np.linalg.pinv(np.diag(q))))
# normalized production matrix quadrant
Agen = pd.DataFrame(np.diag(g) * np.linalg.pinv(np.diag(q)), index=countries, columns=countries) # coefficient matrix, generation
# "Trade" Leontief inverse
# Total imports from region i to j per unit demand on j
Ltmx = pd.DataFrame(np.linalg.pinv(np.identity(i) - Atmx), trades.columns, trades.index)
# Production in country i for trade to country j
# Total generation in i (rows) per unit demand j
Lgen = pd.DataFrame(np.matmul(Agen, Ltmx), index=Agen.index, columns=Ltmx.columns)
y_diag = pd.DataFrame(np.diag(y), index=countries, columns=countries)
# total imports for given demand
Xtmx = pd.DataFrame(np.matmul(np.linalg.pinv(np.identity(i) - Atmx), y_diag))
# Total generation to satisfy demand (consumption)
Xgen = np.matmul(np.matmul(Agen, Ltmx), y_diag)
Xgen.sum(axis=0)
Xgen_df = pd.DataFrame(Xgen, index=Agen.index, columns=y_diag.columns)
# ### Check electricity generated matches demand
totgen = Xgen.sum(axis=0)
r_gendem = totgen / y # All countries should be 1
#%% Generation techonlogy matrix
# TC is a country-by-generation technology matrix - normalized to share of total domestic generation, i.e., normalized generation/production mix
# technology generation, kWh/ kWh domestic generated electricity
TC = pd.DataFrame(np.matmul(np.linalg.pinv(np.diag(g)), production), index=g.index, columns=production.columns)
TCsum = TC.sum(axis=1) # Quality assurance - each country should sum to 1
# Calculate technology generation mix in GWh based on production in each region
TGP = pd.DataFrame(np.matmul(TC.transpose(), np.diag(g)), index=TC.columns, columns=g.index) #.== production
# Carbon intensity of consumption mix
CFCI_no_TD = pd.DataFrame(np.matmul(CFPI_no_TD.T.values, Lgen), columns=CFPI_no_TD.index).T
else:
# Use grid-average assumption for trade
prod_emiss = production.multiply(C.T).sum(axis=1)
trade_emiss = (pd.DataFrame(np.diag(CFPI_no_TD.iloc(axis=1)[0]), index=CFPI_no_TD.index, columns=CFPI_no_TD.index)).dot(trades)
CFCI_no_TD = pd.DataFrame((prod_emiss + trade_emiss.sum(axis=0) - trade_emiss.sum(axis=1)) / y)
CFCI_no_TD.columns = ['Consumption mix intensity']
# use ecoinvent for missing countries
if incl_ei:
CFCI_no_TD.update(trade_ef.rename(columns={'emission factor':'Consumption mix intensity'}))
#%% Calculate losses
# Transpose added after removing country aggregation as data pre-treatment
if include_TD_losses:
# Calculate technology characterization factors including transmission and distribution losses
# First, read transmission and distribution losses, downloaded from World Bank economic indicators (most recent values from 2014)
if isinstance(include_TD_losses, float):
TD_losses = include_TD_losses # apply constant transmission and distribution losses to all countries
elif isinstance(include_TD_losses, bool):
losses_fp = os.path.join(fp_data, 'API_EG.ELC.LOSS.ZS_DS2_en_csv_v2_673578.csv')
try:
TD_losses = pd.read_csv(losses_fp, skiprows=[0,1,2,3], usecols=[1, 58], index_col=0)
TD_losses = TD_losses.iloc[:, -7:].dropna(how='all', axis=1)
TD_losses = TD_losses.apply(lambda x: x / 100 + 1) # convert losses to a multiplicative factor
# ## Calculate total national carbon emissions from el - production and consumption mixes
TD_losses.index = coco.convert(names=TD_losses.index.tolist(), to='ISO2', not_found=None)
TD_losses = TD_losses.loc[countries]
TD_losses = pd.Series(TD_losses.iloc[:, 0])
except:
print("Warning! Transmission and distribution losses input files not found!")
TD_losses = pd.Series(np.zeros(len(production.index)), index=production.index)
else:
print('invalid entry for losses')
# Caclulate carbon intensity of production and consumption mixes including losses
CFPI_TD_losses = CFPI_no_TD.multiply(TD_losses, axis=0).dropna(how='any', axis=0) # apply transmission and distribution losses to production mix intensity
CFCI_TD_losses = CFCI_no_TD.multiply(TD_losses, axis=0).dropna(how='any', axis=0)
if len(CFCI_TD_losses) < len(CFPI_TD_losses):
CFCI_TD_losses = CFCI_no_TD.multiply(TD_losses, axis=0)
CFPI = CFPI_TD_losses
CFCI = CFCI_TD_losses
else:
CFPI = CFPI_no_TD
CFCI = CFCI_no_TD
elmixes = (CFPI.copy()).join(CFCI.copy()).T
#%%
# Aggregate multi-nodes to single countries using weighted average of production/consumption as appropriate
country_total_prod_disagg.columns = ["Total production (TWh)"]
country_total_prod_disagg.index = original_countries
country_total_cons_disagg.columns = ["Total consumption (TWh)"]
country_total_cons_disagg.index = original_countries
country_el = pd.concat([country_total_prod_disagg, country_total_cons_disagg], axis=1)
country_el.columns = ['Total production (TWh)', 'Total consumption (TWh)']
CFEL_mixes = elmixes.T
CFEL = pd.concat([country_el, CFEL_mixes], axis=1)
imports = trades.sum(axis=0)
exports = trades.sum(axis=1)
CFEL['Trade percentage, gross'] = (imports + exports) / CFEL['Total production (TWh)']
CFEL['Import percentage'] = imports / CFEL['Total production (TWh)']
CFEL['Export percentage'] = exports / CFEL['Total production (TWh)']
CFEL['imports'] = imports
CFEL['exports'] = exports
#Calculate total carbon footprint intensity ratio production vs consumption
rCP = CFCI['Consumption mix intensity'].divide(CFPI['Production mix intensity'])
rCP.columns = ["ratio consumption:production mix"]
# Export intermediate variables from calculations for troubleshooting
| |
#!/usr/bin/env python3
import sqlite3
import json
import flask
import re
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import re
import time
from plotly.validators.scatter.marker import SymbolValidator
import math
import dash_table
from dash.exceptions import PreventUpdate
import plotly.graph_objs as go
import plotly.express as px
from plotly.subplots import make_subplots
import pandas as pd
import numpy as np
##################################################################################################################
# loading files
##################################################################################################################
print("\n========= loading files =========\n")
## For each gene and year and cell type: how many papers?
gene_celltype_year_papers = pd.read_csv('data/gene_celltype_year_papers.csv')
## For each gene and year: how many papers?
gene_year_papers = pd.read_csv('data/gene_year_papers.csv')
## For each cell type & gene: What is the expression level and normalized number of papers?
gene_celltype_papercount_exp = pd.read_csv('data/gene_celltype_papercount_exp.csv')
## Genes shown initially when the page is loaded
starting_genes = open('data/starting_genes.csv').read().split(',')
## List of unique genes
all_genes = gene_celltype_year_papers['gene'].unique()
#################
# create the gene dropdown for genes with known names (ie. name != ensembl_id)
# data_genelist is: {"ensg":"gene symbol"}
data_genelist = pd.read_csv('data/genelist.csv')
# Forward map ensembl id -> gene symbol, and the inverse for user-typed symbols
genes_dropdown_options = { v[0]: v[1] for v in data_genelist[ ['Ensembl.Gene.ID', 'Associated.Gene.Name'] ].values }
genes_dropdown_options_inv = {v: k for k, v in genes_dropdown_options.items()}
# Dropdown entries, sorted alphabetically by displayed gene symbol
dropdown_genesymbols_sorted = [ {'label': genes_dropdown_options[key], 'value':key} for key in genes_dropdown_options ]
dropdown_genesymbols_sorted = sorted(dropdown_genesymbols_sorted, key=lambda t : t["label"])
# add genes dictionary for genes display (this is a duplicate) .... TODO, we should not have this twice ideally.
# Genes dict is: {"ensg":"gene symbol - long name"}
genes_dict = pd.read_csv('data/mus_musculus_genes_dictionary.csv')
genes_dict = { gene[1]: gene[2] for gene in genes_dict.values }
# genes_dict = { gene[1]: gene[2] + ' - ' + gene[3] for gene in genes_dict.values }
# for genename in data_genelist["Ensembl.Gene.ID"].tolist():
#     if genename not in genes_dict:
#         genes_dict[genename] = genename
# Word-wrap each gene description into lines of roughly 40 characters,
# joined with '<br>' for HTML/plotly hover display.
for g in genes_dict:
    words = []
    lines = []
    for w in genes_dict[g].split(' '):
        words.append(w)
        if sum([len(word) for word in words]) > 40:
            lines.append(' '.join(words))
            words = []
    if len(words):
        lines.append(' '.join(words))
    genes_dict[g] = '<br>'.join(lines)
## Load coordinate system names
coordinate_list_data = pd.read_csv("data/list_coordinates.csv")
coordinate_list = {v[0]: v[1] for v in coordinate_list_data[["coord_id","coord_name"]].values}
## Load cell types (read-only sqlite connection)
conn = sqlite3.connect("file:data/totfeature.sqlite?mode=ro", uri=True)
celltype_data = pd.read_sql_query("SELECT DISTINCT ct from feature_matrix ORDER BY ct", conn)
celltype_list = celltype_data["ct"]
conn.close()
############################
## Load features (to be shown as color)
feature_data = pd.read_csv("data/feature_long_name.csv")
feature_list = { v[0]: v[1] for v in feature_data[ ['feature_id', 'feature_long_name']].values }
## Load cell types
celltype_dependence_data = pd.read_csv("data/list_coordinates.csv")
celltype_dependence_data.index = celltype_dependence_data["coord_id"].tolist()
##################################################################################################################
# Helper functions
##################################################################################################################
# def parse_genes(genes_textbox):
#
# '''
# sanitizes the list of genes provided in the textbox
# converting it into a python array of valid ensembl genes
# '''
#
# # split at commas and remove trailing spaces
# genes = [ g.strip() for g in genes_textbox.split(',') ]
#
# # replace gene names with their ensembl ids
# genes = [ genes_dropdown_options[g] if g in genes_dropdown_options else g for g in genes ]
#
# # removes mistyped and duplicate genes
# genes = list(set([ g for g in genes if g in genes_dictionary ]))
#
# return genes
###################################
def convert_genenames_to_ensg(genes):
    """Map gene symbols to Ensembl IDs and keep only known genes.

    Names present in ``genes_dropdown_options_inv`` are replaced by their
    Ensembl ID; anything else passes through unchanged.  The result is then
    filtered against ``genes_dict`` and de-duplicated (order not preserved).
    """
    ensembl_ids = [genes_dropdown_options_inv.get(g, g) for g in genes]
    return list({g for g in ensembl_ids if g in genes_dict})
###################################
def convert_ensg_to_genenames(genes):
    """Translate Ensembl gene IDs into display names.

    Looks every ID up in ``genes_dropdown_options``; a missing ID raises
    KeyError, so callers should pass only validated IDs.
    """
    return [genes_dropdown_options[gene_id] for gene_id in genes]
###################################
###################################
def parse_genes(genes_textbox):
    """Sanitize the comma-separated gene list typed into the textbox.

    Splits on commas, strips surrounding whitespace from each token, and
    returns the corresponding list of valid Ensembl gene IDs (via
    ``convert_genenames_to_ensg``).
    """
    tokens = [token.strip() for token in genes_textbox.split(',')]
    return convert_genenames_to_ensg(tokens)
##################################################################################################################
# Function: Make the histogram of #citations over time for a given gene
##################################################################################################################
def histogram_citationsperyear(gene_id):
    """Build a small area chart of citations per year for one gene.

    Queries the read-only citations database; when the gene has no
    citation records an empty figure with the same compact layout is
    returned so the panel keeps its size.
    """
    conn = sqlite3.connect("file:data/citations_per_year.sqlite?mode=ro", uri=True)
    citationsperyear = pd.read_sql_query("SELECT * from citationsperyear where ensembl == ?", conn, params=(gene_id,))
    conn.close()
    compact_layout = dict(autosize=False, width=400, height=100,
                          margin={'t': 0, 'b': 0, 'l': 0, 'r': 0})
    fig = go.Figure()
    if citationsperyear.shape[0] != 0:
        fig.add_trace(go.Scatter(
            x=citationsperyear["year"].tolist(),
            y=citationsperyear["citations"].tolist(),
            fill='tozeroy'))  # fill the area down to the x-axis
    fig.update_layout(**compact_layout)
    return fig
##################################################################################################################
##### The main window layout
##################################################################################################################
## Mount the Dash app on an explicit Flask server so the viewer is
## reachable under the /genepub/ URL prefix.
print("\n========= starting server =========\n")
server = flask.Flask(__name__)
app = dash.Dash(
    __name__,
    server=server,
    routes_pathname_prefix='/genepub/')
app.config.suppress_callback_exceptions = True ###Note, dangerous -- see if needed. used if components are added after initialization
app.title = "Data viewer: 10 reasons to study a gene"
app.layout = html.Div([
    html.Div([
        html.Div([
            # textbox for selecting genes using ensembl id or names; all plots updates are connected to this element
            html.Label('Selected gene:'),
            dcc.Input(
                id='gene-textbox',
                type='text',
                value='',
                #list='list-suggested-inputs', #Don't suggest EnsemblIDs
                placeholder='Comma-separated list of genes to inspect',
                style={'width': '100%', 'height': '40px'}
            ),
            # gene selection through dropdown; this will add the gene id to the textbox above
            html.Div([
                dcc.Dropdown(
                    id='genes-dropdown',
                    value ='',
                    options=dropdown_genesymbols_sorted,
                    placeholder='Select a gene using its name',)
            ], id='genes-dropdown-timestamp', n_clicks_timestamp = 1),
            # scatter-plot controls: cell type, coordinate system, color feature
            html.Div([html.Label(['Cell type:'])], style = {'display': 'block', 'width': '24%','height': '32px'} ),
            dcc.Dropdown(
                id='cell-type-selected',
                value= 'T cell',
                options=[{'label': i, 'value': i} for i in celltype_list],
                placeholder = 'Cell type'),
            html.Div([html.Label(['Coordinates:'])], style = {'display': 'block', 'width': '24%','height': '32px'} ),
            dcc.Dropdown(
                id='coord-id-selected',
                placeholder = 'coordinate',
                options=[{'label': coordinate_list[i], 'value': i} for i in coordinate_list],
                value='coexp'),
            html.Div([html.Label(['Color by:'])], style = {'display': 'block', 'width': '24%','height': '32px'} ),
            dcc.Dropdown(
                id='color-by-selected',
                placeholder = 'color by',
                options=[{'label': feature_list[i], 'value': i} for i in feature_list],
                value='rank_pmid'),
            # MIMS logo linking to the institute homepage
            html.Div([
                html.A([
                    html.Img(src=app.get_asset_url('MIMS_logo_blue.svg'), style={
                        'height': '30px',
                        #'float':'right',
                        'padding': '10px 10px'}),
                ], href='http://www.mims.umu.se/')
            ], style={'text-align':'right'})
        ], style={
            'border': 'thin lightgrey solid',
            'backgroundColor': 'rgb(250, 250, 250)',
            'padding': '10px 5px',
            'width':'100%'
        }),
        #html.Br(),
        #####################################################
        ########## Gene information panel ###################
        #####################################################
        html.Div([
            #html.H1(html.Label(id='geneinfo-symbol')),
            html.H4(html.Label(id='geneinfo-longname')),
            html.Label(id="geneinfo-firstcited"),
            html.Label(id="geneinfo-numcitations"),
            html.Label(id="geneinfo-numcitations-xg"),
            html.Div([
                dcc.Graph( id='citationsperyear-histogram')
            ],style={
                'display': 'inline-block',
                'margin': 'auto'
            }),
            # external links; href='' placeholders presumably filled by
            # callbacks not visible in this chunk -- confirm
            html.Div([
                html.A(['Ensembl'], id='ensembl-link', href='', target='_blank')," | ",
                html.A(['UniProt'], id='uniprot-link', href='', target='_blank')," | ",
                html.A(['PubMed'], id='pubmed-link', href='', target='_blank')," | ",
                html.A(['Genecards'], id='genecards-link', href='', target='_blank')
            ])
        ], style={
            'margin-top':'50px',
            'border': 'thin lightgrey solid',
            'backgroundColor': 'rgb(250, 250, 250)',
            'padding': '10px 5px',
            'width':'100%'
        }, id='geneinfo-div'),
    ], style={'float':'right','width':'25%', 'padding':'20px'}),
    # main scatter plot; the control/info column above floats to its right
    html.Div([
        dcc.Graph( id='scatter-plot')#, figure=scatterplot())
    ],style={
        'display': 'inline-block',
        'margin': '0 auto'
    }),
],style={
    'position': 'inline-block',
    'width': '95%',
    'height': '95%',
    'margin': '0 auto', #supposed to center it... but meah.
    'padding':'0',
    'overflow':'hidden'
})
##################################################################################################################
# page callbacks
# test genes = ENSMUSG00000000126,ENSMUSG00000095309,ENSMUSG00000097090
##################################################################################################################
################################################################################
@app.callback(
    Output('gene-textbox', 'value'),
    [Input('genes-dropdown', 'value')])
def update_genes_dropdown(dropdown_value):
    """Mirror the dropdown selection into the gene textbox.

    The textbox is the element every plot callback listens to; it can also
    be edited by hand.  A missing dropdown value becomes the empty string.
    """
    return '' if dropdown_value is None else dropdown_value
##################################################################################################################
# Function: Make the scatter plot for all the genes
##################################################################################################################
@app.callback(Output('scatter-plot', 'figure'),
[Input('gene-textbox', 'value'),
Input('cell-type-selected', 'value'),
Input('coord-id-selected', 'value'),
Input('color-by-selected', 'value')])
def update_graph(selected_genes, celltype,coordid,color):
##Check if the coordinates are per cell type; otherwise just use the columns x,y
celltype_dependence = celltype_dependence_data.loc[coordid,"perct"]
if celltype_dependence:
xcoord_name = "x_"+celltype
ycoord_name = "y_"+celltype
else:
xcoord_name = "x"
ycoord_name = "y"
##Load coordinates
conn = sqlite3.connect("file:data/coord_" + coordid + ".sqlite?mode=ro", uri=True)
coord_data = pd.read_sql_query("SELECT gene, `"+xcoord_name+"` as x, `"+ycoord_name+"` as y from coord", conn)
conn.close()
##Load the feature we need
conn = sqlite3.connect("file:data/totfeature.sqlite?mode=ro", uri=True)
feature_data = pd.read_sql_query("SELECT gene, `"+color+"` as feature from feature_matrix where ct == ?", conn, params=(celltype,))
conn.close()
##Merge coordinates and features
coord_data_plot = coord_data.merge(feature_data,left_on = "gene", right_on = "gene", indicator = True)
##Check if there is any data to plot, otherwise return an empty graph
if coord_data_plot.shape[0]==0:
fig = go.Figure()
fig.update_layout( autosize= False, width = 800, height = 800)
return fig
##Check if any genes are selected and should be highlighted
if not len(selected_genes):
selected_genes = []
else:
selected_genes = list(selected_genes.split(","))
selected_genes = convert_ensg_to_genenames(selected_genes)
##Drop crappy values - usually missing features
coord_data_plot = coord_data_plot.dropna()
#Extract X,Y coordinates of selected genes
vlines = [coord_data_plot["x"].values[i] for i,v in enumerate(coord_data_plot["gene"].tolist()) if v in selected_genes]
hlines = [coord_data_plot["y"].values[i] for i,v in enumerate(coord_data_plot["gene"].tolist()) if v in selected_genes]
#Extarct X,Y coordinates of all genes, their feature value (color), and the name
xaxis = coord_data_plot["x"].values.tolist()
yaxis = coord_data_plot["y"].values.tolist()
markercolor = coord_data_plot["feature"].values.tolist()
textvalues = coord_data_plot["gene"].values.tolist()
#Create the basic plot
fig = go.Figure(
go.Scatter(
x = xaxis,
y = yaxis,
mode = "markers",
marker_color = markercolor,
text = textvalues,
opacity = 1.0))
#Add cross-hairs to all the selected genes
shapes_y=[{'type': 'line',
'y0':y_intercept,
'y1':y_intercept,
'x0':str(min(xaxis)),
'x1':str(max(xaxis)),
'line': {'color': 'black', 'width': 1, 'dash': 'dot'}}
for i, y_intercept in enumerate(hlines)]
shapes_x=[{'type': 'line',
'x0':x_intercept,
'x1':x_intercept,
'y0':str(min(yaxis)),
'y1':str(max(yaxis)),
'line': {'color': 'black', 'width': 1, 'dash': 'dot'}}
for i, x_intercept in enumerate(vlines)]
fig.layout.update(shapes=shapes_x+shapes_y)
fig.update_layout( | |
# SimpleCV Cameras & Devices
#load system libraries
from SimpleCV.base import *
from SimpleCV.ImageClass import Image, ImageSet, ColorSpace
from SimpleCV.Display import Display
from SimpleCV.Color import Color
#Globals
_cameras = []  # cameras drained by FrameBufferThread.run()
_camera_polling_thread = ""  # presumably set to the FrameBufferThread once started -- not assigned in this chunk
class FrameBufferThread(threading.Thread):
    """
    **SUMMARY**
    Helper thread that keeps every registered camera's frame buffer
    drained.  Without it, cameras tend to hand back a frame that is one
    step behind, which is a problem at low sample rates; continuously
    grabbing guarantees that the frames you read are fresh.
    """
    def run(self):
        global _cameras
        while True:
            for camera in _cameras:
                if camera.pygame_camera:
                    camera.pygame_buffer = camera.capture.get_image(camera.pygame_buffer)
                else:
                    cv.GrabFrame(camera.capture)
                camera._threadcapturetime = time.time()
            time.sleep(0.04)  # throttle: at most ~25 fps, if you're lucky
class FrameSource:
    """
    **SUMMARY**
    An abstract Camera-type class, for handling multiple types of video input.
    Any sources of images inherit from it.
    """
    # Calibration state: "" is the "unset" sentinel for both matrices;
    # methods type-check against cv.cvmat rather than comparing to None.
    _calibMat = "" #Intrinsic calibration matrix
    _distCoeff = "" #Distortion matrix
    _threadcapturetime = '' #when the last picture was taken (updated by FrameBufferThread)
    capturetime = '' #timestamp of the last acquired image
def __init__(self):
return
def getProperty(self, p):
return None
def getAllProperties(self):
return {}
def getImage(self):
return None
def calibrate(self, imageList, grid_sz=0.03, dimensions=(8, 5)):
"""
**SUMMARY**
Camera calibration will help remove distortion and fisheye effects
It is agnostic of the imagery source, and can be used with any camera
The easiest way to run calibration is to run the
calibrate.py file under the tools directory for SimpleCV.
This will walk you through the calibration process.
**PARAMETERS**
* *imageList* - is a list of images of color calibration images.
* *grid_sz* - is the actual grid size of the calibration grid, the unit used will be
the calibration unit value (i.e. if in doubt use meters, or U.S. standard)
* *dimensions* - is the the count of the *interior* corners in the calibration grid.
So for a grid where there are 4x4 black grid squares has seven interior corners.
**RETURNS**
The camera's intrinsic matrix.
**EXAMPLE**
See :py:module:calibrate.py
"""
# This routine was adapted from code originally written by:
# <NAME> -- <EMAIL>
# See: https://github.com/abidrahmank/OpenCV-Python/blob/master/Other_Examples/camera_calibration.py
warn_thresh = 1
n_boards = 0 #no of boards
board_w = int(dimensions[0]) # number of horizontal corners
board_h = int(dimensions[1]) # number of vertical corners
n_boards = int(len(imageList))
board_n = board_w * board_h # no of total corners
board_sz = (board_w, board_h) #size of board
if( n_boards < warn_thresh ):
logger.warning("FrameSource.calibrate: We suggest using 20 or more images to perform camera calibration!" )
# creation of memory storages
image_points = cv.CreateMat(n_boards * board_n, 2, cv.CV_32FC1)
object_points = cv.CreateMat(n_boards * board_n, 3, cv.CV_32FC1)
point_counts = cv.CreateMat(n_boards, 1, cv.CV_32SC1)
intrinsic_matrix = cv.CreateMat(3, 3, cv.CV_32FC1)
distortion_coefficient = cv.CreateMat(5, 1, cv.CV_32FC1)
# capture frames of specified properties and modification of matrix values
i = 0
z = 0 # to print number of frames
successes = 0
imgIdx = 0
# capturing required number of views
while(successes < n_boards):
found = 0
img = imageList[imgIdx]
(found, corners) = cv.FindChessboardCorners(img.getGrayscaleMatrix(), board_sz,
cv.CV_CALIB_CB_ADAPTIVE_THRESH |
cv.CV_CALIB_CB_FILTER_QUADS)
corners = cv.FindCornerSubPix(img.getGrayscaleMatrix(), corners,(11, 11),(-1, -1),
(cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1))
# if got a good image,draw chess board
if found == 1:
corner_count = len(corners)
z = z + 1
# if got a good image, add to matrix
if len(corners) == board_n:
step = successes * board_n
k = step
for j in range(board_n):
cv.Set2D(image_points, k, 0, corners[j][0])
cv.Set2D(image_points, k, 1, corners[j][1])
cv.Set2D(object_points, k, 0, grid_sz*(float(j)/float(board_w)))
cv.Set2D(object_points, k, 1, grid_sz*(float(j)%float(board_w)))
cv.Set2D(object_points, k, 2, 0.0)
k = k + 1
cv.Set2D(point_counts, successes, 0, board_n)
successes = successes + 1
# now assigning new matrices according to view_count
if( successes < warn_thresh ):
logger.warning("FrameSource.calibrate: You have %s good images for calibration we recommend at least %s" % (successes, warn_thresh))
object_points2 = cv.CreateMat(successes * board_n, 3, cv.CV_32FC1)
image_points2 = cv.CreateMat(successes * board_n, 2, cv.CV_32FC1)
point_counts2 = cv.CreateMat(successes, 1, cv.CV_32SC1)
for i in range(successes * board_n):
cv.Set2D(image_points2, i, 0, cv.Get2D(image_points, i, 0))
cv.Set2D(image_points2, i, 1, cv.Get2D(image_points, i, 1))
cv.Set2D(object_points2, i, 0, cv.Get2D(object_points, i, 0))
cv.Set2D(object_points2, i, 1, cv.Get2D(object_points, i, 1))
cv.Set2D(object_points2, i, 2, cv.Get2D(object_points, i, 2))
for i in range(successes):
cv.Set2D(point_counts2, i, 0, cv.Get2D(point_counts, i, 0))
cv.Set2D(intrinsic_matrix, 0, 0, 1.0)
cv.Set2D(intrinsic_matrix, 1, 1, 1.0)
rcv = cv.CreateMat(n_boards, 3, cv.CV_64FC1)
tcv = cv.CreateMat(n_boards, 3, cv.CV_64FC1)
# camera calibration
cv.CalibrateCamera2(object_points2, image_points2, point_counts2,
(img.width, img.height), intrinsic_matrix,distortion_coefficient,
rcv, tcv, 0)
self._calibMat = intrinsic_matrix
self._distCoeff = distortion_coefficient
return intrinsic_matrix
    def getCameraMatrix(self):
        """
        **SUMMARY**
        This function returns a cvMat of the camera's intrinsic matrix.
        NOTE(review): when no calibration has been performed, the class
        default ("" -- an empty string, not None) is returned; callers in
        this file type-check the result against cv.cvmat.
        """
        return self._calibMat
    def undistort(self, image_or_2darray):
        """
        **SUMMARY**
        If given an image, apply the undistortion given by the camera's matrix and return the result.
        If given a 1xN 2D cvmat or a 2xN numpy array, it will un-distort points of
        measurement and return them in the original coordinate system.
        **PARAMETERS**
        * *image_or_2darray* - an image or an ndarray.
        **RETURNS**
        The undistorted image or the undistorted points. If the camera is un-calibrated
        we return None.
        **EXAMPLE**
        >>> img = cam.getImage()
        >>> result = cam.undistort(img)
        """
        # Both calibration matrices must have been computed or loaded first.
        if(type(self._calibMat) != cv.cvmat or type(self._distCoeff) != cv.cvmat ):
            logger.warning("FrameSource.undistort: This operation requires calibration, please load the calibration matrix")
            return None
        # NOTE(review): InstanceType is a Python 2 old-style-class check;
        # this branch detects a SimpleCV Image instance.
        if (type(image_or_2darray) == InstanceType and image_or_2darray.__class__ == Image):
            inImg = image_or_2darray # we have an image
            retVal = inImg.getEmpty()
            cv.Undistort2(inImg.getBitmap(), retVal, self._calibMat, self._distCoeff)
            return Image(retVal)
        else:
            # Treat the input as a set of 2D points: normalize it into a
            # one-column 2-channel cvmat that UndistortPoints accepts.
            mat = ''
            if (type(image_or_2darray) == cv.cvmat):
                mat = image_or_2darray
            else:
                arr = cv.fromarray(np.array(image_or_2darray))
                mat = cv.CreateMat(cv.GetSize(arr)[1], 1, cv.CV_64FC2)
                cv.Merge(arr[:, 0], arr[:, 1], None, None, mat)
            upoints = cv.CreateMat(cv.GetSize(mat)[1], 1, cv.CV_64FC2)
            cv.UndistortPoints(mat, upoints, self._calibMat, self._distCoeff)
            # Map normalized coordinates back to pixel space:
            #undistorted.x = (x* focalX + principalX);
            #undistorted.y = (y* focalY + principalY);
            return (np.array(upoints[:, 0]) *\
                [self.getCameraMatrix()[0, 0], self.getCameraMatrix()[1, 1]] +\
                [self.getCameraMatrix()[0, 2], self.getCameraMatrix()[1, 2]])[:, 0]
def getImageUndistort(self):
"""
**SUMMARY**
Using the overridden getImage method we retrieve the image and apply the undistortion
operation.
**RETURNS**
The latest image from the camera after applying undistortion.
**EXAMPLE**
>>> cam = Camera()
>>> cam.loadCalibration("mycam.xml")
>>> while True:
>>> img = cam.getImageUndistort()
>>> img.show()
"""
return self.undistort(self.getImage())
    def saveCalibration(self, filename):
        """
        **SUMMARY**
        Save the calibration matrices to file. The file name should be without the extension.
        The default extension is .xml.
        **PARAMETERS**
        * *filename* - The file name, without an extension, to which to save the calibration data.
        **RETURNS**
        None. (Every code path returns None; a missing intrinsic or
        distortion matrix is reported via logger warnings only.)
        **EXAMPLE**
        See :py:module:calibrate.py
        """
        if( type(self._calibMat) != cv.cvmat ):
            logger.warning("FrameSource.saveCalibration: No calibration matrix present, can't save.")
        else:
            intrFName = filename + "Intrinsic.xml"
            cv.Save(intrFName, self._calibMat)
        if( type(self._distCoeff) != cv.cvmat ):
            logger.warning("FrameSource.saveCalibration: No calibration distortion present, can't save.")
        else:
            distFName = filename + "Distortion.xml"
            cv.Save(distFName, self._distCoeff)
        return None
def loadCalibration(self, filename):
"""
**SUMMARY**
Load a calibration matrix from file.
The filename should be the stem of the calibration files names.
e.g. If the calibration files are MyWebcamIntrinsic.xml and MyWebcamDistortion.xml
then load the calibration file "MyWebcam"
**PARAMETERS**
* *fileneame* - The file name, without an extension, to which to save the calibration data.
**RETURNS**
Returns true if the file was loaded , false otherwise.
**EXAMPLE**
See :py:module:calibrate.py
"""
retVal = False
intrFName = filename + "Intrinsic.xml"
self._calibMat = cv.Load(intrFName)
distFName = filename + "Distortion.xml"
self._distCoeff = cv.Load(distFName)
if( type(self._distCoeff) == cv.cvmat
and type(self._calibMat) == cv.cvmat):
retVal = True
return retVal
def live(self):
"""
**SUMMARY**
This shows a live view of the camera.
**EXAMPLE**
To use it's as simple as:
>>> cam = Camera()
>>> cam.live()
Left click will show mouse coordinates and color
Right click will kill the live image
"""
start_time = time.time()
from SimpleCV.Display import Display
i = self.getImage()
d = Display(i.size())
i.save(d)
col = Color.RED
while d.isNotDone():
i = self.getImage()
elapsed_time = time.time() - start_time
if d.mouseLeft:
txt = "coord: (" + | |
import torch
import torch.nn.functional as F
import torch.nn as nn
# Custom imports
from .base_semi_apppearance_temporal_simclr import SemiAppTemp_SimCLR_BaseRecognizer
from ..builder import RECOGNIZERS
from ..losses import CosineSimiLoss
from ...utils import GatherLayer
@RECOGNIZERS.register_module()
class Semi_AppSup_TempSup_SimCLR_Crossclip_PTV_Recognizer3D(SemiAppTemp_SimCLR_BaseRecognizer):
"""Semi-supervised 3D recognizer model framework."""
def forward_train(self, imgs, labels, imgs_weak, imgs_strong, labels_unlabeled, imgs_appearance,
imgs_diff_labeled, imgs_diff_weak, imgs_diff_strong, cur_epoch=None):
"""Defines the computation performed at every call when training."""
clips_per_video = imgs.shape[1]
bz_labeled = imgs.shape[0] * imgs.shape[1]
bz_unlabeled = imgs_weak.shape[0] * imgs_weak.shape[1]
img_shape_template = imgs.shape[2:]
imgs = imgs.transpose(0, 1).reshape((-1,) + img_shape_template)
imgs_weak = imgs_weak.transpose(0, 1).reshape((-1,) + img_shape_template)
imgs_strong = imgs_strong.transpose(0, 1).reshape((-1,) + img_shape_template)
imgs_diff_shape_template = imgs_diff_labeled.shape[2:]
imgs_diff_labeled = imgs_diff_labeled.transpose(0, 1).reshape((-1,) + imgs_diff_shape_template)
imgs_diff_weak = imgs_diff_weak.transpose(0, 1).reshape((-1,) + imgs_diff_shape_template)
imgs_diff_strong = imgs_diff_strong.transpose(0, 1).reshape((-1,) + imgs_diff_shape_template)
imgs_all = torch.cat([imgs, imgs_weak, imgs_strong], dim=0)
imgs_diff_all = torch.cat([imgs_diff_labeled, imgs_diff_weak, imgs_diff_strong], dim=0)
# TODO: If we forward imgs_weak with no_grad, and then jointly forward imgs and imgs_strong,
# we might be able to save memory for a larger batch size? But not sure if this has
# negative impact on batch-norm.
# imgs_all = imgs_all.reshape((-1,) + imgs_all.shape[2:])
# imgs_diff_all = imgs_diff_all.reshape((-1,) + imgs_diff_all.shape[2:])
labels = labels.transpose(0, 1).reshape((-1, 1))
# print(labels)
if self.temp_align_indices is not None:
vid_features = self.extract_feat(imgs_all)
vid_feature = vid_features[-1]
imgs_diff_features = self.temp_backbone(imgs_diff_all)
imgs_diff_feature = imgs_diff_features[-1]
else:
vid_feature = self.extract_feat(imgs_all)
imgs_diff_feature = self.temp_backbone(imgs_diff_all)
# cls_score = self.cls_head(vid_feature)
cls_score = dict()
# cls_score_rgb, cls_score_diff = torch.split(cls_score_concat, bz_labeled+2*bz_unlabeled, dim=0)
cls_score['rgb'] = self.cls_head(vid_feature)
cls_score['diff'] = self.temp_sup_head(imgs_diff_feature)
batch_total_len = bz_labeled + bz_unlabeled
# NOTE: pre-softmx logit
cls_score_labeled = dict()
cls_score_weak = dict()
cls_score_strong = dict()
for view in ['rgb', 'diff']:
cls_score_labeled[view] = cls_score[view][:bz_labeled, :]
if self.loss_clip_selection == 0:
cls_score_weak[view] = cls_score[view][bz_labeled:(bz_labeled + bz_unlabeled // 2), :]
cls_score_strong[view] = cls_score[view][
(bz_labeled + bz_unlabeled):(bz_labeled + bz_unlabeled + bz_unlabeled // 2), :]
else:
cls_score_weak[view] = cls_score[view][bz_labeled:bz_labeled + bz_unlabeled, :]
cls_score_strong[view] = cls_score[view][bz_labeled + bz_unlabeled:bz_labeled + 2 * bz_unlabeled, :]
loss = dict()
if 'weak' in self.crossclip_contrast_range and 'strong' in self.crossclip_contrast_range:
query_index = torch.cat([torch.arange(bz_labeled // clips_per_video), torch.arange(bz_unlabeled // clips_per_video) + bz_labeled,
torch.arange(bz_unlabeled // clips_per_video) + bz_labeled + bz_unlabeled])
key_mask = torch.ones(bz_labeled + 2 * bz_unlabeled)
key_mask[query_index] = 0
key_index = torch.arange(bz_labeled + 2 * bz_unlabeled)[key_mask.bool()]
elif 'weak' in self.crossclip_contrast_range:
query_index = torch.cat([torch.arange(bz_labeled // clips_per_video), torch.arange(bz_unlabeled // clips_per_video) + bz_labeled])
key_mask = torch.ones(bz_labeled + bz_unlabeled)
key_mask[query_index] = 0
key_index = torch.arange(bz_labeled + bz_unlabeled)[key_mask.bool()]
elif 'strong' in self.crossclip_contrast_range:
query_index = torch.arange(bz_unlabeled // clips_per_video) + bz_labeled + bz_unlabeled
key_index = query_index + (bz_unlabeled // clips_per_video)
else:
pass
# print(query_index, key_index)
if 'rgb' in self.crossclip_contrast_loss:
contrast_rgb_feature = self.contrast_head_rgb(vid_feature)
rgb_query = contrast_rgb_feature[query_index]
rgb_key = contrast_rgb_feature[key_index]
loss['loss_rgb_contrast'] = self.contrast_loss_weight * \
self.contrast_head_rgb.loss([rgb_query, rgb_key])
if 'tg' in self.crossclip_contrast_loss:
contrast_tg_feature = self.contrast_head_tg(imgs_diff_feature)
tg_query = contrast_tg_feature[query_index]
tg_key = contrast_tg_feature[key_index]
loss['loss_tg_contrast'] = self.contrast_loss_weight * \
self.contrast_head_tg.loss([tg_query, tg_key])
if 'crossview' in self.crossclip_contrast_loss:
contrast_rgb_feature = self.contrast_head_rgb(vid_feature)
rgb_query = contrast_rgb_feature[query_index]
rgb_key = contrast_rgb_feature[key_index]
contrast_tg_feature = self.contrast_head_tg(imgs_diff_feature)
tg_query = contrast_tg_feature[query_index]
tg_key = contrast_tg_feature[key_index]
embedding_rgb1_tg2 = [rgb_query, tg_key]
embedding_rgb2_tg1 = [rgb_key, tg_query]
loss['loss_crossview_contrast'] = self.contrast_loss_weight * (
self.contrast_head_rgb.loss(embedding_rgb1_tg2) +
self.contrast_head_tg.loss(embedding_rgb2_tg1)) / 2
if 'crossview_sharedhead' in self.crossclip_contrast_loss:
contrast_rgb_feature = self.contrast_head_shared(vid_feature)
rgb_query = contrast_rgb_feature[query_index]
rgb_key = contrast_rgb_feature[key_index]
contrast_tg_feature = self.contrast_head_shared(imgs_diff_feature)
tg_query = contrast_tg_feature[query_index]
tg_key = contrast_tg_feature[key_index]
embedding_rgb1_tg2 = [rgb_query, tg_key]
embedding_rgb2_tg1 = [rgb_key, tg_query]
loss['loss_crossview_contrast'] = self.contrast_loss_weight * (
self.contrast_head_shared.loss(embedding_rgb1_tg2) +
self.contrast_head_shared.loss(embedding_rgb2_tg1)) / 2
if 'crossview_sameclip' in self.crossclip_contrast_loss:
if clips_per_video > 1:
contrast_rgb_feature = self.contrast_head_rgb(vid_feature)
rgb_query = contrast_rgb_feature[query_index]
rgb_key = contrast_rgb_feature[key_index]
contrast_tg_feature = self.contrast_head_tg(imgs_diff_feature)
tg_query = contrast_tg_feature[query_index]
tg_key = contrast_tg_feature[key_index]
embedding_rgb1_tg1 = [rgb_query, tg_query]
embedding_rgb2_tg2 = [rgb_key, tg_key]
loss['loss_crossview_contrast'] = self.contrast_loss_weight * (
self.contrast_head_rgb.loss(embedding_rgb1_tg1) +
self.contrast_head_tg.loss(embedding_rgb2_tg2)) / 2
else:
contrast_rgb_feature = self.contrast_head_rgb(vid_feature)
rgb_query = contrast_rgb_feature[query_index]
contrast_tg_feature = self.contrast_head_tg(imgs_diff_feature)
tg_query = contrast_tg_feature[query_index]
embedding_rgb1_tg1 = [rgb_query, tg_query]
loss['loss_crossview_contrast'] = self.contrast_loss_weight * \
self.contrast_head_rgb.loss(embedding_rgb1_tg1)
if 'crossview_sameclip_sharedhead' in self.crossclip_contrast_loss:
if clips_per_video > 1:
contrast_rgb_feature = self.contrast_head_shared(vid_feature)
rgb_query = contrast_rgb_feature[query_index]
rgb_key = contrast_rgb_feature[key_index]
contrast_tg_feature = self.contrast_head_shared(imgs_diff_feature)
tg_query = contrast_tg_feature[query_index]
tg_key = contrast_tg_feature[key_index]
embedding_rgb1_tg1 = [rgb_query, tg_query]
embedding_rgb2_tg2 = [rgb_key, tg_key]
loss['loss_crossview_contrast'] = self.contrast_loss_weight * (
self.contrast_head_shared.loss(embedding_rgb1_tg1) +
self.contrast_head_shared.loss(embedding_rgb2_tg2)) / 2
else:
contrast_rgb_feature = self.contrast_head_shared(vid_feature)
rgb_query = contrast_rgb_feature[query_index]
contrast_tg_feature = self.contrast_head_shared(imgs_diff_feature)
tg_query = contrast_tg_feature[query_index]
embedding_rgb1_tg1 = [rgb_query, tg_query]
loss['loss_crossview_contrast'] = self.contrast_loss_weight * \
self.contrast_head_shared.loss(embedding_rgb1_tg1)
if 'crossview_crossclip_densecl' in self.crossclip_contrast_loss:
contrast_rgb_feature = self.contrast_densecl_head(vid_feature)
rgb_query_backbone = vid_feature[query_index]
rgb_key_backbone = vid_feature[key_index]
rgb_query = contrast_rgb_feature[query_index]
rgb_key = contrast_rgb_feature[key_index]
contrast_tg_feature = self.contrast_densecl_head(imgs_diff_feature)
tg_query_backbone = imgs_diff_feature[query_index]
tg_key_backbone = imgs_diff_feature[key_index]
tg_query = contrast_tg_feature[query_index]
tg_key = contrast_tg_feature[key_index]
n = rgb_query.size(0)
c = vid_feature.size(1)
loss['loss_crossview_dense_contrast'] = list()
for rgb_grid, tg_grid, rgb_grid_b, tg_grid_b in \
[(rgb_query, tg_key, rgb_query_backbone, tg_key_backbone),
(rgb_key, tg_query, rgb_key_backbone, tg_query_backbone)]:
rgb_grid = rgb_grid.view(n, c, -1)
rgb_grid = nn.functional.normalize(rgb_grid, dim=1)
tg_grid = tg_grid.view(n, c, -1)
tg_grid = nn.functional.normalize(tg_grid, dim=1)
rgb_grid_b = rgb_grid_b.view(rgb_grid_b.size(0), rgb_grid_b.size(1), -1)
tg_grid_b = tg_grid_b.view(tg_grid_b.size(0), tg_grid_b.size(1), -1)
rgb_grid_b = nn.functional.normalize(rgb_grid_b, dim=1)
tg_grid_b = nn.functional.normalize(tg_grid_b, dim=1)
# print(rgb_grid_b.shape)
# print(tg_grid_b.shape)
backbone_sim_matrix = torch.matmul(rgb_grid_b.permute(0, 2, 1), tg_grid_b)
densecl_sim_ind = backbone_sim_matrix.max(dim=2)[1] # NxS^2
indexed_tg_grid = torch.gather(tg_grid, 2,
densecl_sim_ind.unsqueeze(1).expand(-1, rgb_grid.size(1), -1))
rgb_embedding = rgb_grid.view(rgb_grid.size(0), rgb_grid.size(1), -1).permute(0, 2, 1).reshape(-1, c)
tg_embedding = indexed_tg_grid.view(indexed_tg_grid.size(0), indexed_tg_grid.size(1), -1).permute(0, 2, 1).reshape(-1, c)
contrastive_embedding = [rgb_embedding, tg_embedding]
loss['loss_crossview_dense_contrast'].append(self.contrast_densecl_head.loss(contrastive_embedding))
loss['loss_crossview_dense_contrast'] = torch.mean(torch.stack(loss['loss_crossview_dense_contrast']))
if 'sameview_crossclip_densecl' in self.crossclip_contrast_loss:
contrast_rgb_feature = self.contrast_densecl_head(vid_feature)
rgb_query_backbone = vid_feature[query_index]
rgb_key_backbone = vid_feature[key_index]
rgb_query = contrast_rgb_feature[query_index]
rgb_key = contrast_rgb_feature[key_index]
contrast_tg_feature = self.contrast_densecl_head(imgs_diff_feature)
tg_query_backbone = imgs_diff_feature[query_index]
tg_key_backbone = imgs_diff_feature[key_index]
tg_query = contrast_tg_feature[query_index]
tg_key = contrast_tg_feature[key_index]
n = rgb_query.size(0)
c = vid_feature.size(1)
loss['loss_crossview_dense_contrast'] = list()
for q_grid, k_grid, q_grid_b, k_grid_b in \
[(rgb_query, rgb_key, rgb_query_backbone, rgb_key_backbone),
(tg_query, tg_key, tg_query_backbone, tg_key_backbone)]:
q_grid = q_grid.view(n, c, -1)
q_grid = nn.functional.normalize(q_grid, dim=1)
k_grid = k_grid.view(n, c, -1)
k_grid = nn.functional.normalize(k_grid, dim=1)
q_grid_b = q_grid_b.view(q_grid_b.size(0), q_grid_b.size(1), -1)
k_grid_b = k_grid_b.view(k_grid_b.size(0), k_grid_b.size(1), -1)
q_grid_b = nn.functional.normalize(q_grid_b, dim=1)
k_grid_b = nn.functional.normalize(k_grid_b, dim=1)
# print(rgb_grid_b.shape)
# print(tg_grid_b.shape)
backbone_sim_matrix = torch.matmul(q_grid_b.permute(0, 2, 1), k_grid_b)
densecl_sim_ind = backbone_sim_matrix.max(dim=2)[1] # NxS^2
indexed_k_grid = torch.gather(k_grid, 2, densecl_sim_ind.unsqueeze(1).expand(-1, k_grid.size(1), -1))
q_embedding = q_grid.view(q_grid.size(0), q_grid.size(1), -1).permute(0, 2, 1).reshape(-1, c)
k_embedding = indexed_k_grid.view(indexed_k_grid.size(0), indexed_k_grid.size(1), -1).permute(0, 2, 1).reshape(-1, c)
contrastive_embedding = [q_embedding, k_embedding]
loss['loss_crossview_dense_contrast'].append(self.contrast_densecl_head.loss(contrastive_embedding))
loss['loss_crossview_dense_contrast'] = torch.mean(torch.stack(loss['loss_crossview_dense_contrast']))
if self.loss_lambda is not None and 'loss_crossview_dense_contrast' in loss.keys() and 'loss_crossview_contrast' in loss.keys():
loss['loss_crossview_dense_contrast'] = self.loss_lambda * loss['loss_crossview_dense_contrast']
loss['loss_crossview_contrast'] = (1 - self.loss_lambda) * loss['loss_crossview_contrast']
if cur_epoch < self.contrast_warmup_epoch:
for contrast_loss_type in ['loss_rgb_contrast', 'loss_tg_contrast', 'loss_crossview_contrast',
'loss_crossview_dense_contrast']:
if contrast_loss_type in loss.keys():
loss[contrast_loss_type] = loss[contrast_loss_type].detach()
# if self.use_temp_contrast:
# cls_score_temp = self.cls_head_temp(vid_feature)
# vid_temporal = cls_score_temp[:batch_total_len, :]
# imgs_diff_temporal = self.temp_contrast_head(imgs_diff_feature[:batch_total_len, :])
#
# vid_temporal_pred = self.simsiam_temp_pred(vid_temporal)
# imgs_diff_temporal_pred = self.simsiam_temp_pred(imgs_diff_temporal)
# embedding_temp = [vid_temporal_pred, imgs_diff_temporal_pred, vid_temporal.detach(), imgs_diff_temporal.detach()]
# loss['loss_temporal_contrast'] = self.simsiam_temp_pred.loss(embedding_temp)
if self.temp_align_indices is not None:
for i, layer_idx in enumerate(self.temp_align_indices):
if self.align_stop_grad == 'tg':
# if self.align_with_correspondence:
# c = vid_features[layer_idx].size(1)
# q_grid = vid_features[layer_idx][:batch_total_len, ...].view(batch_total_len, c, -1)
# k_grid = imgs_diff_features[layer_idx][:batch_total_len, ...].detach().view(batch_total_len, c,
# -1)
# if self.align_with_correspondence == 'normalized_cosine':
# q_grid = nn.functional.normalize(q_grid, dim=1)
# k_grid = nn.functional.normalize(k_grid, dim=1)
# backbone_sim_matrix = torch.matmul(q_grid.permute(0, 2, 1), k_grid)
# densecl_sim_ind = backbone_sim_matrix.max(dim=2)[1] # NxS^2
# indexed_k_grid = torch.gather(k_grid, 2,
# densecl_sim_ind.unsqueeze(1).expand(-1, k_grid.size(1), -1))
# loss[f'loss_layer{layer_idx}_align'] = self.align_criterion(
# q_grid, indexed_k_grid
# )
if self.loss_clip_selection == 0:
align_selection_index = torch.cat(
[torch.arange(bz_labeled // 2), torch.arange(bz_unlabeled // 2) + bz_labeled])
loss[f'loss_layer{layer_idx}_align'] = self.align_criterion(
vid_features[layer_idx][align_selection_index],
imgs_diff_features[layer_idx][align_selection_index].detach())
else:
loss[f'loss_layer{layer_idx}_align'] = self.align_criterion(
vid_features[layer_idx][:batch_total_len, ...],
imgs_diff_features[layer_idx][:batch_total_len, ...].detach())
elif self.align_stop_grad == 'rgb':
loss[f'loss_layer{layer_idx}_align'] = self.align_criterion(
vid_features[layer_idx][:batch_total_len, ...].detach(),
imgs_diff_features[layer_idx][:batch_total_len, ...])
elif self.align_stop_grad is None:
loss[f'loss_layer{layer_idx}_align'] = self.align_criterion(
vid_features[layer_idx][:batch_total_len, ...],
imgs_diff_features[layer_idx][:batch_total_len, ...])
if self.densecl_indices is not None:
if self.densecl_indices == (3,):
layer_idx = self.densecl_indices[0]
# b = vid_feature.size(0)
c = vid_feature.size(1)
q_grid = vid_feature[:batch_total_len, ...].view(batch_total_len, c, -1)
q_grid = nn.functional.normalize(q_grid, dim=1)
k_grid = imgs_diff_feature[:batch_total_len, ...].view(batch_total_len, c, -1)
k_grid = nn.functional.normalize(k_grid, dim=1)
q_grid_pred = self.simsiam_densecl_pred(vid_feature)
k_grid_pred = self.simsiam_densecl_pred(imgs_diff_feature.detach())
q_grid_pred = q_grid_pred[:batch_total_len, ...].view(batch_total_len, c, -1)
q_grid_pred = nn.functional.normalize(q_grid_pred, dim=1)
k_grid_pred = k_grid_pred[:batch_total_len, ...].view(batch_total_len, c, -1)
k_grid_pred = nn.functional.normalize(k_grid_pred, dim=1)
# backbone_sim_matrix = torch.matmul(q_grid.permute(0, 2, 1), k_grid)
#
# # densecl_sim_ind = backbone_sim_matrix.max(dim=2)[1] # NxS^2
# #
# # indexed_k_grid = torch.gather(k_grid, 2, densecl_sim_ind.unsqueeze(1).expand(-1, k_grid.size(1), -1))
self.cosine_align_criterion = CosineSimiLoss(dim=1)
if self.densecl_strategy == 'shared_pred_stop_tg':
loss[f'loss_layer{layer_idx}_dense'] = self.cosine_align_criterion(q_grid_pred, k_grid_pred)
elif self.densecl_strategy == 'rgb_pred_stop_tg':
loss[f'loss_layer{layer_idx}_dense'] = self.cosine_align_criterion(q_grid_pred, k_grid)
elif self.densecl_strategy == 'tg_pred_stop_tg':
loss[f'loss_layer{layer_idx}_dense'] = self.cosine_align_criterion(q_grid, k_grid_pred)
elif self.densecl_strategy == 'simsiam':
embedding_rgb = [q_grid_pred, k_grid_pred, q_grid.detach(), k_grid.detach()]
loss[f'loss_layer{layer_idx}_dense_simsiam'] = self.simsiam_densecl_pred.loss(embedding_rgb)
with torch.no_grad():
loss[f'layer{layer_idx}_align_L2'] = torch.nn.MSELoss()(
vid_feature[:batch_total_len, ...],
imgs_diff_feature[:batch_total_len, ...]
)
else:
raise NotImplementedError
gt_labels = labels.squeeze()
# if torch.distributed.get_rank() |
# NOTE(review): the three lines below were web-scrape residue (dataset-viewer
# boilerplate: "Subsets and Splits / No community queries yet / The top public
# SQL queries from the community will appear here once available.") — they are
# not part of this module and have been commented out to keep the file parseable.