text stringlengths 957 885k |
|---|
<filename>du/android/adb/App.py
from os import system
from du.android.adb.Adb import Adb
import sys
import logging
import cmd2
import time
import argparse
from collections import namedtuple
from cmd2 import with_argparser
# Log under only the last component of the dotted module name (e.g. "App")
logger = logging.getLogger(__name__.split(".")[-1])

# Per-process snapshot parsed from `top -o PID,%CPU,%MEM`: CPU and memory in %
ProcInfo = namedtuple("ProcInfo", "cpu,memory")
# System-wide info parsed from /proc/meminfo (MemFree field)
SystemInfo = namedtuple("SystemInfo", "memFree")
# Per-package breakdown parsed from `dumpsys meminfo` -- presumably KiB; confirm
MemInfo = namedtuple("MemInfo", "java, native, code, total")
class App(cmd2.Cmd):
    """
    Interactive & scriptable wrapper over Adb.

    Every ``do_*`` method is exposed as a cmd2 command; commands that take
    options use class-level argparse parsers through ``with_argparser``.
    """

    def __init__(self):
        super().__init__()
        self.__adb = Adb()

    def do_connect(self, address):
        """Connect adb to the device at the given address."""
        logger.info("connecting to {} ..".format(address))
        self.__adb.connect(address)
        logger.info("connected OK")

    def do_remount(self, args):
        """Remount device partitions read-write."""
        self.__adb.remount()

    def do_root(self, args):
        """Restart adbd with root privileges."""
        self.__adb.root()

    def do_disableSeLinux(self, args):
        """Disable SELinux enforcement on the device."""
        self.__adb.setSeLinuxEnabled(False)

    # waitProcessStart arguments parser
    startArgParser = argparse.ArgumentParser()
    startArgParser.add_argument(
        "package", help="Name of package we are waiting to start"
    )
    startArgParser.add_argument(
        "--sleepTime",
        default=1,
        type=float,
        help="Sleep time[s] between checks => 1 s default",
    )
    startArgParser.add_argument(
        "--numAttempts", default=10, type=int, help="How much attempts to use"
    )

    @with_argparser(startArgParser)
    def do_waitProcessStart(self, args):
        """
        Wait for process to start.
        If process doesn't start in a timely manner, terminate
        """
        logger.info(
            "waiting for process {} to start (attempts {}, sleep time {})".format(
                args.package, args.numAttempts, args.sleepTime
            )
        )
        # Poll for the package PID up to numAttempts times
        for i in range(args.numAttempts):
            pid = self.__getPackagePid(args.package)
            if pid:
                logger.info("process {} alive with pid {}".format(args.package, pid))
                return
            # Try again
            time.sleep(args.sleepTime)
        logger.error("timed out waiting for process")
        sys.exit(-1)

    # waitProcessFinish arguments parser
    finishArgParser = argparse.ArgumentParser()
    finishArgParser.add_argument(
        "package", help="Name of package we are waiting to finish"
    )
    finishArgParser.add_argument(
        "--sleepTime",
        default=0.2,
        type=float,
        help="Sleep time[s] between checks => 200 ms default",
    )

    @with_argparser(finishArgParser)
    def do_waitProcessFinish(self, args):
        """
        Wait for process to finish (blocks until its PID disappears).
        """
        logger.info(
            "Waiting for process {} to finish (sleep time {})".format(
                args.package, args.sleepTime
            )
        )
        # Poll until the package no longer has a PID
        while True:
            pid = self.__getPackagePid(args.package)
            if not pid:
                logger.info("process {} finished".format(args.package))
                return
            time.sleep(args.sleepTime)

    monitorProcessArgs = argparse.ArgumentParser()
    monitorProcessArgs.add_argument(
        "package", help="Name of package we are waiting to finish"
    )
    monitorProcessArgs.add_argument("outFile", help="Output file")
    monitorProcessArgs.add_argument("--numSamples", help="Number of samples", type=int)
    monitorProcessArgs.add_argument(
        "--frequency", help="Monitor frequency in seconds", type=float, default=2
    )

    @with_argparser(monitorProcessArgs)
    def do_monitorProcess(self, args):
        """
        Periodically sample CPU/memory usage of a package, writing one line per
        sample to outFile (mirrored to the log). Runs until --numSamples
        samples are taken, or forever if --numSamples is omitted.
        """
        targetPid = self.__getPackagePid(args.package)
        if not targetPid:
            raise RuntimeError("Could not find pid of package {}".format(args.package))
        logger.info("monitoring pid {}/{} ..".format(args.package, targetPid))

        def output(fileObj, content):
            # Write through immediately and mirror to the logger
            fileObj.write(content)
            fileObj.flush()
            logger.info(content.strip())

        with open(args.outFile, "w") as fileObj:
            output(fileObj, "{:15} {:15}\n".format("CPU", "Memory(MiB)"))
            samplesTaken = 0
            while True:
                # Start measuring time
                iterationStartTime = time.time()
                procInfo = self.__getProcInfo(targetPid)
                memInfo = self.__getMemInfo(args.package)
                output(
                    fileObj,
                    "{:15.2f} {:15.2f}\n".format(procInfo.cpu, memInfo.total / 1024.0),
                )
                samplesTaken += 1
                # BUGFIX: check the limit *after* sampling so exactly
                # --numSamples rows end up in the file (the previous version
                # stopped one sample short)
                if args.numSamples and samplesTaken >= args.numSamples:
                    break
                # Sleep away the remainder of the sampling period, if any
                iterationDuration = time.time() - iterationStartTime
                sleepTime = args.frequency - iterationDuration
                if sleepTime > 0:
                    time.sleep(sleepTime)

    def __getMemInfo(self, package):
        """
        Parse `dumpsys meminfo <package>` into a MemInfo tuple.
        Fields that do not appear in the output remain None.
        """
        java = None
        native = None
        code = None
        total = None
        for rawLine in self.__adb.shell(
            ["dumpsys", "meminfo", package]
        ).stdoutStr.splitlines():
            line = rawLine.strip()
            tokens = line.split(":")
            if line.startswith("Java Heap:"):
                java = int(tokens[1])
            elif line.startswith("Native Heap:"):
                native = int(tokens[1])
            elif line.startswith("Code:"):
                code = int(tokens[1])
            elif line.startswith("TOTAL:"):
                total = int(tokens[1].strip().split(" ")[0])
        # NOTE(review): fields stay None when the section is missing --
        # do_monitorProcess assumes `total` is always present; confirm
        return MemInfo(java, native, code, total)

    def __getProcInfo(self, targetPid):
        """
        Find CPU/memory usage of the given PID via one `top` snapshot.
        Returns a ProcInfo, or None if the PID is not listed.
        """
        for line in self.__adb.shell(
            ["top", "-n", 1, "-o", "PID,%CPU,%MEM"]
        ).stdoutStr.splitlines():
            tokens = [tok.strip() for tok in line.split(" ") if tok]
            if len(tokens) != 3:
                continue
            try:
                pid = int(tokens[0])
                cpu = float(tokens[1])
                mem = float(tokens[2])
            except ValueError:
                # Skip non-numeric rows such as the "PID %CPU %MEM" header,
                # which previously raised an unhandled ValueError
                continue
            if pid == targetPid:
                return ProcInfo(cpu, mem)

    def __getSystemInfo(self):
        """Read system-wide free memory from /proc/meminfo."""
        for line in self.__adb.shell(["cat", "/proc/meminfo"]).stdoutStr.splitlines():
            tokens = [tok for tok in line.split(" ") if tok.strip()]
            # Guard against blank lines (previously raised IndexError)
            if tokens and tokens[0] == "MemFree:":
                return SystemInfo(int(tokens[1]))

    def __getPackagePid(self, targetPackage):
        """
        Get PID of the package with given name, or None if it is not running.
        """
        for package, pid in self.__getActiveProcesses():
            if package == targetPackage:
                return int(pid)
        return None

    def __getActiveProcesses(self):
        """
        Get a list of (package name, pid) tuples parsed from `ps` output.
        """
        activeProcesses = []
        # Drop the ps header row
        psLines = self.__adb.shell(["ps"]).stdoutStr.splitlines()[1:]
        for line in psLines:
            tokens = [tok for tok in line.split(" ") if tok.strip()]
            # Guard against blank/short lines (previously raised IndexError)
            if len(tokens) < 2:
                continue
            pid = int(tokens[1])
            package = tokens[-1]
            activeProcesses.append((package, pid))
        return activeProcesses

    def perror(self, final_msg, end="\n", apply_style=True):
        """
        A cmd2 override. If any of the commands fail, abort execution right away
        TODO There's probably a better way of doing this ..
        """
        # Defaults added so cmd2 may call this with just the message argument
        logger.error("exception occurred:\n" + final_msg)
        sys.exit(-1)
def main():
    """Configure logging and run the interactive adb shell; returns its exit status."""
    log_format = "[adb] [%(asctime)s.%(msecs)03d] %(levelname)s/%(name)s: %(message)s"
    logging.basicConfig(level=logging.INFO, format=log_format, datefmt="%I:%M:%S")
    app = App()
    return app.cmdloop()
if __name__ == "__main__":
    # Propagate cmdloop()'s return value as the process exit status
    sys.exit(main())
|
<filename>tabulate/t_print.py
#!/usr/bin/python
from model import Table
def t_print(table, \
        table_name = None, \
        footer_str = None, \
        corner = '+', \
        column_sep = '|', \
        row_sep = '-', \
        box_width = 0, \
        col_width_adjust = 2, \
        title_row_sep = '=', \
        show_col = False,\
        col_sort_function = None):
    """Pretty-print a Table as an ASCII box drawing (Python 2 code).

    table             -- model.Table instance to render
    table_name        -- optional title printed in a banner above the table
    footer_str        -- optional footer printed in a banner below the table
    corner            -- character drawn at line intersections
    column_sep        -- vertical separator between columns
    row_sep           -- horizontal separator between rows
    box_width         -- NOTE(review): currently unused by this function
    col_width_adjust  -- extra padding added to every column width
    title_row_sep     -- horizontal separator used for title/footer banners
    show_col          -- if True, print a header row with the column names
    col_sort_function -- optional cmp-style function to order the columns
    """
    # get the columns
    col_keys = table.column_keys()
    # sort the columns, if requested
    if col_sort_function is not None:
        col_keys.sort(cmp = col_sort_function)
    # calulate max width per column: the widest cell value, or the column
    # name itself when column headers will be shown
    col_min_width_map = dict(((col, max(reduce(max, table.iter_column(col, lambda _, _1, v: len(str(v)))), len(col) if show_col else -1)) for col in col_keys))
    # get stateless lines: the three reusable horizontal/blank separator rows
    inter_row_line = reduce(lambda acc, next_str : acc + next_str, map(lambda width: corner + row_sep * (width + col_width_adjust), map(col_min_width_map.get, col_keys)), "") + corner
    title_line = reduce(lambda acc, next_str : acc + next_str, map(lambda width: corner + title_row_sep * (width + col_width_adjust), map(col_min_width_map.get, col_keys)), "") + corner
    intra_row_line = reduce(lambda acc, next_str : acc + next_str, map(lambda width: column_sep + " " * (width + col_width_adjust), map(col_min_width_map.get, col_keys)), "") + column_sep
    # format for each row: a str.format template keyed by column name,
    # each field centered in its column width
    row_line = reduce(lambda acc, next_str : acc + next_str, map(lambda (col, width): column_sep + "{%s!s:^%ds}" % (col, width + col_width_adjust), map(lambda col: (col, col_min_width_map.get(col)), col_keys)), "") + column_sep
    # print the title
    if table_name is not None:
        print title_line
        print ("|{table_name:^%ds}|" % (len(inter_row_line) - 2)).format(table_name=table_name)
        print title_line
    else:
        print inter_row_line
    # show column names if requested
    if show_col:
        print row_line.format(**dict(((col, col) for col in col_keys)))
        print inter_row_line
    # print the rows
    for rowkey in table.row_keys():
        print row_line.format(**table.row(rowkey))
        print inter_row_line
    # print the footer
    if footer_str is not None:
        print ("|{footer:^%ds}|" % (len(inter_row_line) - 2)).format(footer=footer_str)
        print title_line
def dumplod(data, table_name = None, footer_str = None, show_col = False ,col_sort_function = None):
    """Tabulate a list of dicts: each dict is one row, keys become columns."""
    # Nothing to print for an empty input
    if data == []:
        return
    t = Table()
    # Rows are named row0, row1, ...; keys are stringified to become columns
    for (row_index, row) in enumerate(data):
        t.set_row('row' + str(row_index), dict([(str(col), val) for (col, val) in row.iteritems()]))
    t_print(t, table_name, footer_str, show_col = show_col, col_sort_function = col_sort_function)
def dumplol(data, table_name = None, footer_str = None):
    """Tabulate a list of lists: columns are named col0, col1, ... by position."""
    # Nothing to print for an empty input
    if data == []:
        return
    t = Table()
    for (row_index, row) in enumerate(data):
        t.set_row('row' + str(row_index), dict(map(lambda (col_index, col_val): ('col' + str(col_index), col_val), enumerate(row))))
    # Sort columns numerically by the index embedded in "colN"
    t_print(t, table_name, footer_str, col_sort_function = lambda c1, c2: int(c1[3:]) - int(c2[3:]))
|
from .isolationTester import Session, Step, Permutation, TestSpec, Pstep, Blocker
from .parser.specParserVisitor import specParserVisitor
# Generated from specParser.g4 by ANTLR 4.9.3
from antlr4 import *
from .parser.specParser import specParser
# This class defines a complete generic visitor for a parse tree produced by specParser.
class specParserVisitorImpl(specParserVisitor):
    """
    Visitor that converts a specParser parse tree into the isolation-test
    object model (TestSpec / Session / Step / Permutation / Pstep / Blocker).

    A stack of parent model nodes tracks which object child visits should
    attach their results to; a step registry resolves step references and
    detects duplicate definitions.
    """

    def __init__(self):
        self.parentnode_stk = []    # stack of model nodes currently being built
        self.steps_defined = {}     # step name -> Step, for reference lookup
        self.testSpec = TestSpec()  # root of the resulting model

    # Visit a parse tree produced by specParser#parse.
    def visitParse(self, ctx:specParser.ParseContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by specParser#testspec: populate the root
    # TestSpec with setup/teardown SQL, sessions and permutations.
    def visitTestspec(self, ctx:specParser.TestspecContext):
        self.parentnode_stk.append(self.testSpec)
        for setupsql in ctx.setup():
            self.testSpec.setupsqls.append(trimSQLBLOCK(setupsql.SQLBLOCK().getText()))
        if(ctx.teardown() is not None):
            self.testSpec.teardownsql = trimSQLBLOCK(ctx.teardown().SQLBLOCK().getText())
        for session_child in ctx.session():
            self.visitSession(session_child)
        for permutation_child in ctx.permutation():
            self.visitPermutation(permutation_child)
        self.parentnode_stk.pop()
        return

    # Visit a parse tree produced by specParser#setup.
    def visitSetup(self, ctx:specParser.SetupContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by specParser#teardown.
    def visitTeardown(self, ctx:specParser.TeardownContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by specParser#session: build a Session with
    # its optional setup/teardown SQL and its steps, then attach it to the
    # enclosing parent node.
    def visitSession(self, ctx:specParser.SessionContext):
        session = Session(name = ctx.ID().getText())
        session.parentTestSpec = self.parentnode_stk[-1]
        self.parentnode_stk.append(session)
        if(ctx.setup() is not None):
            session.setupsqls.append(trimSQLBLOCK(ctx.setup().SQLBLOCK().getText()))
        if(ctx.teardown() is not None):
            session.teardownsql = trimSQLBLOCK(ctx.teardown().SQLBLOCK().getText())
        for step_ctx in ctx.step():
            self.visitStep(step_ctx)
        self.parentnode_stk.pop()
        self.parentnode_stk[-1].sessions.append(session)
        return

    # Visit a parse tree produced by specParser#pstep: resolve the referenced
    # step by name and attach the resulting Pstep to the parent permutation.
    def visitPstep(self, ctx:specParser.PstepContext):
        pstep = Pstep(parentPermutation=self.parentnode_stk[-1])
        self.parentnode_stk.append(pstep)
        step_id = ctx.ID().getText()
        step_lookup_res = self.steps_defined.get(step_id)
        if(step_lookup_res is None):
            # BUGFIX: error message grammar ("Undefine" -> "Undefined")
            raise Exception("ParsingError : Undefined step found "+ step_id)
        else :
            pstep.step = step_lookup_res
        if(ctx.blockers() is not None):
            self.visitBlockers(ctx.blockers())
        else:
            # No explicit blockers -> default (empty) Blocker
            pstep.blocker = Blocker()
        self.parentnode_stk.pop()
        self.parentnode_stk[-1].psteps.append(pstep)
        return

    # Visit a parse tree produced by specParser#blockers: a '*' marks a
    # first-try blocker; IDs reference other steps that block this pstep.
    def visitBlockers(self, ctx:specParser.BlockersContext):
        blocker = Blocker()
        if ctx.AST():
            blocker.isFirstTryBlocker = True
        for otherBlockerStepId in ctx.ID():
            stepLookupRes = self.steps_defined.get(otherBlockerStepId.getText())
            if(stepLookupRes is None):
                # BUGFIX: error message grammar ("Undefine" -> "Undefined")
                raise Exception("ParsingError : Undefined step found "+otherBlockerStepId.getText())
            else:
                blocker.otherStepBlocker.append(stepLookupRes)
        self.parentnode_stk[-1].blocker = blocker
        return

    # Visit a parse tree produced by specParser#permutation: collect the
    # psteps of one permutation and attach it to the parent test spec.
    def visitPermutation(self, ctx:specParser.PermutationContext):
        permutation = Permutation()
        permutation.parentTestSpec = self.parentnode_stk[-1]
        self.parentnode_stk.append(permutation)
        for pstep in ctx.pstep():
            self.visitPstep(pstep)
        self.parentnode_stk.pop()
        self.parentnode_stk[-1].permutations.append(permutation)
        return

    # Visit a parse tree produced by specParser#step: register a new named
    # Step, rejecting duplicate definitions.
    def visitStep(self, ctx:specParser.StepContext):
        old_step_lookup = self.steps_defined.get(ctx.ID().getText())
        if old_step_lookup is not None:
            # BUGFIX: error message grammar ("Steps" -> "Step")
            raise Exception("ParsingError : Step already defined "+ctx.ID().getText())
        step = Step(ctx.ID().getText(), trimSQLBLOCK(ctx.SQLBLOCK().getText()), self.parentnode_stk[-1])
        self.steps_defined[step.name] = step
        self.parentnode_stk[-1].steps.append(step)
        return
# Drop the imported parser module name from this module's namespace now that
# the class definition (which needed it for its annotations) is complete.
del specParser
def trimSQLBLOCK(text):
    """Drop the single delimiter character on each end of a SQL block token
    and strip any surrounding whitespace from the remaining SQL text."""
    inner = text[1:-1]
    return inner.strip()
|
import os
import sys
import argparse
import roblib
__author__ = '<NAME>'
def read_blast_file(filename, query=True, evalue=10, bitscore=0):
    """
    Read the blast output file and return a dict of hits that has contig, start, stop.
    # crAssphage_C NODE_1_length_14386_cov_54.5706_ID_1703 94.64 1157 62 0 82 1238 4975 3819 0.0 1794 1238 14386
    Using -outfmt '6 std qlen slen' the columns are:
        0: query
        1: database
        2: percent id
        3: alignment length
        4: gaps
        5: mismatches
        6: query start
        7: query end
        8: database start
        9: database end
        10: e value
        11: bit score
        12: query len
        13: subject len
    :param query: Retrieve hits from the query sequence (if false, we'll get them from the database sequence)
    :type query: bool
    :param bitscore: minimum bitscore to be included as a hit
    :type bitscore: int
    :param evalue: maximum E value to be included as a hit
    :type evalue: float
    :param filename: blast output filename (in tab separated text format)
    :type filename: str
    :return: dictionary of contigs, starts and stops for all hits
    :rtype: dict
    """
    if not os.path.exists(filename):
        sys.exit("{} not found\n".format(filename))
    hits = {}
    with open(filename, 'r') as fin:
        for l in fin:
            p = l.strip().split("\t")
            # Cast the numeric columns. BUGFIX: start at column 2 so percent
            # id is converted too -- the old range(3, ...) made the `i == 2`
            # float branch unreachable and left percent id as a string.
            for i in range(2, len(p)):
                if i == 2 or i == 10 or i == 11:
                    p[i] = float(p[i])
                else:
                    p[i] = int(p[i])
            if p[11] < bitscore:
                continue
            if p[10] > evalue:
                continue
            if query:
                hitname = p[1]
                contig, start, end = p[0], p[6], p[7]
            else:
                hitname = p[0]
                contig, start, end = p[1], p[8], p[9]
            if contig not in hits:
                hits[contig] = []
            rc = False
            if start > end:
                # Hit is on the reverse strand: store coordinates low..high
                start, end, rc = end, start, True
            # Convert 1-based inclusive start to 0-based python slice start
            start -= 1
            hits[contig].append((start, end, rc, hitname))
    return hits
def extract_sequences(fastafile, hits, addhitname=False):
    """
    Extract the sequences from a fasta file
    :param fastafile: The fasta file to get the sequences from
    :type fastafile: str
    :param hits: The dict of hits using contig, start, end
    :type hits: dict
    :param addhitname: If True, append the hit name to each sequence ID
    :type addhitname: bool
    :return: A dict of the sequences with contig_start_end as ID and sequence as value
    :rtype: dict
    """
    sequences = {}
    if not os.path.exists(fastafile):
        sys.exit("{} not found\n".format(fastafile))
    fa = roblib.read_fasta(fastafile)
    for contig in hits:
        if contig not in fa:
            sys.stderr.write("WARNING: {} was not found in {}\n".format(contig, fastafile))
            # BUGFIX: execution previously fell through to fa[contig] below
            # and raised a KeyError; skip missing contigs instead.
            continue
        for tple in hits[contig]:
            seq = fa[contig][tple[0]:tple[1]]
            if tple[2]:
                # Hit was on the reverse strand: reverse complement it
                seq = roblib.rc(seq)
            # ID is contig_start_end with a 1-based start for display
            loc = "_".join(map(str, [contig, tple[0]+1, tple[1]]))
            if addhitname:
                loc += " [hit={}]".format(tple[3])
            sequences[loc] = seq
    return sequences
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='extract sequences based on blast hits')
    parser.add_argument('-b', help='blast output file', required=True)
    parser.add_argument('-f', help='fasta file', required=True)
    parser.add_argument('-d', help='use database sequences (default: query sequences)', action="store_true")
    # BUGFIX: defaults now live in argparse; the old `if args.e:` / `if args.s:`
    # checks silently ignored an explicit 0 (falsy) on the command line.
    parser.add_argument('-e', help='Maximum evalue (default = 10)', type=float, default=10)
    parser.add_argument('-s', help='Minimum bit score (default = 0)', type=float, default=0)
    parser.add_argument('-i', help='Add database (query) ID to output', action='store_true', default=False)
    args = parser.parse_args()
    # -d selects database-side coordinates; default is the query side
    usequery = not args.d
    blasthits = read_blast_file(args.b, query=usequery, evalue=args.e, bitscore=args.s)
    blastseqs = extract_sequences(args.f, blasthits, args.i)
    for seqid in blastseqs:
        print(">{}\n{}".format(seqid, blastseqs[seqid]))
NOTE_OFF_STATUS = 128
NOTE_ON_STATUS = 144
CC_STATUS = 176
NUM_NOTES = 127
NUM_CC_NO = 127
NUM_CHANNELS = 15
NUM_PAGES = 4
PAGES_NAMES = (('P', 'o', 's', 'i', 't', 'i', 'o', 'n', ' ', '&', ' ', 'T', 'e', 'm', 'p', 'o'),
('C', 'l', 'i', 'p', ' ', '&', ' ', 'T', 'e', 'm', 'p', 'o'),
('V', 'o', 'l', 'u', 'm', 'e', ' ', '&', ' ', 'P', 'a', 'n', 'n', 'i', 'n', 'g'),
('L', 'o', 'o', 'p', ' ', 'S', 'e', 't', 't', 'i', 'n', 'g', 's'),
('S', 'e', 'n', 'd', ' ', 'S', 'e', 't', 't', 'i', 'n', 'g', 's'))
TRANZ_NATIVE_MODE = (240, 0, 1, 64, 16, 1, 0, 247)
TRANZ_TRANS_SECTION = range(91, 96)
TRANZ_RWD = 91
TRANZ_FFWD = 92
TRANZ_STOP = 93
TRANZ_PLAY = 94
TRANZ_REC = 95
TRANZ_PREV_TRACK = 48
TRANZ_NEXT_TRACK = 49
TRANZ_ARM_TRACK = 0
TRANZ_MUTE_TRACK = 16
TRANZ_SOLO_TRACK = 8
TRANZ_ANY_SOLO = 115
TRANZ_TRACK_SECTION = (TRANZ_PREV_TRACK,
TRANZ_NEXT_TRACK,
TRANZ_ARM_TRACK,
TRANZ_MUTE_TRACK,
TRANZ_SOLO_TRACK,
TRANZ_ANY_SOLO)
TRANZ_LOOP = 86
TRANZ_PUNCH_IN = 87
TRANZ_PUNCH_OUT = 88
TRANZ_PUNCH = 120
TRANZ_LOOP_SECTION = (TRANZ_LOOP,
TRANZ_PUNCH_IN,
TRANZ_PUNCH_OUT,
TRANZ_PUNCH)
TRANZ_PREV_CUE = 84
TRANZ_ADD_CUE = 82
TRANZ_NEXT_CUE = 85
TRANZ_CUE_SECTION = (TRANZ_PREV_CUE, TRANZ_ADD_CUE, TRANZ_NEXT_CUE)
TRANZ_UNDO = 76
TRANZ_SHIFT = 121
TRANZ_DICT = {'0': 48,
'1': 49,
'2': 50,
'3': 51,
'4': 52,
'5': 53,
'6': 54,
'7': 55,
'8': 56,
'9': 57,
'A': 65,
'B': 66,
'C': 67,
'D': 68,
'E': 69,
'F': 70,
'G': 71,
'H': 72,
'I': 73,
'J': 74,
'K': 75,
'L': 76,
'M': 77,
'N': 78,
'O': 79,
'P': 80,
'Q': 81,
'R': 82,
'S': 83,
'T': 84,
'U': 85,
'V': 86,
'W': 87,
'X': 88,
'Y': 89,
'Z': 90,
'a': 97,
'b': 98,
'c': 99,
'd': 100,
'e': 101,
'f': 102,
'g': 103,
'h': 104,
'i': 105,
'j': 106,
'k': 107,
'l': 108,
'm': 109,
'n': 110,
'o': 111,
'p': 112,
'q': 113,
'r': 114,
's': 115,
't': 116,
'u': 117,
'v': 118,
'w': 119,
'x': 120,
'y': 121,
'z': 122,
'@': 64,
' ': 32,
'.': 46,
',': 44,
':': 58,
';': 59,
'<': 60,
'>': 62,
'[': 91,
']': 93,
'_': 95,
'-': 16,
'|': 124,
'&': 38}
SYSEX_START = (240, 0, 32, 51, 1, 16, 114, 0, 29)
SYSEX_END = (247,)
CLEAR_LINE = (32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32)
LED_ON = 127
LED_OFF = 0 |
import copy
import numpy as np
class Particle:
    """
    A single PSO particle: current position/velocity plus personal-best
    bookkeeping (objective value, position, and constraint-violation count).
    """

    def __init__(self, x_0, v):
        self.x = x_0                    # current position
        self.v = v                      # current velocity
        self.n_constraints = None       # violations at the current position
        self.p_best = None              # best objective value seen so far
        self.pos_p_best = None          # position of that best value
        self.n_constraints_best = None  # violations at the best position


class Swarm:
    """
    Constriction-factor particle swarm optimizer.

    The swarm is generated and the optimization loop runs directly in the
    constructor; afterwards the best particle found is available as ``self.g``
    (``g.p_best`` / ``g.pos_p_best`` hold the best value and position).
    """

    def __init__(self, function, dimensions, bounds, constraints=None, n_particle=40, iterations=1000, args=None):
        """
        :param function: objective to minimize, called as function(x[, *args])
        :param dimensions: number of decision variables
        :param bounds: per-dimension (low, high) pairs used for initialization
        :param constraints: iterable of dicts with 'type' ('ineq'/'eq'),
            'fun' and optionally 'args' keys, or None for an unconstrained
            problem
        :param n_particle: swarm size
        :param iterations: maximum number of iterations
        :param args: extra positional arguments forwarded to the objective
        """
        self.function = function
        self.constraints = constraints
        self.swarm = []
        self.n_particle = n_particle
        self.g = None  # best particle found so far (global neighborhood)
        self.dimensions = dimensions
        self.iterations = iterations
        self.bounds = bounds
        self.args = args
        # generate the swarm positions and velocities
        #self.__generate_swarm()
        self.__generate_swarm_heuristic()  # modified initialization
        # assign best values and positions
        self.__p_best()
        # best in the neighborhood
        self.__best_neighborhood()
        k = 0
        non_improving = 0
        eps = 0.01  # minimum improvement counted as progress
        # Stop early after 5 consecutive iterations with < eps improvement
        while k < (iterations - 1) and non_improving < 5:
            current_best = copy.deepcopy(self.g.p_best)
            self.__update_velocities_positions(k)
            self.__p_best()
            self.__best_neighborhood()
            if 0 < abs(current_best - self.g.p_best) < eps:
                non_improving += 1
            k += 1

    def __generate_swarm(self):
        """Uniform random initialization inside the bounds (unused default)."""
        # BUGFIX: removed duplicated `vel = vel = ...` assignment
        vel = np.random.rand(self.dimensions) + 1
        for _ in range(self.n_particle):
            self.swarm.append(Particle(self.__random_point(), vel))

    def __generate_swarm_heuristic(self):
        """
        Feasibility-biased initialization: the first particle is re-sampled
        until feasible; later infeasible particles are pulled toward a
        feasible swarm member until they become feasible.
        """
        vel = np.random.rand(self.dimensions) + 1
        random_point = self.__random_point()
        while not self.__is_feasible(random_point):
            random_point = self.__random_point()
        self.swarm.append(Particle(random_point, vel))
        for _ in range(self.n_particle - 1):
            random_point = self.__random_point()
            if not self.__is_feasible(random_point):
                feasible_particle_pos = self.swarm[np.random.randint(len(self.swarm)) - 1].x
                while not self.__is_feasible(random_point):
                    # Move 40% of the way toward a known feasible position
                    random_point = 0.6*random_point + 0.4*feasible_particle_pos
            self.swarm.append(Particle(random_point, vel))

    def __update_velocities_positions(self, k):
        """One velocity/position update sweep over the whole swarm."""
        w = (1.2 - 0.1) * ((self.iterations-(k+1))/self.iterations) + 0.1  # Time-Varying Inertia Weight (TVIW)
        c1 = 2.05  # cognitive parameter
        c2 = 2.05  # social parameter
        phi = c1 + c2
        chi = 2.0/abs((2 - phi - np.sqrt(phi**2 - 4 * phi)))  # constriction factor
        for particle in self.swarm:
            U_1 = np.random.rand()
            U_2 = np.random.rand()
            local_adjustment = c1 * U_1 * (particle.pos_p_best - particle.x)
            global_adjustment = c2 * U_2 * (self.g.pos_p_best - particle.x)
            particle.v = chi * (w * particle.v + local_adjustment + global_adjustment)
            particle.x += particle.v

    def __p_best(self):
        '''
        Assign best values and positions so that:
        Any feasible solution is preferred to any infeasible solution.
        Between two feasible solutions, the one having better objective function value is preferred.
        Between two infeasible solutions, the one having smaller constraint violation is preferred.
        '''
        for particle in self.swarm:
            if self.args is None:
                cost = self.function(particle.x)
            else:
                cost = self.function(particle.x, *self.args)
            particle.n_constraints = self.__n_constraints_unsatisfied(particle.x)
            if particle.p_best is None:
                particle.p_best = cost
                particle.pos_p_best = copy.deepcopy(particle.x)
                particle.n_constraints_best = copy.deepcopy(particle.n_constraints)
                continue
            if particle.n_constraints < particle.n_constraints_best:
                particle.p_best = cost
                particle.pos_p_best = copy.deepcopy(particle.x)
                particle.n_constraints_best = copy.deepcopy(particle.n_constraints)
                continue
            if particle.n_constraints == 0 and particle.n_constraints == particle.n_constraints_best and cost < particle.p_best:
                particle.p_best = cost
                particle.pos_p_best = copy.deepcopy(particle.x)
                particle.n_constraints_best = copy.deepcopy(particle.n_constraints)

    def __best_neighborhood(self):
        """Track the best particle among all particles (global neighborhood)."""
        for particle in self.swarm:
            if self.g is None:
                self.g = copy.deepcopy(particle)
                continue
            if particle.n_constraints_best < self.g.n_constraints_best:
                self.g = copy.deepcopy(particle)
                continue
            if particle.n_constraints_best == 0 and particle.n_constraints_best == self.g.n_constraints_best and particle.p_best < self.g.p_best:
                self.g = copy.deepcopy(particle)

    def __n_constraints_unsatisfied(self, point):
        """Count constraints violated at ``point`` (0 for an unconstrained problem)."""
        # BUGFIX: constraints defaults to None, which previously crashed the
        # iteration below; treat it as "no constraints".
        if not self.constraints:
            return 0
        eps = 0.0001  # tolerance for equality constraints
        n_unsatisfied = 0
        for cons in self.constraints:
            cons_type = cons['type']  # renamed: don't shadow builtin `type`
            fun = cons['fun']
            fun_args = cons.get('args', ())  # 'args' is now optional
            if cons_type == "ineq":
                if not fun(point, *fun_args) > 0:
                    n_unsatisfied += 1
            if cons_type == "eq":
                if not abs(fun(point, *fun_args)) <= eps:
                    n_unsatisfied += 1
        return n_unsatisfied

    def __is_feasible(self, point):
        """True when the point violates no constraints."""
        return self.__n_constraints_unsatisfied(point) == 0

    def __random_point(self):
        """Sample one point uniformly inside the per-dimension bounds."""
        random_point = np.empty(self.dimensions)
        for i in range(self.dimensions):
            random_point[i] = np.random.rand() * (abs(self.bounds[i][1] - self.bounds[i][0])) + self.bounds[i][0]
        return random_point
|
<filename>asana_extensions/general/config.py<gh_stars>0
#!/usr/bin/env python3
"""
This module handles access to the configuration files. The configuration
files--including the environment files--are accessed by the other python scripts
through this file.
This is setup such that other files need only call the `get()` functions, and
all the loading and caching will happen automatically internal to this file.
As of right now, this is hard-coded to access configuration files at a specific
name and path.
Module Attributes:
N/A
(C) Copyright 2021 <NAME>. All Rights Reserved Worldwide.
"""
import configparser
from enum import Enum
import itertools
import logging
import os.path
from asana_extensions.general import dirs
class UnsupportedFormatError(Exception):
    """
    Error for an argument whose format is invalid, used whenever no more
    specific or more appropriate exception applies.
    """
def read_conf_file_fake_header(conf_rel_file, conf_base_dir=None,
        fake_section='fake',):
    """
    Read config file in configparser format, but insert a fake header for
    first section.  This is aimed at files that are close to configparser
    format, but do not have a section header for the first section.  The fake
    section name is not important.

    Args:
      conf_rel_file (str): Relative file path to config file.
      conf_base_dir (str): Base file path to use with relative path.  If not
        provided, this will use the absolute path of this module.
      fake_section (str): Fake section name, if needed.

    Returns:
      parser (ConfigParser): ConfigParser for file loaded.
    """
    base_dir = conf_base_dir if conf_base_dir is not None else dirs.get_conf_path()
    full_path = os.path.join(base_dir, conf_rel_file)
    parser = configparser.ConfigParser()
    with open(full_path, encoding="utf_8") as file_obj:
        # Prepend the synthetic "[fake]" header so the headerless first
        # section parses cleanly.
        header_line = '[' + fake_section + ']'
        parser.read_file(itertools.chain([header_line], file_obj))
    return parser
def read_conf_file(conf_rel_file, conf_base_dir=None):
    """
    Read config file in configparser format.

    Args:
      conf_rel_file (str): Relative file path to config file.
      conf_base_dir (str): Base file path to use with relative path.  If not
        provided, this will use the absolute path of this module.

    Returns:
      parser (ConfigParser): ConfigParser for file loaded.
    """
    base_dir = conf_base_dir if conf_base_dir is not None else dirs.get_conf_path()
    parser = configparser.ConfigParser()
    parser.read(os.path.join(base_dir, conf_rel_file))
    return parser
class CastType(Enum):
    """
    Enum of cast types.

    These identify the target type when converting values in `cast_var()`.
    """
    INT = 'int'
    FLOAT = 'float'
    STRING = 'string'


def cast_var(var, cast_type, fallback_to_original=False):
    """
    Cast variable to the specified type.

    Args:
      var (*): Variable of an unknown type.
      cast_type (CastType): Type that var should be cast to, if possible.
      fallback_to_original (bool): If true, will return original var if cast
        fails; otherwise, failed cast will raise exception.

    Returns:
      (CastType target, or original type): The converted value; or, when the
        conversion failed and fallback was requested, the original value
        unchanged.

    Raises:
      (TypeError): Cannot cast because type specified is not supported.
      (ValueError): Cast failed and fallback to original was not True.
    """
    try:
        if cast_type is CastType.INT:
            return int(var)
        if cast_type is CastType.FLOAT:
            return float(var)
        if cast_type is CastType.STRING:
            return str(var)
        raise TypeError('Cast failed -- unsupported type.')
    except (TypeError, ValueError):
        if fallback_to_original:
            return var
        raise
def parse_list_from_conf_string(conf_str, val_type, delim=',',
        delim_newlines=False, strip_quotes=False):
    """
    Parse a string into a list of items based on the provided specifications.

    Args:
      conf_str (str): The string to be split.
      val_type (CastType): The type to cast each element to.
      delim (str or None): The delimiter on which to split conf_str.  If not
        using a character string delimiter, can set to None.  Can be used with
        `delim_newlines`.
      delim_newlines (bool): Whether to split on newlines.  Can be used with
        `delim`.
      strip_quotes (bool): Whether or not there are quotes to be stripped from
        each item after split and strip.

    Returns:
      list_out (list of val_type): List of all elements found in conf_str
        after splitting on delim and/or delim_newlines.  Each element will be
        of val_type.  This will silently skip any element that cannot be cast
        or results in an empty string.
    """
    if not conf_str:
        return []
    # First split on newlines (optional), then on the delimiter (optional)
    lines = conf_str.splitlines() if delim_newlines else [conf_str]
    if delim is None:
        raw_items = lines
    else:
        raw_items = [piece for line in lines for piece in line.split(delim)]
    list_out = []
    for raw_val in raw_items:
        if strip_quotes:
            raw_val = raw_val.strip().strip('\'"')
        try:
            cast_val = cast_var(raw_val.strip(), val_type)
        except (ValueError, TypeError):
            # may have been a blank line without a delim
            continue
        if cast_val != '':
            list_out.append(cast_val)
    return list_out
class LevelFilter(logging.Filter):  # pylint: disable=too-few-public-methods
    """
    A logging filter that bounds the range of levels a handler will emit.

    The base logging machinery already implements a minimum level, so the
    minimum bound here is redundant; the maximum bound is the new control.

    Class Attributes:
      N/A

    Instance Attributes:
      _min_exc_levelno (int or None): Records strictly above this level number
        pass.  None disables the minimum check.
      _max_inc_levelno (int or None): Records at or below this level number
        pass.  None disables the maximum check.
    """
    def __init__(self, min_exc_level=None, max_inc_level=None):
        """
        Creates the level filter.

        Args:
          min_exc_level (int/str/None): Exclusive lower bound, given as a
            level number or a level name.  Omit/None to disable the bound.
          max_inc_level (int/str/None): Inclusive upper bound, given as a
            level number or a level name.  Omit/None to disable the bound.
        """
        self._min_exc_levelno = self.__as_levelno(min_exc_level)
        self._max_inc_levelno = self.__as_levelno(max_inc_level)
        super().__init__()

    @staticmethod
    def __as_levelno(level):
        """
        Normalizes a level given as int, level-name string, or None into an
        int level number (or None when no level was given).
        """
        try:
            return int(level)
        except ValueError:
            # Level name dict is bi-directional lookup -- See python source
            return logging.getLevelName(level.upper())
        except TypeError:
            return None

    def filter(self, record):
        """
        Filters the provided record according to the configured bounds.

        Args:
          record (LogRecord): The log record that is being checked whether to
            log.

        Returns:
          (bool): True if should log; False to drop.
        """
        levelno = record.levelno
        if self._min_exc_levelno is not None and levelno <= self._min_exc_levelno:
            return False
        if self._max_inc_levelno is not None and levelno > self._max_inc_levelno:
            return False
        return True
|
<filename>time_measure.py<gh_stars>10-100
from umap import umap_
import numpy as np
import timeit
from gensim.models.keyedvectors import KeyedVectors
from utils import pca, run_umap, run_umap2, run_tsne, draw_plot, load_merge_cifar, load_merge_mnist
# from sklearn.datasets import load_digits
timeit.template = """
def inner(_it, _timer{init}):
{setup}
_t0 = _timer()
for _i in _it:
retval = {stmt}
_t1 = _timer()
return _t1 - _t0, retval
"""
if __name__ == "__main__":
    # # TOY DATA # (1797, 64)
    # from sklearn.datasets import load_digits
    # digits = load_digits()
    # umap_.UMAP(n_neighbors=5, min_dist=0.3, local_connectivity=1, metric='correlation', verbose=True).fit_transform(digits.data)

    # FASHION MNIST (6-70000, 784), 26MB
    # https://github.com/zalandoresearch/fashion-mnist
    # Load the Fashion-MNIST feature matrix and labels
    x, y = load_merge_mnist()
    # x = pca(x, no_dims=300).real
    # Human-readable class names, indexed by label value
    item = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat", "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]
    # UMAP run
    # run_umap(x=x, y=y, item=item, n_neighbors_list=[5])
    # run_umap(x=x, y=y, item=item, n_neighbors_list=[2,5,10,20,50])
    # run_umap2(x=x, y=y, item=item, min_dist_list=[0.1,0.05, 0.01])
    # Embed into 2-D with UMAP and save a labeled scatter plot
    x_umap = umap_.UMAP(n_neighbors=5, min_dist=0.3, metric='correlation', verbose=True).fit_transform(x)
    draw_plot(x_umap, y, item, "umap_result")
    # t-SNE run
    # x_tse = run_tsne(x)
    # draw_plot(x_tse, y, item, "tsne_result")

    # CIFAR 10 (60000, 3072), 163MB
    # http://www.cs.toronto.edu/~kriz/cifar.html
    # x2, y2 = load_merge_cifar()
    # item2 = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
    # UMAP run
    # run_umap(x=x2, y=y2, item=item2, n_neighbors_list=[5,20,50,100,200])
    # x_umap2 = umap_.UMAP(n_neighbors=5, min_dist=0.3, metric='correlation', verbose=True).fit_transform(x2)
    # draw_plot(x_umap2, y2, item2, "umap_result2")
    # # t-SNE run
    # x_tse2 = run_tsne(x2)
    # draw_plot(x_tse2, y2, item2, "tsne_result2")

    # # WORD VECTOR (0.6M-3M, 300), 3.35GB
    # # https://www.kaggle.com/sandreds/googlenewsvectorsnegative300
    # word_vectors = KeyedVectors.load_word2vec_format('./data/google/GoogleNews-vectors-negative300.bin', binary=True)
    # x3 = word_vectors.vectors[:600000,] # wv.shape (3,000,000, 300) -> (600,000, 300)
    # # UMAP run
    # x_umap3 = umap_.UMAP(n_neighbors=5, min_dist=0.3, metric='correlation', verbose=True).fit_transform(x3)
    # # t-SNE run
    # x_tse3 = run_tsne(x3)
    # plotData = data[33]
    # plotData = plotData.reshape(28, 28)
    # plt.gray()
    # plt.imshow(plotData)
    # plt.show()
|
from __future__ import annotations
import collections
import os
import shutil
import sys
import sysconfig
from contextlib import contextmanager
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Tuple
from pip._internal.req import req_uninstall
from pip._internal.utils import misc
from pip._vendor import packaging, pkg_resources
from pip_shims import shims
from pythonfinder import Finder
from pythonfinder.environment import PYENV_INSTALLED, PYENV_ROOT
from vistir.contextmanagers import temp_environ
from vistir.path import normalize_path
from pdm.context import context
from pdm.exceptions import NoPythonVersion
from pdm.utils import (
allow_all_wheels,
cached_property,
convert_hashes,
create_tracked_tempdir,
get_finder,
get_pep508_environment,
get_python_version,
)
if TYPE_CHECKING:
from pdm.models.specifiers import PySpecSet
from pdm.project.config import Config
from pdm._types import Source
class WorkingSet(collections.abc.Mapping):
    """A dict-like class that holds all installed packages in the lib directory."""

    def __init__(
        self,
        paths: Optional[List[str]] = None,
        python: Tuple[int, ...] = sys.version_info[:3],
    ):
        self.env = pkg_resources.Environment(paths, python=python)
        self.pkg_ws = pkg_resources.WorkingSet(paths)
        self.__add_editable_dists()

    def __getitem__(self, key: str) -> pkg_resources.Distribution:
        # Environment maps a key to a (possibly empty) list of distributions.
        dists = self.env[key]
        if not dists:
            raise KeyError(key)
        return dists[0]

    def __len__(self) -> int:
        # pkg_resources.Environment has no public size API; its private
        # _distmap holds one entry per distribution key.
        return len(self.env._distmap)

    def __iter__(self) -> Iterator[str]:
        yield from self.env

    def __add_editable_dists(self):
        """Editable distributions are not present in pkg_resources.WorkingSet,
        Get them from self.env
        """
        editable_keys = [key for key in self if key not in self.pkg_ws.by_key]
        for key in editable_keys:
            self.pkg_ws.add(self[key])
class Environment:
    """Environment dependent stuff related to the selected Python interpreter."""

    def __init__(self, python_requires: PySpecSet, config: Config) -> None:
        """
        :param python_requires: the project's python requires constraint.
        :param config: the project's configuration.
        """
        self.python_requires = python_requires
        self.config = config

    @cached_property
    def python_executable(self) -> str:
        """Get the Python interpreter path.

        Resolution order: the configured ``python.path`` (if it still
        resolves), the pyenv shim (when pyenv is installed and enabled),
        whatever ``python`` on PATH refers to, any interpreter found by
        pythonfinder, and finally the interpreter running this process.

        :raises NoPythonVersion: if no interpreter satisfies
            ``self.python_requires``.
        """
        if self.config.get("python.path"):
            path = self.config["python.path"]
            try:
                get_python_version(path)
                return path
            except Exception:
                # The stored path is stale or broken; fall through and
                # rediscover an interpreter.
                pass
        if PYENV_INSTALLED and self.config.get("python.use_pyenv", True):
            return os.path.join(PYENV_ROOT, "shims", "python")
        # First try what `python` refers to.
        path = shutil.which("python")
        version = None
        if path:
            version = get_python_version(path, True)
        if not version or not self.python_requires.contains(version):
            finder = Finder()
            for python in finder.find_all_python_versions():
                version = get_python_version(python.path.as_posix(), True)
                if self.python_requires.contains(version):
                    path = python.path.as_posix()
                    break
            else:
                # No discovered interpreter matched; last resort is the
                # interpreter running PDM itself.
                version = ".".join(map(str, sys.version_info[:3]))
                if self.python_requires.contains(version):
                    path = sys.executable
                else:
                    # BUGFIX: `path` may still hold the non-matching `python`
                    # found on PATH above -- discard it so we raise instead of
                    # silently returning an interpreter that violates the
                    # requires-python constraint.
                    path = None
        if path:
            context.io.echo(
                "Using Python interpreter: {} ({})".format(
                    context.io.green(path), version
                )
            )
            # Persist the choice so subsequent runs skip discovery.
            self.config["python.path"] = Path(path).as_posix()
            self.config.save_config()
            return path
        raise NoPythonVersion(
            "No Python that satisfies {} is found on the system.".format(
                self.python_requires
            )
        )

    def get_paths(self) -> Dict[str, str]:
        """Get paths like ``sysconfig.get_paths()`` for installation,
        redirected into the local ``__pypackages__`` layout."""
        paths = sysconfig.get_paths()
        scripts = "Scripts" if os.name == "nt" else "bin"
        packages_path = self.packages_path
        paths["platlib"] = paths["purelib"] = (packages_path / "lib").as_posix()
        paths["scripts"] = (packages_path / scripts).as_posix()
        paths["data"] = paths["prefix"] = packages_path.as_posix()
        paths["include"] = paths["platinclude"] = paths["headers"] = (
            packages_path / "include"
        ).as_posix()
        return paths

    @contextmanager
    def activate(self):
        """Activate the environment. Manipulate the ``PYTHONPATH`` and patches ``pip``
        to be aware of local packages. This method acts like a context manager.

        All patched globals are restored on exit, even if the managed block
        raises.
        """
        paths = self.get_paths()
        with temp_environ():
            old_paths = os.getenv("PYTHONPATH")
            if old_paths:
                new_paths = os.pathsep.join([paths["purelib"], old_paths])
            else:
                new_paths = paths["purelib"]
            os.environ["PYTHONPATH"] = new_paths
            python_root = os.path.dirname(self.python_executable)
            os.environ["PATH"] = os.pathsep.join(
                [python_root, paths["scripts"], os.environ["PATH"]]
            )
            working_set = self.get_working_set()
            _old_ws = pkg_resources.working_set
            pkg_resources.working_set = working_set.pkg_ws
            # HACK: Replace the is_local with environment version so that packages can
            # be removed correctly.
            _old_sitepackages = misc.site_packages
            _is_local = misc.is_local
            _evaluate_marker = pkg_resources.evaluate_marker
            pkg_resources.evaluate_marker = self.evaluate_marker
            misc.is_local = req_uninstall.is_local = self.is_local
            misc.site_packages = paths["purelib"]
            try:
                yield
            finally:
                # BUGFIX: restore the patched globals in a finally block.
                # Previously an exception in the managed block left pip and
                # pkg_resources permanently monkeypatched.
                misc.is_local = req_uninstall.is_local = _is_local
                pkg_resources.working_set = _old_ws
                pkg_resources.evaluate_marker = _evaluate_marker
                misc.site_packages = _old_sitepackages

    def is_local(self, path) -> bool:
        """PEP 582 version of ``is_local()`` function."""
        return normalize_path(path).startswith(
            normalize_path(self.packages_path.as_posix())
        )

    def evaluate_marker(self, text: str, extra=None) -> bool:
        """Evaluate an environment marker against this environment (the
        ``extra`` parameter mirrors pkg_resources' signature and is unused)."""
        marker = packaging.markers.Marker(text)
        return marker.evaluate(self.marker_environment)

    @cached_property
    def packages_path(self) -> Path:
        """The local packages path."""
        if self.config.get("packages_path") is not None:
            # Wrap in Path so the return type matches the annotation even
            # when the config stores a plain string (Path(Path(...)) is a
            # no-op for an existing Path).
            return Path(self.config.get("packages_path"))
        pypackages = (
            self.config.project_root
            / "__pypackages__"
            / ".".join(map(str, get_python_version(self.python_executable)[:2]))
        )
        scripts = "Scripts" if os.name == "nt" else "bin"
        for subdir in [scripts, "include", "lib"]:
            pypackages.joinpath(subdir).mkdir(exist_ok=True, parents=True)
        return pypackages

    def _make_building_args(self, ireq: shims.InstallRequirement) -> Dict[str, Any]:
        """Assemble the directory arguments used when building *ireq*."""
        src_dir = ireq.source_dir or self._get_source_dir()
        if ireq.editable:
            # Editable installs build in place.
            build_dir = src_dir
        else:
            build_dir = create_tracked_tempdir(prefix="pdm-build")
        download_dir = context.cache("pkgs")
        wheel_download_dir = context.cache("wheels")
        return {
            "build_dir": build_dir,
            "src_dir": src_dir,
            "download_dir": download_dir.as_posix(),
            "wheel_download_dir": wheel_download_dir.as_posix(),
        }

    def _get_source_dir(self) -> str:
        """Return the directory where source requirements are unpacked."""
        build_dir = self.packages_path
        if build_dir:
            src_dir = build_dir / "src"
            src_dir.mkdir(exist_ok=True)
            return src_dir.as_posix()
        venv = os.environ.get("VIRTUAL_ENV", None)
        if venv:
            src_dir = os.path.join(venv, "src")
            if os.path.exists(src_dir):
                return src_dir
        # NOTE(review): mkdtemp-style helpers usually take *suffix* as the
        # first positional argument while _make_building_args passes prefix=;
        # confirm "pdm-src" is meant as a prefix here.
        return create_tracked_tempdir("pdm-src")

    @contextmanager
    def get_finder(
        self,
        sources: Optional[List[Source]] = None,
        ignore_requires_python: bool = False,
    ) -> shims.PackageFinder:
        """Return the package finder of given index sources.

        :param sources: a list of sources the finder should search in.
        :param ignore_requires_python: whether to ignore the python version constraint.
        """
        sources = sources or []
        python_version = get_python_version(self.python_executable)[:2]
        finder = get_finder(
            sources,
            context.cache_dir.as_posix(),
            python_version,
            ignore_requires_python,
        )
        try:
            yield finder
        finally:
            # BUGFIX: close the requests session even when the caller's block
            # raises, otherwise the connection pool leaks.
            finder.session.close()

    def build(
        self, ireq: shims.InstallRequirement, hashes: Optional[Dict[str, str]] = None
    ) -> str:
        """Build egg_info directory for editable candidates and a wheel for others.

        :param ireq: the InstallRequirment of the candidate.
        :param hashes: a dictionary of filename: hash_value to check against downloaded
        artifacts.
        :returns: The full path of the built artifact.
        """
        from pip._internal.utils.temp_dir import global_tempdir_manager

        from pdm.builders import EditableBuilder
        from pdm.builders import WheelBuilder

        kwargs = self._make_building_args(ireq)
        with self.get_finder() as finder:
            with allow_all_wheels():
                # temporarily allow all wheels to get a link.
                ireq.populate_link(finder, False, bool(hashes))
            if not ireq.editable and not ireq.req.name:
                ireq.source_dir = kwargs["build_dir"]
            else:
                ireq.ensure_has_source_dir(kwargs["build_dir"])
            download_dir = kwargs["download_dir"]
            only_download = False
            if ireq.link.is_wheel:
                # Wheels need no build step -- just download them.
                download_dir = kwargs["wheel_download_dir"]
                only_download = True
            if hashes:
                ireq.options["hashes"] = convert_hashes(hashes)
            if not (ireq.editable and ireq.req.is_local_dir):
                with global_tempdir_manager():
                    downloaded = shims.shim_unpack(
                        link=ireq.link,
                        download_dir=download_dir,
                        location=ireq.source_dir,
                        hashes=ireq.hashes(False),
                        only_download=only_download,
                        session=finder.session,
                    )
                    # Preserve the downloaded file so that it won't be cleared.
                    if downloaded and only_download:
                        try:
                            shutil.copy(downloaded, download_dir)
                        except shutil.SameFileError:
                            pass
            # Now all source is prepared, build it.
            if ireq.link.is_wheel:
                return (context.cache("wheels") / ireq.link.filename).as_posix()
            builder_class = EditableBuilder if ireq.editable else WheelBuilder
            kwargs["finder"] = finder
            with builder_class(ireq) as builder:
                return builder.build(**kwargs)

    def get_working_set(self) -> WorkingSet:
        """Get the working set based on local packages directory."""
        paths = self.get_paths()
        return WorkingSet(
            [paths["platlib"]], python=get_python_version(self.python_executable)
        )

    @cached_property
    def marker_environment(self) -> Dict[str, Any]:
        """Get environment for marker evaluation"""
        return get_pep508_environment(self.python_executable)

    def which(self, command: str) -> str:
        """Get the full path of the given executable against this environment."""
        if not os.path.isabs(command) and command.startswith("python"):
            # "python3.8" -> version suffix "3.8"; bare "python" -> "".
            python = os.path.splitext(command)[0]
            version = python[6:]
            this_version = get_python_version(self.python_executable, True)
            if not version or this_version.startswith(version):
                return self.python_executable
        # Fallback to use shutil.which to find the executable
        return shutil.which(command, path=os.getenv("PATH"))
|
<gh_stars>0
from functools import partial
from typing import Dict, List
import numpy as np
import argparse
import helper as Helper
def original(start: int, end: int, rank_map: Dict[int, tuple]) -> int:
    """Temporal evaluation metric as seen in the original HyTE code and paper.

    :param start: first time step of the true interval (inclusive).
    :param end: last time step of the true interval (inclusive).
    :param rank_map: maps a time step to a ``(rank, score)`` tuple.
    :returns: the best (minimum) rank over all time steps of the interval.
    """
    # Collect the rank component of every time step in [start, end].
    ranks = [rank_map[time][0] for time in range(start, end + 1)]
    return np.min(np.array(ranks))
def approach_2(start: int, end: int, rank_map: Dict[int, tuple]) -> float:
    """Approach 2: record sum of ranks of all quads in the interval. Divide by best possible sum.

    :param start: first time step of the true interval (inclusive).
    :param end: last time step of the true interval (inclusive).
    :param rank_map: maps a time step to a ``(rank, score)`` tuple.
    :returns: the summed ranks normalized by the best achievable sum
        ``1 + 2 + ... + n`` for an interval of length ``n``.
    """
    ranks = [rank_map[time][0] for time in range(start, end + 1)]
    interval = len(ranks)
    # Best possible sum of ranks for the interval: 1 + 2 + ... + interval.
    penalty = interval * ((1 + interval) / 2)
    return np.sum(ranks) / penalty
def approach_3(start: int, end: int, rank_map: Dict[int, tuple]) -> float:
    """Approach 3: Calculate scores of all groups of length `interval`. Returns rank of original interval.

    :param start: first time step of the true interval (inclusive).
    :param end: last time step of the true interval (inclusive).
    :param rank_map: maps a time step to a ``(rank, score)`` tuple.
    :returns: 1-based, penalty-corrected rank of the original interval among
        all contiguous groups of the same length.
    """
    interval_size = end - start
    # Very naive (inefficient) implementation, but it will do fine for the number of time classes
    # we currently handle. Can be updated in the future to use a rolling sum.
    groups = []
    for time in range(0, len(rank_map) - interval_size):
        score_sum = 0
        for offset in range(0, interval_size + 1):
            score_sum += rank_map[time + offset][1]
        groups.append((time, score_sum))
    # Convert to numpy array and sort according to score.
    groups = np.array(groups)
    groups = groups[groups[:, 1].argsort()]
    # Scale so results are comparable across interval lengths.
    penalty = len(rank_map) / len(groups)
    # Return rank of the original interval. This rank is zero based.
    # BUGFIX: search only the time column; the previous `groups == start`
    # compared every element, so a score value equal to `start` could be
    # matched instead of the interval's start time.
    rank = np.where(groups[:, 0] == start)[0][0]
    rank_corrected = rank * penalty
    return 1 + rank_corrected
# Map CLI metric names to their implementations.  All metrics share the
# signature (start, end, rank_map); no arguments are pre-bound, so the
# previous functools.partial(...) wrappers were no-ops and plain function
# references are used instead.
eval_metrics = {
    "original": original,
    "2": approach_2,
    "3": approach_3,
}
def parse_args():
    """Build and parse the command-line options of the evaluation script."""
    ap = argparse.ArgumentParser(description="Eval model outputs")
    ap.add_argument("-model", dest="model", required=True, help="Dataset to use")
    ap.add_argument(
        "-mode",
        dest="mode",
        required=True,
        choices=["valid", "test"],
        help="Run for validation or test set",
    )
    ap.add_argument(
        "-test_freq",
        dest="freq",
        required=True,
        type=int,
        help="what is to be predicted",
    )
    ap.add_argument(
        "-eval_metric",
        dest="eval_metric",
        required=True,
        choices=eval_metrics.keys(),
        help="Which temporal evaluation metric to apply.",
    )
    return ap.parse_args()
# ---------------------------------------------------------------------------
# Script entry: evaluate model predictions for every saved epoch.
# ---------------------------------------------------------------------------
args = parse_args()
print(f"Evaluating {args.model} in {args.mode} mode")
for k in range(args.freq, 30000, args.freq):
    # Stop as soon as an epoch file is missing: we ran through all outputs.
    try:
        true_output = open(f"temp_scope/{args.model}/{args.mode}.txt")
    except FileNotFoundError:
        break
    try:
        model_time = open(f"temp_scope/{args.model}/{args.mode}_time_pred_{k}.txt")
    except FileNotFoundError:
        true_output.close()
        break
    # BUGFIX: close both files deterministically; the original leaked a pair
    # of file handles on every epoch iteration.
    with true_output, model_time:
        predictions = Helper.parse_score_file(model_time)
        # Now, the index is time. Map the (score, time) tuples to time: (rank, score).
        time_rank_map = {}
        for i, row in enumerate(predictions):
            # Parse ranks. Increase ranks by 1 to make them 1-based instead of 0-based.
            rank_score = zip(range(1, len(row) + 1), row[:, 0])
            times = map(int, row[:, 1])
            time_rank_map[i] = dict(zip(times, rank_score))
        # Apply the selected temporal evaluation metric to each true interval.
        ranks = []
        for i, row in enumerate(true_output):
            start, end = list(map(int, row.split()))  # Kinda hacky, but works.
            ranks.append(eval_metrics[args.eval_metric](start, end, time_rank_map[i]))
    # BUGFIX: force a float array.  The "original" metric yields integer
    # ranks, and np.reciprocal on an integer array floors every reciprocal
    # to 0 or 1, silently corrupting the MRR.
    ranks = np.array(ranks, dtype=float)
    print(f"Epoch {k} : MR {np.mean(ranks)}")
    print(f"Epoch {k} : MRR {np.mean(np.reciprocal(ranks))}")
    # Calculate & print hits@x
    for hit in [1, 3, 10]:
        hits_x = len(ranks[np.where(ranks <= hit)]) / float(len(ranks))
        print(f"Epoch {k} : HITS@{hit} {hits_x}")
|
<gh_stars>0
import mimetypes
import pathlib
from dataclasses import dataclass
import boto3
from botocore.exceptions import ClientError
from scripts.commands import settings
from scripts.commands.auth0_handler import management_api
BASE_DIR = pathlib.Path(__file__).resolve().parent.parent.parent
@dataclass(frozen=True)
class StaticFiles:
    """Immutable bundle of the three login-page build artifacts."""

    # Fields may be None when retrieve_static_files() finds no file with the
    # matching suffix.
    html_file: pathlib.Path
    css_file: pathlib.Path
    js_file: pathlib.Path
def retrieve_bucket(s3_resource, bucket_name):
    """Return the S3 bucket named *bucket_name*, creating it if it does not exist."""
    bucket = s3_resource.Bucket(bucket_name)
    # A bucket without a creation date has never been created on the backend.
    if not bucket.creation_date:
        bucket.create()
    return bucket
def retrieve_buckets_cors(s3_resource, bucket_name):
    """Return ``(bucket_cors, has_cors)`` for *bucket_name*.

    ``has_cors`` is False when the bucket has no CORS configuration at all
    (the backend signals this with a NoSuchCORSConfiguration error).
    """
    has_cors = False
    bucket_cors = s3_resource.BucketCors(bucket_name)
    try:
        has_cors = bool(bucket_cors.cors_rules)
    except ClientError as e:
        # Only swallow the "no configuration" error; anything else is real.
        if e.response["Error"]["Code"] != "NoSuchCORSConfiguration":
            raise
        has_cors = False
    return bucket_cors, has_cors
def upload_file(bucket, filename, blob_key):
    """Upload *filename* to *bucket* under *blob_key* as a public, immutable object.

    Sample about how to check it:
    curl -i https://my-honest-hosted-content-december-2021.s3.amazonaws.com/login.c20437d2.css
    """
    # Derive the Content-Type from the destination key's extension.
    guessed_type, _ = mimetypes.guess_type(blob_key)
    bucket.upload_file(
        filename,
        blob_key,
        ExtraArgs={
            "ACL": "public-read",
            "CacheControl": "public, max-age=31536000, immutable",
            "ContentType": guessed_type,
        },
    )
def retrieve_static_files(folder, glob_pattern) -> StaticFiles:
    """Collect the html/css/js files matching *glob_pattern* under BASE_DIR/folder.

    The last match per suffix wins; a suffix with no match stays None.
    """
    matches = {".js": None, ".css": None, ".html": None}
    for candidate in pathlib.Path(f"{BASE_DIR}/{folder}").glob(glob_pattern):
        if candidate.suffix in matches:
            matches[candidate.suffix] = candidate
    return StaticFiles(matches[".html"], matches[".css"], matches[".js"])
def load_content_as_string(file_name) -> str:
    """Read *file_name* as UTF-8, strip trailing whitespace from each line,
    and return the lines joined together without newlines."""
    with open(file_name, mode="r", encoding="utf-8") as fh:
        stripped_lines = [line.rstrip() for line in fh]
    return "".join(stripped_lines)
def main():
    """Publish the compiled login page: upload the CSS/JS assets to S3,
    push the HTML to Auth0, and ensure the bucket has a CORS policy."""
    s3 = boto3.resource(
        "s3",
        region_name=settings.AWS_S3_REGION,
        aws_access_key_id=settings.AWS_SERVICE_ACCOUNT_ACCESS_KEY,
        aws_secret_access_key=settings.AWS_SERVICE_ACCOUNT_ACCESS_SECRET,
    )
    static_files = retrieve_static_files("out", "login.*")
    print(f"Configured static files: {static_files}")
    bucket = retrieve_bucket(s3, settings.BUCKET_NAME)
    upload_file(bucket, str(static_files.css_file), static_files.css_file.name)
    upload_file(bucket, str(static_files.js_file), static_files.js_file.name)
    # Plain string (the original used an f-string with no placeholders).
    print("CSS and JS files have been uploaded")
    page_as_str = load_content_as_string(str(static_files.html_file))
    management_api.update_login_page_classic(settings.ALL_APPLICATIONS_CLIENT_ID, page_as_str)
    print("HTML file has been updated on Auth0")
    bucket_cors, has_cors = retrieve_buckets_cors(s3, settings.BUCKET_NAME)
    if not has_cors:
        # str.split already returns a list; the identity comprehension around
        # it was redundant.
        cors_origins_allow_list = settings.CORS_ALLOWED_ORIGINS.split(",")
        print(f"Applying CORS with the following ORIGINS: {cors_origins_allow_list}")
        bucket_cors.put(
            CORSConfiguration={
                "CORSRules": [
                    {
                        "AllowedMethods": ["GET"],
                        "AllowedOrigins": cors_origins_allow_list,
                        # NOTE(review): ExposeHeaders normally lists response
                        # *header names*, not HTTP methods -- confirm ["GET"]
                        # is intended here.
                        "ExposeHeaders": ["GET"],
                        "MaxAgeSeconds": 3600,
                    },
                ]
            },
        )
|
<gh_stars>0
import unittest
import softwareprocess.angles.Longitude as Long
class LongitudeTest(unittest.TestCase):
    """Unit tests for softwareprocess.angles.Longitude.

    Uses the non-deprecated assertEqual/assertAlmostEqual names (the
    assertEquals/assertAlmostEquals aliases were removed in Python 3.12).
    """

    def setUp(self):
        self.longitudeTooLowStr = 'The longitude specified is too low! It must be greater than or equal to 0 degrees and 0.0 minutes'
        self.longitudeTooHighStr = 'The longitude specified is too high! It must be less than 360 degrees and 0.0 minutes'

    #-------
    #------ Will rely on AngleTest to validate basic scenarios.
    #------ The value Longitude class adds is it restricts the degree range to 0<=long<360
    #------ These will test that
    #------ Acceptance tests
    #------ 100 Constructor
    #------ Boundary value confidence
    #------ input : either number or string. String is in format XdY.Y where X is number of degrees and Y.Y is number of minutes down to 1 /10 of a minute
    #------ output : instance of Longitude class
    #-----Happy path
    #-------degreeMinutesStr -> nominal value = '12d30.5'
    #-------degreeMinutesStr -> high value = '359d59.9'
    #-------degreeMinutesStr -> low value = '0d0.0'
    #-------degreeMinutesFloat -> nominal value = 12.5
    #-------degreeMinutesFloat -> high value = 359.998
    #-------degreeMinutesFloat -> low value = 0
    #-----Sad path
    #-------degreeMinutesStr -> value too low = '-0d0.1'
    #-------degreeMinutesStr -> value too high = '3600d0.0'
    def test100_010_ShouldConstructLongitudeNominal(self):
        long = Long.Longitude('12d30.5')
        self.assertIsInstance(long, Long.Longitude)
        self.assertEqual(long.getDegreeMinuteString(), '12d30.5')
        self.assertAlmostEqual(long.getDegreesFloat(), 12.5083, 3)

    def test100_020_ShouldConstructLongitudeHigh(self):
        long = Long.Longitude('359d59.9')
        self.assertIsInstance(long, Long.Longitude)
        self.assertEqual(long.getDegreeMinuteString(), '359d59.9')
        self.assertAlmostEqual(long.getDegreesFloat(), 359.9983, 3)

    def test100_030_ShouldConstructLongitudeLow(self):
        long = Long.Longitude('0d0.0')
        self.assertIsInstance(long, Long.Longitude)
        self.assertEqual(long.getDegreeMinuteString(), '0d0.0')
        self.assertEqual(long.getDegreesFloat(), 0)

    def test100_040_ShouldConstructLongitudeNominalFloat(self):
        long = Long.Longitude(12.5)
        self.assertIsInstance(long, Long.Longitude)
        self.assertEqual(long.getDegreeMinuteString(), '12d30.0')
        # BUGFIX: the original called assertEquals(..., 12.5, 3), passing 3 as
        # the *msg* argument; an approximate comparison was clearly intended.
        self.assertAlmostEqual(long.getDegreesFloat(), 12.5, 3)

    def test100_050_ShouldConstructLongitudeHighFloat(self):
        long = Long.Longitude(359.998)
        self.assertIsInstance(long, Long.Longitude)
        self.assertEqual(long.getDegreeMinuteString(), '359d59.9')
        self.assertAlmostEqual(long.getDegreesFloat(), 359.998, 3)

    def test100_060_ShouldConstructLongitudeLowFloat(self):
        long = Long.Longitude(0)
        self.assertIsInstance(long, Long.Longitude)
        self.assertEqual(long.getDegreeMinuteString(), '0d0.0')
        self.assertEqual(long.getDegreesFloat(), 0)

    def test100_910_ShouldRaiseErrorLongitudeTooLow(self):
        with self.assertRaises(ValueError) as ctx:
            long = Long.Longitude('-0d0.1')
        self.assertEqual(ctx.exception.args[0], self.longitudeTooLowStr)

    def test100_920_ShouldRaiseErrorLongitudeTooHigh(self):
        with self.assertRaises(ValueError) as ctx:
            long = Long.Longitude('3600d0.0')
        self.assertEqual(ctx.exception.args[0], self.longitudeTooHighStr)
# -*- coding: utf-8 -*-
'''
Tests for the SVN state
'''
# Import python libs
from __future__ import absolute_import, unicode_literals, print_function
import os
import shutil
import socket
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.paths import TMP
from tests.support.mixins import SaltReturnAssertsMixin
class SvnTest(ModuleCase, SaltReturnAssertsMixin):
    '''
    Validate the svn state
    '''
    def setUp(self):
        # Skip early when the svn binary or the network is unavailable so the
        # remaining assertions only run in a usable environment.
        super(SvnTest, self).setUp()
        if not self.run_function('cmd.has_exec', ['svn']):
            self.skipTest("The executable 'svn' is not available.")
        self.__domain = 'svn.apache.org'
        try:
            if hasattr(socket, 'setdefaulttimeout'):
                # 10 second dns timeout
                socket.setdefaulttimeout(10)
            socket.gethostbyname(self.__domain)
        except socket.error:
            msg = 'error resolving {0}, possible network issue?'
            self.skipTest(msg.format(self.__domain))
        # Checkout target and the upstream repository URL used by every test.
        self.target = os.path.join(TMP, 'apache_http_test_repo')
        self.name = 'http://{0}/repos/asf/httpd/httpd/trunk/test/'.format(
            self.__domain
        )
        # Pinned revision so results are reproducible across runs.
        self.new_rev = '1456987'
    def tearDown(self):
        # ignore_errors: the failure test intentionally leaves no checkout.
        shutil.rmtree(self.target, ignore_errors=True)
        # Reset the dns timeout after the test is over
        socket.setdefaulttimeout(None)
    def test_latest(self):
        '''
        svn.latest

        Fresh checkout into a non-existent target: must succeed and report
        both the new checkout and the pinned revision in the state changes.
        '''
        ret = self.run_state(
            'svn.latest',
            name=self.name,
            rev=self.new_rev,
            target=self.target,
        )
        self.assertSaltTrueReturn(ret)
        self.assertTrue(os.path.isdir(os.path.join(self.target, '.svn')))
        self.assertSaltStateChangesEqual(
            ret, self.name, keys=['new']
        )
        self.assertSaltStateChangesEqual(
            ret, self.new_rev, keys=['revision']
        )
    def test_latest_failure(self):
        '''
        svn.latest

        An unresolvable repository URL must fail and leave no checkout behind.
        '''
        ret = self.run_state(
            'svn.latest',
            name='https://youSpelledApacheWrong.com/repo/asf/httpd/trunk/',
            rev=self.new_rev,
            target=self.target,
        )
        self.assertSaltFalseReturn(ret)
        self.assertFalse(os.path.isdir(os.path.join(self.target, '.svn')))
    def test_latest_empty_dir(self):
        '''
        svn.latest

        Checking out into an existing but empty directory must succeed.
        '''
        if not os.path.isdir(self.target):
            os.mkdir(self.target)
        ret = self.run_state(
            'svn.latest',
            name=self.name,
            rev=self.new_rev,
            target=self.target,
        )
        self.assertSaltTrueReturn(ret)
        self.assertTrue(os.path.isdir(os.path.join(self.target, '.svn')))
    # NOTE: the "no_test_" prefix keeps this case out of the runner's
    # test discovery -- it is intentionally disabled, not dead code.
    def no_test_latest_existing_repo(self):
        '''
        svn.latest against existing repository
        '''
        current_rev = '1442865'
        cwd, basename = os.path.split(self.target)
        opts = ('-r', current_rev)
        # Pre-seed a checkout at an older revision, then expect svn.latest to
        # report the old => new revision transition.
        out = self.run_function('svn.checkout',
                                [cwd, self.name, basename, None, None, opts])
        assert out
        ret = self.run_state(
            'svn.latest',
            name=self.name,
            rev=self.new_rev,
            target=self.target,
        )
        self.assertSaltTrueReturn(ret)
        self.assertSaltStateChangesEqual(
            ret,
            '{0} => {1}'.format(current_rev, self.new_rev),
            keys=['revision']
        )
        self.assertTrue(os.path.isdir(os.path.join(self.target, '.svn')))
    # NOTE: also intentionally disabled via the "no_test_" prefix.
    def no_test_latest_existing_repo_no_rev_change(self):
        '''
        svn.latest against existing repository
        '''
        current_rev = self.new_rev
        cwd, basename = os.path.split(self.target)
        opts = ('-r', current_rev)
        # Checkout is already at the requested revision: the state must
        # succeed with an empty changes dict.
        out = self.run_function('svn.checkout',
                                [cwd, self.name, basename, None, None, opts])
        assert out
        ret = self.run_state(
            'svn.latest',
            name=self.name,
            rev=self.new_rev,
            target=self.target,
        )
        self.assertSaltTrueReturn(ret)
        self.assertSaltStateChangesEqual(ret, {})
        self.assertTrue(os.path.isdir(os.path.join(self.target, '.svn')))
|
<reponame>Zilched/docker-weewx
#
# Copyright (c) 2009-2021 <NAME> <<EMAIL>>
#
# See the file LICENSE.txt for your full rights.
#
"""weedb driver for the MySQL database"""
import decimal
import six
try:
import MySQLdb
except ImportError:
# Some installs use 'pymysql' instead of 'MySQLdb'
import pymysql as MySQLdb
from pymysql import DatabaseError as MySQLDatabaseError
else:
try:
from MySQLdb import DatabaseError as MySQLDatabaseError
except ImportError:
from _mysql_exceptions import DatabaseError as MySQLDatabaseError
from weeutil.weeutil import to_bool
import weedb
# Storage engine used for new tables unless the caller overrides it.
DEFAULT_ENGINE = 'INNODB'

# Map MySQL error numbers to weedb exception types.  The 1xxx codes are
# server-side errors, the 2xxx codes are client/connection errors; the None
# key is the fallback used by guard() when no error number can be extracted.
exception_map = {
    1007: weedb.DatabaseExistsError,
    1008: weedb.NoDatabaseError,
    1044: weedb.PermissionError,
    1045: weedb.BadPasswordError,
    1049: weedb.NoDatabaseError,
    1050: weedb.TableExistsError,
    1054: weedb.NoColumnError,
    1091: weedb.NoColumnError,
    1062: weedb.IntegrityError,
    1146: weedb.NoTableError,
    1927: weedb.CannotConnectError,
    2002: weedb.CannotConnectError,
    2003: weedb.CannotConnectError,
    2005: weedb.CannotConnectError,
    2006: weedb.DisconnectError,
    2013: weedb.DisconnectError,
    None: weedb.DatabaseError
}
def guard(fn):
    """Decorator function that converts MySQL exceptions into weedb exceptions.

    The MySQL error number (the first element of the exception's args) is
    looked up in exception_map; unknown numbers fall back to
    weedb.DatabaseError.
    """
    # Local import keeps the module's import section untouched.
    from functools import wraps

    # BUGFIX: use functools.wraps so the wrapped function keeps its
    # __name__/__doc__ (the bare closure previously hid them).
    @wraps(fn)
    def guarded_fn(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except MySQLDatabaseError as e:
            # Get the MySQL exception number out of e:
            try:
                errno = e.args[0]
            except (IndexError, AttributeError):
                errno = None
            # Default exception is weedb.DatabaseError
            klass = exception_map.get(errno, weedb.DatabaseError)
            raise klass(e)

    return guarded_fn
def connect(host='localhost', user='', password='', database_name='',
            driver='', port=3306, engine=DEFAULT_ENGINE, autocommit=True, **kwargs):
    """Open and return a weedb Connection to the specified database."""
    conn = Connection(host=host, port=int(port), user=user, password=password,
                      database_name=database_name, engine=engine,
                      autocommit=autocommit, **kwargs)
    return conn
def create(host='localhost', user='', password='', database_name='',
           driver='', port=3306, engine=DEFAULT_ENGINE, autocommit=True, **kwargs):
    """Create the specified database. If it already exists,
    an exception of type weedb.DatabaseExistsError will be thrown."""
    # Open up a connection w/o specifying the database.  Renamed from
    # `connect` -- the old local shadowed the module-level connect() function.
    conn = Connection(host=host,
                      port=int(port),
                      user=user,
                      password=password,
                      autocommit=autocommit,
                      **kwargs)
    try:
        cursor = conn.cursor()
        try:
            # Identifiers cannot be bound as SQL parameters, so the name is
            # interpolated; database_name comes from trusted configuration.
            cursor.execute("CREATE DATABASE %s" % (database_name,))
        finally:
            cursor.close()
    finally:
        # Close the connection even if opening the cursor failed.
        conn.close()
def drop(host='localhost', user='', password='', database_name='',
         driver='', port=3306, engine=DEFAULT_ENGINE, autocommit=True,
         **kwargs):  # @UnusedVariable
    """Drop (delete) the specified database."""
    # Open up a connection.  Renamed from `connect` -- the old local shadowed
    # the module-level connect() function.
    conn = Connection(host=host,
                      port=int(port),
                      user=user,
                      password=password,
                      autocommit=autocommit,
                      **kwargs)
    try:
        cursor = conn.cursor()
        try:
            # Identifiers cannot be bound as SQL parameters, so the name is
            # interpolated; database_name comes from trusted configuration.
            cursor.execute("DROP DATABASE %s" % database_name)
        finally:
            cursor.close()
    finally:
        # Close the connection even if opening the cursor failed.
        conn.close()
class Connection(weedb.Connection):
    """A wrapper around a MySQL connection object."""

    @guard
    def __init__(self, host='localhost', user='', password='', database_name='',
                 port=3306, engine=DEFAULT_ENGINE, autocommit=True, **kwargs):
        """Initialize an instance of Connection.

        Parameters:

            host: IP or hostname with the mysql database (required)
            user: User name (required)
            password: The password for the username (required)
            database_name: The database to be used. (required)
            port: Its port number (optional; default is 3306)
            engine: The MySQL database engine to use (optional; default is 'INNODB')
            autocommit: If True, autocommit is enabled (default is True)
            kwargs: Any extra arguments you may wish to pass on to MySQL
              connect statement. See the file MySQLdb/connections.py for a list (optional).
        """
        connection = MySQLdb.connect(host=host, port=int(port), user=user, passwd=password,
                                     db=database_name, **kwargs)
        weedb.Connection.__init__(self, connection, database_name, 'mysql')

        # Set the storage engine to be used
        set_engine(self.connection, engine)

        # Set the transaction isolation level.
        self.connection.query("SET TRANSACTION ISOLATION LEVEL READ COMMITTED")
        self.connection.autocommit(to_bool(autocommit))

    def cursor(self):
        """Return a cursor object."""
        # The implementation of the MySQLdb cursor is lame enough that we are
        # obliged to include a wrapper around it:
        return Cursor(self)

    @guard
    def tables(self):
        """Returns a list of tables in the database."""

        table_list = list()
        # Get a cursor directly from MySQL
        cursor = self.connection.cursor()
        try:
            cursor.execute("""SHOW TABLES;""")
            while True:
                row = cursor.fetchone()
                if row is None: break
                # Extract the table name. In case it's in unicode, convert to a regular string.
                table_list.append(str(row[0]))
        finally:
            cursor.close()
        return table_list

    @guard
    def genSchemaOf(self, table):
        """Return a summary of the schema of the specified table.

        Yields one 6-tuple per column:
        (row_number, column_name, column_type, can_be_null, default, is_primary).

        If the table does not exist, an exception of type weedb.OperationalError is raised."""

        # Get a cursor directly from MySQL:
        cursor = self.connection.cursor()
        try:
            # If the table does not exist, this will raise a MySQL ProgrammingError exception,
            # which gets converted to a weedb.OperationalError exception by the guard decorator
            cursor.execute("""SHOW COLUMNS IN %s;""" % table)
            irow = 0
            while True:
                row = cursor.fetchone()
                if row is None: break
                # Append this column to the list of columns.
                colname = str(row[0])
                # Normalize MySQL type names to the generic names weedb uses.
                if row[1].upper() == 'DOUBLE':
                    coltype = 'REAL'
                elif row[1].upper().startswith('INT'):
                    coltype = 'INTEGER'
                elif row[1].upper().startswith('CHAR'):
                    coltype = 'STR'
                else:
                    coltype = str(row[1]).upper()
                # row[3] is the Key column; 'PRI' marks a primary key.
                is_primary = True if row[3] == 'PRI' else False
                # row[2] is the Null column ('YES'/'NO'); empty means NO.
                can_be_null = False if row[2] == '' else to_bool(row[2])
                yield (irow, colname, coltype, can_be_null, row[4], is_primary)
                irow += 1
        finally:
            cursor.close()

    @guard
    def columnsOf(self, table):
        """Return a list of columns in the specified table.

        If the table does not exist, an exception of type weedb.OperationalError is raised."""
        column_list = [row[1] for row in self.genSchemaOf(table)]
        return column_list

    @guard
    def get_variable(self, var_name):
        # Look up a server variable by exact name.  var_name is interpolated
        # into the SQL; callers pass internal, trusted names only.
        cursor = self.connection.cursor()
        try:
            cursor.execute("SHOW VARIABLES LIKE '%s';" % var_name)
            row = cursor.fetchone()
            # This is actually a 2-way tuple (variable-name, variable-value),
            # or None, if the variable does not exist.
            return row
        finally:
            cursor.close()

    @guard
    def begin(self):
        """Begin a transaction."""
        self.connection.query("START TRANSACTION")

    @guard
    def commit(self):
        # Commit the current transaction.
        self.connection.commit()

    @guard
    def rollback(self):
        # Roll back the current transaction.
        self.connection.rollback()
class Cursor(object):
    """A wrapper around the MySQLdb cursor object"""

    @guard
    def __init__(self, connection):
        """Create a Cursor bound to *connection* (an instance of
        weedb.mysql.Connection)."""
        # Keep the raw MySQLdb cursor internally and delegate to it.
        self.cursor = connection.connection.cursor()

    @guard
    def execute(self, sql_string, sql_tuple=()):
        """Execute a SQL statement on the MySQL server and return self.

        sql_string: A SQL statement using '?' as the placeholder character.
        sql_tuple: The values to substitute for the placeholders.
        """
        # MySQL's paramstyle is '%s'; translate from the '?' style used by
        # the rest of weedb.
        translated = sql_string.replace('?', '%s')
        # Force a plain tuple in case sql_tuple is a tuple subclass that
        # overrides string conversion (as a TimeSpan object does).
        self.cursor.execute(translated, tuple(sql_tuple))
        return self

    def fetchone(self):
        """Fetch the next result row, normalized through the module-level
        _massage filter."""
        return _massage(self.cursor.fetchone())

    def drop_columns(self, table, column_names):
        """Drop the set of 'column_names' from table 'table'.

        table: The name of the table from which the column(s) are to be dropped.
        column_names: A set (or list) of column names to be dropped. It is not an error to try to drop
        a non-existent column.
        """
        for name in column_names:
            self.execute("ALTER TABLE %s DROP COLUMN %s;" % (table, name))

    def close(self):
        """Close the underlying cursor; safe to call more than once."""
        try:
            self.cursor.close()
            del self.cursor
        except AttributeError:
            # Already closed.
            pass

    # Iterator protocol: lets the cursor be used directly in a for-loop.
    def __iter__(self):
        return self

    def __next__(self):
        row = self.fetchone()
        if row is None:
            raise StopIteration
        return row

    # For Python 2 compatibility:
    next = __next__

    # Context-manager protocol: closes the cursor on exit.
    def __enter__(self):
        return self

    def __exit__(self, etyp, einst, etb):  # @UnusedVariable
        self.close()
#
# This is a utility function for converting a result set that might contain
# longs or decimal.Decimals (which MySQLdb uses) to something containing just ints.
#
def _massage(seq):
# Return the _massaged sequence if it exists, otherwise, return None
if seq is not None:
return [int(i) if isinstance(i, (six.integer_types, decimal.Decimal)) else i for i in seq]
def set_engine(connect, engine):
    """Set the default MySQL storage engine for the session on *connect*."""
    # Newer drivers keep the server version in the private _server_version
    # attribute; older ones expose server_version.
    try:
        server_version = connect._server_version
    except AttributeError:
        server_version = connect.server_version
    # Some servers return lists of ints, some lists of strings, some a string.
    # Normalize tuple/list forms to a 'major.minor' string.
    if isinstance(server_version, (tuple, list)):
        server_version = '%s.%s' % server_version[:2]
    # MySQL 5.5 renamed the session variable from storage_engine to
    # default_storage_engine.
    modern_server = server_version >= '5.5'
    if modern_server:
        connect.query(f"SET default_storage_engine={engine}")
    else:
        connect.query(f"SET storage_engine={engine};")
|
# bruteforce imports
from bruteforce.bruteforce import bruteforce
# configs imports
from configs import configs
# crawler imports
from crawler.crawler import Crawler
# funcs imports
from funcs.tokenizer import tokenize_html
# network imports
from network.Socket import Socket
from network.HttpRequest import HttpRequest
# utils imports
from utils.login_utils import detect_login
from utils.link_utils import dom_family
from utils.constants import HTTP_UA_CHROME
# tests imports
from tests.test_transform import TransformTest
from tests.test_link_utils import LinkUtilsTest
from tests.test_login_utils import LoginUtilsTest
if __name__ == "__main__":
    # Manual test driver: change the value of 'test' to select a scenario.
    tests = ["brute_force", "configs", "crawler", "http_local", "http_requests",
             "test_dom_family", "test_link_utils", "test_login_utils", "test_transform"]
    test = "crawler"
    if test not in tests:
        print(f"Test {test} is invalid.")
        # Previously this fell through with 'pass' and then silently matched
        # no branch below; stop explicitly so a bad name is caught at once.
        raise SystemExit(1)
    # brute_force: detect a login form and try credential pairs against it.
    if test == "brute_force":
        # host = "localhost"
        # port = 5000
        # url = "/login"
        # ua = "googlebot"
        host = "forum.172.16.58.3.xip.io"
        port = 80
        url = "/login/"
        ua = "chrome"
        request = HttpRequest(host, port, "GET")
        response = request.send_get_request(url,host,ua)
        if response is not None:
            body = response.body
            # Detect if there is a login form present, and get login fields
            login = detect_login(body,host+url)
            if login is not None:
                form_url, user_key, pass_key, action_val = login
                words = tokenize_html(response.response, False)
                # Narrow the candidate word list when the known credentials
                # appear in the page (keeps the demo run fast).
                if "<EMAIL>" in words and "test" in words:
                    words = {"<EMAIL>","test"}
                # if "<EMAIL>" in words:
                #     words = {"<EMAIL>","admin"}
                post_req = HttpRequest(host, port, "POST")
                success = bruteforce(post_req, form_url, host, port, ua, user_key, pass_key, action_val, words)
                print("Successful Logins:")
                for cred in success:
                    print(f' user = {cred.user}, pass = {cred.password}')
    # test_dom_family: quick ad-hoc check of the dom_family helper.
    elif test == "test_dom_family":
        dom_family("a.com", "b.a.com")
        dom_family("x.com", "x.com/login")
    elif test == "crawler":
        url = "http://forum.192.168.3.11.xip.io/login/"
        # url = "http://w.com"
        # url = "http://email.kumo.x10host.com"
        # url = "mizio.herokuapp.com/test"
        # url = "http://192.168.3.11.xip.io/"
        method = "bfs"
        agent = HTTP_UA_CHROME
        depth = 20
        pages = 100
        crawler = Crawler()
        crawler.crawl(url, method, agent, depth, pages)
    # configs
    elif test == "configs":
        config = configs.DEFAULT_CONFIGS
        for val in config:
            print("%s : %s" % (val,config[val]))
    # http_local: exercise GET and several POST logins against a local server.
    elif test == "http_local":
        host = "localhost"
        port = 5000
        url = "/"
        ua = "chrome"
        # Test GET
        request = HttpRequest(host, port, "GET")
        response = request.send_get_request(url, host, ua)
        if response is not None:
            print(response.response)
        print("---------------")
        # Test POST login success
        request = HttpRequest(host, port, "POST")
        url = "/login"
        data = {"email":"<EMAIL>", "password":"<PASSWORD>"}
        content_type = "application/x-www-form-urlencoded"
        body = HttpRequest.generate_post_body(content_type,data)
        content_length = len(body)
        if body is not None:
            response = request.send_post_request(url, host, ua, content_type, content_length, body)
            if response is not None:
                print(response.response)
        print("---------------")
        # Test POST login fail
        request = HttpRequest(host, port, "POST")
        url = "/login"
        content_type = "application/x-www-form-urlencoded"
        data = {"email":"<EMAIL>", "password":"<PASSWORD>"}
        body = HttpRequest.generate_post_body(content_type,data)
        content_length = len(body)
        if body is not None:
            response = request.send_post_request(url, host, ua, content_type, content_length, body)
            if response is not None:
                print(response.response)
        print("---------------")
        # Test POST login fail, second credential set
        # (was mislabeled "funform" -- it still posts to /login)
        url = "/login"
        content_type = "application/x-www-form-urlencoded"
        data = {"email":"<EMAIL>", "password":"<PASSWORD>"}
        request = HttpRequest(host, port, "POST")
        body = HttpRequest.generate_post_body(content_type,data)
        content_length = len(body)
        if body is not None:
            response = request.send_post_request(url, host, ua, content_type, content_length, body)
            if response is not None:
                print(response.response)
        print("---------------")
        # Test POST funform fail
        url = "/funform"
        content_type = "application/x-www-form-urlencoded"
        data = {"email":"admin", "password":"<PASSWORD>", "btn":"login"}
        request = HttpRequest(host, port, "POST")
        body = HttpRequest.generate_post_body(content_type,data)
        content_length = len(body)
        if body is not None:
            response = request.send_post_request(url, host, ua, content_type, content_length, body)
            if response is not None:
                print(response.response)
    # http_requests: GET against a remote host, inspecting status/redirects.
    elif test == "http_requests":
        host = "172.16.58.3.xip.io"
        port = 80
        url = "/"
        ua = "chrome"
        num_get_req = 1
        # Test GET requests
        request = HttpRequest(host,port,"GET")
        for i in range(num_get_req):
            print("request %d" % (i))
            response = request.send_get_request(url,host,ua)
            if response is not None:
                print(response.response)
                body = response.body
                print(body)
                # status_code is a (code, redirect_url) pair or None.
                tuple_ = response.status_code
                status_code = tuple_[0] if tuple_ is not None else None
                redirect_url = tuple_[1] if tuple_ is not None else None
                if status_code is not None:
                    print("status code %s" % (status_code))
                    # 3xx responses carry a redirect target.
                    if status_code[:1] == "3":
                        print("redirect url %s" % (redirect_url))
            # Separate the output.
            print("---------------")
        # (A long-disabled, commented-out POST-request exercise that lived
        # here was removed as dead code; recover it from version control if
        # it is ever needed again.)
    # test_link_utils
    elif test == "test_link_utils":
        test_link_utils = LinkUtilsTest()
        test_link_utils.test_one_layer()
        test_link_utils.test_multiple_layers()
        test_link_utils.test_link_layer()
        test_link_utils.test_link_retrieval()
        test_link_utils.test_link_retrieval_layers()
        test_link_utils.test_link_retrieval_relative()
        test_link_utils.test_domain_family()
        print("test_link_utils passed")
    # test_login_utils
    elif test == "test_login_utils":
        test_login_utils = LoginUtilsTest()
        test_login_utils.test_login_detection()
        print("test_login_utils passed")
    # test_transform
    elif test == "test_transform":
        test_transform = TransformTest()
        test_transform.test_upper()
        test_transform.test_lower()
        test_transform.test_reverse()
        test_transform.test_leet()
        print("test_transform passed")
|
<reponame>clatterrr/NumericalComputationProjectsCollection<filename>FiniteElement/fem50/fem50.py
import numpy as np
import math
"""
D:\FluidSim\FluidSim\FEMNEW\fem50-master\src
https://github.com/cpraveen/fem50
Remarks around 50 lines of Matlab: short finite element implementation
"""
# ---------------------------------------------------------------------------
# Mesh definition (node ids are 1-based in the source data; converted to
# 0-based by the "-= 1" adjustments below).
# ---------------------------------------------------------------------------
# Node coordinates (x, y) of the benchmark domain.
coordinates = np.array([[0,0],[1,0],[1.59,0],
                        [2,1],[3,1.41],[3,2],
                        [3,3],[2,3],[1,3],
                        [0,3],[0,2],[0,1],
                        [1,1],[1,2],[2,2]])
# Edges on the Dirichlet boundary (pairs of node ids).
dirichlet = np.array([[3,4],[4,5],[7,8],[8,9],
                      [9,10],[10,11],[11,12],[12,1]])
dirichlet -= 1
# Triangular (P1) elements: 3 node ids each.
elements3 = np.array([[2,3,13],[3,4,13],
                      [4,5,15],[5,6,15]])
elements3 -= 1
# Quadrilateral (Q1) elements: 4 node ids each.
elements4 = np.array([[1,2,13,12],
                      [12,13,14,11],
                      [13,4,15,14],
                      [11,14,9,10],
                      [14,15,8,9],
                      [15,6,7,8]])
elements4 -= 1
# Edges on the Neumann boundary.
neumann = np.array([[5,6],[6,7],[1,2],[2,3]])
neumann -= 1
Nx = coordinates.shape[0]
A = np.zeros((Nx,Nx))
b = np.zeros((Nx))
# ---------------------------------------------------------------------------
# Stiffness assembly: triangles.  det(G0) is twice the triangle area, and
# G holds the gradients of the barycentric shape functions.
# (A dead 'dG' determinant of a hard-coded matrix was removed here; it was
# never used and its values did not correspond to any element.)
# ---------------------------------------------------------------------------
for k in range(elements3.shape[0]):
    x0 = coordinates[elements3[k,0],0]
    y0 = coordinates[elements3[k,0],1]
    x1 = coordinates[elements3[k,1],0]
    y1 = coordinates[elements3[k,1],1]
    x2 = coordinates[elements3[k,2],0]
    y2 = coordinates[elements3[k,2],1]
    G0 = np.array([[1,1,1],[x0,x1,x2],[y0,y1,y2]])
    G1 = np.array([[0,0],[1,0],[0,1]])
    G = np.dot(np.linalg.inv(G0),G1)
    # Element stiffness: det(G0)/2 * G G^T.
    M = np.dot(np.linalg.det(G0),G)
    M = np.dot(M,np.transpose(G)) / 2
    for i in range(3):
        for j in range(3):
            A[elements3[k,i],elements3[k,j]] += M[i,j]
# ---------------------------------------------------------------------------
# Stiffness assembly: quadrilaterals (bilinear elements).
# ---------------------------------------------------------------------------
for k in range(elements4.shape[0]):
    x0 = coordinates[elements4[k,0],0]
    y0 = coordinates[elements4[k,0],1]
    x1 = coordinates[elements4[k,1],0]
    y1 = coordinates[elements4[k,1],1]
    x3 = coordinates[elements4[k,3],0]
    y3 = coordinates[elements4[k,3],1]
    # Jacobian of the map from the reference square (parallelogram assumption).
    Dphi = np.array([[x1 - x0,y1 - y0],
                     [x3 - x0,y3 - y0]])
    B = np.linalg.inv(np.dot(Dphi,np.transpose(Dphi)))
    C1 = (np.array([[2,-2],[-2,2]])*B[0,0]
          + np.array([[3,0],[0,-3]])*B[0,1]
          + np.array([[2,1],[1,2]])*B[1,1])
    C2 = (np.array([[-1,1],[1,-1]])*B[0,0]
          + np.array([[-3,0],[0,3]])*B[0,1]
          + np.array([[-1,-2],[-2,-1]])*B[1,1])
    M = np.zeros((4,4))
    M[0:2,0:2] = M[2:4,2:4] = C1
    M[0:2,2:4] = M[2:4,0:2] = C2
    M = M * np.linalg.det(Dphi) / 6
    for i in range(4):
        for j in range(4):
            A[elements4[k,i],elements4[k,j]] += M[i,j]
# ---------------------------------------------------------------------------
# Volume forces.  The source term f is identically 0 in this benchmark, so
# every contribution below is zero; the loops are kept so a nonzero f can be
# dropped in later.
# ---------------------------------------------------------------------------
for k in range(elements3.shape[0]):
    x0 = coordinates[elements3[k,0],0]
    y0 = coordinates[elements3[k,0],1]
    x1 = coordinates[elements3[k,1],0]
    y1 = coordinates[elements3[k,1],1]
    x2 = coordinates[elements3[k,2],0]
    y2 = coordinates[elements3[k,2],1]
    G0 = np.array([[1,1,1],[x0,x1,x2],[y0,y1,y2]])
    for i in range(3):
        b[elements3[k,i]] += np.linalg.det(G0)*0
for k in range(elements4.shape[0]):
    # NOTE(review): only the first three vertices enter G0 here, as in the
    # original -- harmless while f == 0, but confirm before using nonzero f.
    x0 = coordinates[elements4[k,0],0]
    y0 = coordinates[elements4[k,0],1]
    x1 = coordinates[elements4[k,1],0]
    y1 = coordinates[elements4[k,1],1]
    x2 = coordinates[elements4[k,2],0]
    y2 = coordinates[elements4[k,2],1]
    G0 = np.array([[1,1,1],[x0,x1,x2],[y0,y1,y2]])
    for i in range(4):
        b[elements4[k,i]] += np.linalg.det(G0)*0
# Neumann edge loads (boundary flux g is identically 0 here).
for k in range(neumann.shape[0]):
    x0 = coordinates[neumann[k,0],0]
    y0 = coordinates[neumann[k,0],1]
    x1 = coordinates[neumann[k,1],0]
    y1 = coordinates[neumann[k,1],1]
    norm = np.sqrt((x0-x1)**2 + (y0 - y1)**2)
    for i in range(2):
        b[neumann[k,i]] += norm * 0
# ---------------------------------------------------------------------------
# Dirichlet values: u = r^(2/3) * sin(2*theta/3) on the boundary (the
# classic singular solution of the L-shaped Laplace problem).
# ---------------------------------------------------------------------------
u = np.zeros((Nx))
for k in range(dirichlet.shape[0]):
    for node in dirichlet[k]:
        x0 = coordinates[node,0]
        y0 = coordinates[node,1]
        theta = math.atan2(y0,x0)
        if theta < 0:
            theta = theta + 2 * np.pi
        r = np.sqrt(x0**2+y0**2)
        u[node] = r ** (2/3) * np.sin(2 * theta / 3)
# Reduce the right-hand side by the Dirichlet contribution: b0 = b - A @ u.
# (The original double loop accumulated b[i] once per column, i.e. computed
# Nx*b[i] - (A u)[i]; it was masked only because all loads are zero.)
b0 = b - np.dot(A, u)
# Free (non-Dirichlet) nodes.  NOTE(review): hard-coded; for this mesh it
# equals set(range(Nx)) minus the nodes appearing in 'dirichlet' --
# recompute if the mesh changes.
freenodes = np.array([1,5,12,13,14],dtype = int)
A1 = A[np.ix_(freenodes, freenodes)]
b1 = b0[freenodes]
# Solve the reduced system directly (more accurate than forming inv(A1))
# and scatter the solution back to the free nodes.
u[freenodes] = np.linalg.solve(A1, b1)
|
<filename>src/booking/views.py
import datetime
from datetime import timedelta
from django.urls import reverse
from django.shortcuts import render, redirect, get_object_or_404
from django.http import Http404
from django.contrib import messages
from usermgmt.models import Profile
from .models import Activity, Ticket
from .forms import UserInfoForm, PaymentForm, UserIdentifyForm
from .task_functions import success_email
def activities_view(request):
    """List all activities (cheapest first) and handle add-to-cart POSTs.

    POST expects 'adult_count', 'child_count' and 'product_id'; both counts
    must be digit strings and at least one must be positive.  A valid
    submission appends the activity to the session cart.
    """
    ctx = {}
    ctx["qs"] = Activity.objects.all().order_by("child_price")
    ctx["cart"] = cart = request.session.get("cart", [])
    if request.method == "POST":
        # Get form data
        data = request.POST
        adult_count = data.get("adult_count", None)
        child_count = data.get("child_count", None)
        product_id = data.get("product_id", None)
        if adult_count and child_count and product_id:
            if adult_count.isdigit() and child_count.isdigit() and (int(adult_count) > 0 or int(child_count) > 0):
                # Save item to cart.  get_object_or_404 instead of .get():
                # an unknown/forged product_id now yields a 404 page rather
                # than an unhandled Activity.DoesNotExist (HTTP 500).
                obj = get_object_or_404(Activity, product_id=product_id)
                cart.append({
                    "name":obj.name,
                    "description":obj.description,
                    "adult_price":obj.adult_price,
                    "child_price":obj.child_price,
                    "product_id":product_id,
                    "adult_count":adult_count,
                    "child_count":child_count,
                })
                request.session["cart"] = cart
                abs_link = reverse('booking:cart')
                messages.info(request, f"Successfully added '{obj.name}' to cart. <a href='{abs_link}'>Continue to checkout?</a>")
            else:
                messages.error(request, f"Number of children / adults must be greater than zero")
    template_file = "booking/activities.html"
    return render(request, template_file, ctx)
def cart_view(request):
    """Show the session cart and handle removal of a single cart item.

    Each cart entry gets a 'total' line price; POST with matching
    adult_count/child_count/product_id removes the first matching entry.
    """
    ctx = {}
    # Get card data from session
    cart = request.session.get("cart", [])
    ctx["cart"] = cart
    ctx["card_items_count"] = len(cart)
    # Calculate the total cost of all cart items (and annotate each entry
    # with its own line total for the template).
    grand_total = 0
    for entry in cart:
        line_total = (entry["adult_price"] * float(entry["adult_count"])
                      + entry["child_price"] * float(entry["child_count"]))
        entry["total"] = line_total
        grand_total += line_total
    ctx["total"] = grand_total
    if request.method == "POST":
        # Get form data
        data = request.POST
        adult_count = data.get("adult_count", None)
        child_count = data.get("child_count", None)
        product_id = data.get("product_id", None)
        if adult_count and child_count and product_id:
            cart = request.session.get("cart", [])
            # Remove the first cart entry matching the submitted item.
            for index, entry in enumerate(cart):
                if (entry["adult_count"] == adult_count
                        and entry["child_count"] == child_count
                        and entry["product_id"] == product_id):
                    del cart[index]
                    break
            request.session["cart"] = cart
    template_file = "booking/cart.html"
    return render(request, template_file, ctx)
def checkout_step1_view(request):
    """First checkout step: collect the customer's contact details.

    GET renders a UserInfoForm (pre-filled from session if available);
    a valid POST stores the cleaned data in request.session["user_info"]
    and redirects to step 2.  Redirects back to the activities page when
    the cart is empty.
    """
    ctx = {}
    ctx["cart"] = cart = request.session.get("cart", [])
    if not cart:
        return redirect("booking:activities") # Redirect if there are no cart items
    ctx["card_items_count"] = len(cart)
    # Calculate the total cost of all cart items
    ctx["total"] = 0
    for i in range(len(cart)):
        item = cart[i]
        cart[i]["total"] = total = item["adult_price"] * float(item["adult_count"]) + item["child_price"] * float(item["child_count"])
        ctx["total"] += total
    if request.method == "POST":
        form = UserInfoForm(request.POST)
        if form.is_valid():
            # Get form data and save it to sessions
            # Dates are stored as "YYYY-MM-DD" strings so the dict stays
            # session-serializable.
            data = form.cleaned_data
            info = {
                "first_name":data.get("first_name"),
                "last_name":data.get("last_name"),
                "email":data.get("email"),
                "phone":data.get("phone"),
                "date":str(data.get("date").date()),
                "date_repeat":str(data.get("date_repeat").date()),
            }
            request.session["user_info"] = info
            return redirect("booking:checkout-step2")
    else:
        # Pre-populate form fields with existing user data
        initial = {}
        info = request.session.get("user_info")
        if info:
            initial = {
                "first_name": info["first_name"],
                "last_name": info["last_name"],
                "email": info["email"],
                "phone": info["phone"],
                "date": datetime.datetime.strptime(info["date"], '%Y-%m-%d').date(),
                "date_repeat": datetime.datetime.strptime(info["date_repeat"], '%Y-%m-%d').date(),
            }
        form = UserInfoForm(initial=initial)
    # Set min, max values for date inputs
    # Bookings must be for tomorrow at the earliest.
    form.fields["date"].widget.attrs.update({'min': datetime.date.today() + timedelta(days=1)})
    form.fields["date_repeat"].widget.attrs.update({'min': datetime.date.today() + timedelta(days=1)})
    ctx["form"] = form
    template_file = "booking/checkout_step1.html"
    return render(request, template_file, ctx)
def checkout_step2_view(request):
    """Second checkout step: take payment details and create the order.

    On a valid PaymentForm: creates a Profile from the session's
    "user_info", one Ticket per cart item, stores the profile slug as the
    session "success_id", clears the cart/user_info session keys, sends a
    confirmation email and redirects to the success page.
    """
    ctx = {}
    ctx["cart"] = cart = request.session.get("cart", [])
    if not cart:
        return redirect("booking:activities")
    ctx["card_items_count"] = len(cart)
    # Calculate the total cost of all cart items
    ctx["total"] = 0
    for i in range(len(cart)):
        item = cart[i]
        cart[i]["total"] = total = item["adult_price"] * float(item["adult_count"]) + item["child_price"] * float(item["child_count"])
        ctx["total"] += total
    if request.method == "POST":
        form = PaymentForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            info = request.session.get("user_info", [])
            if not info: # Make sure all user data are present
                return redirect("booking:checkout-step1")
            # Create a user profile
            profile = Profile.objects.create(
                first_name=info["first_name"],
                last_name=info["last_name"],
                email=info["email"],
                phone=info["phone"]
            )
            # Create a ticket object for every cart item
            # info["date"] is stored as "YYYY-MM-DD"; the split() guards
            # against a trailing time component.
            for item in cart:
                obj = Ticket.objects.create(
                    user=profile,
                    activity=get_object_or_404(Activity, product_id=item["product_id"]),
                    adult_count=item["adult_count"],
                    child_count=item["child_count"],
                    expected_activation_date=datetime.datetime.strptime(info["date"].split(' ')[0], '%Y-%m-%d'),
                )
            # Set success id in sessions
            request.session["success_id"] = profile.slug
            # Delete session data
            request.session.pop('cart', None)
            request.session.pop('user_info', None)
            # Send success email
            success_email(
                profile.email,
                profile.get_full_name(),
                str(len(cart)),
                profile.slug,
                info["date"].split(" ")[0],
            )
            return redirect("booking:checkout-success")
    else:
        form = PaymentForm()
    ctx["form"] = form
    template_file = "booking/checkout_step2.html"
    return render(request, template_file, ctx)
def success_view(request, success_id=None):
    """Order confirmation page.

    Access rules: the session's cached "success_id" grants access to its
    own order; staff may view any order by slug; any other visitor hitting
    an order URL must confirm the order's email address first.  Raises
    Http404 when no order can be identified at all.
    """
    ctx = {}
    cached_success_id = request.session.get("success_id", None)
    if not (cached_success_id or success_id):
        raise Http404("Order not found")
    # Success id validation
    # Precedence note: reads as (cached and cached == success_id) or
    # (no success_id in the URL) -- either way the cached id is used.
    if cached_success_id and cached_success_id == success_id or not success_id:
        ctx["profile"] = get_object_or_404(Profile, slug=cached_success_id)
    elif request.user.is_staff:
        ctx["profile"] = get_object_or_404(Profile, slug=success_id)
    elif success_id:
        # Show email verification form if cached success id is not present or does not match the success id in url
        profile = get_object_or_404(Profile, slug=success_id)
        if request.method == "POST":
            form = UserIdentifyForm(request.POST)
            if form.is_valid():
                if form.cleaned_data.get("email") == profile.email:
                    ctx["profile"] = profile
                    request.session["success_id"] = success_id
                else:
                    messages.add_message(request, messages.ERROR, "Order not found")
        else:
            form = UserIdentifyForm()
            ctx["form"] = form
    if "profile" in ctx:
        # Get all tickets linked to this user
        ctx["qs"] = Ticket.objects.filter(user=ctx["profile"])
    template_file = "booking/success.html"
    return render(request, template_file, ctx)
|
#!/usr/bin/python
#analyza.py
import sys
import os
import json
import cx_Oracle
from modules import *
#------------------------------------------------------------------------------
# MAIN driver code
#------------------------------------------------------------------------------
if __name__ == "__main__":
    # Analyze every search-word directory under the given source dir, dump
    # the visualization payload to disk and insert one row into Oracle.
    if len(sys.argv) < 2:
        print("Woops... Somthing went wrong...")
        print("[Usage]: analyza.py [source-dir path]")
        sys.exit(1)
    # Normalize: strip a single trailing "/" from the source directory path.
    path = sys.argv[1]
    path = path[:len(path)-1] if path[len(path)-1] == "/" else path
    dirlist = util.get_dirlist(path)
    searchWords = []
    visual_data = []
    for dir_ in dirlist:
        try:
            # Group key comes from the search-word directory name (e.g. D_K_01).
            group = dir_[dir_.rfind("/")+1:]
            # Extract the search word for this directory.
            searchword = util.get_searchword(dir_)
            # Progress log.
            print("분석 시작: {}\t{}".format(dir_, searchword))
            # Build the list of significant keywords (by TF score).
            (docs, sigwords) = tfidf.get_sigwords_by_tf(dir_, searchword)
            # Build the word-distance matrix.
            distDF = word2veca.create_distance_df(docs, sigwords, 20)
        except Exception as e:
            # Best-effort per directory: log and keep going with the rest.
            print(e)
        else:
            # Prepare the database payload only after every exception-prone
            # step above has succeeded.
            searchWords.append({'key' : group, 'searchword' : searchword})
            # Build the visualization node data (JSON-ready).
            nodes = []
            colnames = list(distDF.columns)
            for i, colname in enumerate(colnames):
                for word in sigwords:
                    if colname == word['word']:
                        nodes.append({
                            'group' : group,
                            'id' : group+"_"+("%02d"%(i+1)),
                            'word' : colname,
                            'val' : word['TF_score']
                        })
            visual_data.append({
                'nodes' : nodes,
                'dmatrix' : distDF.to_csv()
            })
        # (The original carried a no-op "finally: None" clause here; removed.)
    #--------------------------------------------------------------------------
    # Build the DATABASE column data
    #--------------------------------------------------------------------------
    # The path is expected to end in .../YYMMDD/HHMM; date and time are
    # sliced out around the final "/".  TODO confirm this holds for all callers.
    divIdx = path.rfind("/")
    yymmdd = int(path[divIdx-6:divIdx])
    hhmm = int(path[divIdx+1:])
    searchword = json.dumps(searchWords, ensure_ascii=False)
    visdata = json.dumps(visual_data, ensure_ascii=False)
    # Debug dump of the visualization payload next to the source data.
    with open(os.path.join(path, "visdata.txt"), "w", encoding="utf-8-sig") \
        as fp:
        fp.write(visdata)
    #--------------------------------------------------------------------------
    # Insert into the Oracle database
    #--------------------------------------------------------------------------
    os.putenv('NLS_LANG', '.UTF8')
    con = cx_Oracle.connect(config.oracle_connection)
    try:
        cur = con.cursor()
        statement = "".join([
            "insert into latte_timeline(yymmdd, hhmm, searchword, visdata) ",
            "values (:1, :2, :3, :4)"])
        cur.execute(statement, (yymmdd, hhmm, searchword, visdata))
        cur.close()
        con.commit()
    finally:
        # Always release the connection, even when the insert fails.
        con.close()
    #--------------------------------------------------------------------------
    # Sanity check: read back the row that was just inserted.
    #--------------------------------------------------------------------------
    con = cx_Oracle.connect(config.oracle_connection)
    try:
        cur = con.cursor()
        statement = "".join([
            "select * from latte_timeline where yymmdd = :1 and hhmm = :2"
        ])
        cur.execute(statement, (yymmdd, hhmm))
        for row in cur:
            print(row)
        cur.close()
    finally:
        con.close()
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for RetinaNet models."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.vision.beta.modeling import retinanet_model
from official.vision.beta.modeling.backbones import resnet
from official.vision.beta.modeling.decoders import fpn
from official.vision.beta.modeling.heads import dense_prediction_heads
from official.vision.beta.modeling.layers import detection_generator
from official.vision.beta.ops import anchor
class RetinaNetTest(parameterized.TestCase, tf.test.TestCase):
  """Build, forward-pass and serialization tests for RetinaNetModel."""
  @parameterized.parameters(
      {
          'use_separable_conv': True,
          'build_anchor_boxes': True,
          'is_training': False,
          'has_att_heads': False
      },
      {
          'use_separable_conv': False,
          'build_anchor_boxes': True,
          'is_training': False,
          'has_att_heads': False
      },
      {
          'use_separable_conv': False,
          'build_anchor_boxes': False,
          'is_training': False,
          'has_att_heads': False
      },
      {
          'use_separable_conv': False,
          'build_anchor_boxes': False,
          'is_training': True,
          'has_att_heads': False
      },
      {
          'use_separable_conv': False,
          'build_anchor_boxes': True,
          'is_training': True,
          'has_att_heads': True
      },
      {
          'use_separable_conv': False,
          'build_anchor_boxes': True,
          'is_training': False,
          'has_att_heads': True
      },
  )
  def test_build_model(self, use_separable_conv, build_anchor_boxes,
                       is_training, has_att_heads):
    """Smoke test: the model builds and runs one forward pass per config."""
    num_classes = 3
    min_level = 3
    max_level = 7
    num_scales = 3
    aspect_ratios = [1.0]
    anchor_size = 3
    fpn_num_filters = 256
    head_num_convs = 4
    head_num_filters = 256
    num_anchors_per_location = num_scales * len(aspect_ratios)
    image_size = 384
    images = np.random.rand(2, image_size, image_size, 3)
    image_shape = np.array([[image_size, image_size], [image_size, image_size]])
    if build_anchor_boxes:
      anchor_boxes = anchor.Anchor(
          min_level=min_level,
          max_level=max_level,
          num_scales=num_scales,
          aspect_ratios=aspect_ratios,
          anchor_size=anchor_size,
          image_size=(image_size, image_size)).multilevel_boxes
      # Tile a batch dimension of 2 onto each level to match 'images'.
      for l in anchor_boxes:
        anchor_boxes[l] = tf.tile(
            tf.expand_dims(anchor_boxes[l], axis=0), [2, 1, 1, 1])
    else:
      anchor_boxes = None
    if has_att_heads:
      attribute_heads = [dict(name='depth', type='regression', size=1)]
    else:
      attribute_heads = None
    backbone = resnet.ResNet(model_id=50)
    decoder = fpn.FPN(
        input_specs=backbone.output_specs,
        min_level=min_level,
        max_level=max_level,
        num_filters=fpn_num_filters,
        use_separable_conv=use_separable_conv)
    head = dense_prediction_heads.RetinaNetHead(
        min_level=min_level,
        max_level=max_level,
        num_classes=num_classes,
        attribute_heads=attribute_heads,
        num_anchors_per_location=num_anchors_per_location,
        use_separable_conv=use_separable_conv,
        num_convs=head_num_convs,
        num_filters=head_num_filters)
    generator = detection_generator.MultilevelDetectionGenerator(
        max_num_detections=10)
    model = retinanet_model.RetinaNetModel(
        backbone=backbone,
        decoder=decoder,
        head=head,
        detection_generator=generator,
        min_level=min_level,
        max_level=max_level,
        num_scales=num_scales,
        aspect_ratios=aspect_ratios,
        anchor_size=anchor_size)
    # Only checks that the forward pass runs; outputs are not inspected.
    _ = model(images, image_shape, anchor_boxes, training=is_training)
  @combinations.generate(
      combinations.combine(
          strategy=[
              strategy_combinations.cloud_tpu_strategy,
              strategy_combinations.one_device_strategy_gpu,
          ],
          image_size=[
              (128, 128),
          ],
          training=[True, False],
          has_att_heads=[True, False],
          output_intermediate_features=[True, False],
          soft_nms_sigma=[None, 0.0, 0.1],
      ))
  def test_forward(self, strategy, image_size, training, has_att_heads,
                   output_intermediate_features, soft_nms_sigma):
    """Test for creation of a R50-FPN RetinaNet."""
    tf.keras.backend.set_image_data_format('channels_last')
    num_classes = 3
    min_level = 3
    max_level = 7
    num_scales = 3
    aspect_ratios = [1.0]
    num_anchors_per_location = num_scales * len(aspect_ratios)
    images = np.random.rand(2, image_size[0], image_size[1], 3)
    image_shape = np.array(
        [[image_size[0], image_size[1]], [image_size[0], image_size[1]]])
    with strategy.scope():
      anchor_gen = anchor.build_anchor_generator(
          min_level=min_level,
          max_level=max_level,
          num_scales=num_scales,
          aspect_ratios=aspect_ratios,
          anchor_size=3)
      anchor_boxes = anchor_gen(image_size)
      # Tile a batch dimension of 2 onto each level to match 'images'.
      for l in anchor_boxes:
        anchor_boxes[l] = tf.tile(
            tf.expand_dims(anchor_boxes[l], axis=0), [2, 1, 1, 1])
      backbone = resnet.ResNet(model_id=50)
      decoder = fpn.FPN(
          input_specs=backbone.output_specs,
          min_level=min_level,
          max_level=max_level)
      if has_att_heads:
        attribute_heads = [dict(name='depth', type='regression', size=1)]
      else:
        attribute_heads = None
      head = dense_prediction_heads.RetinaNetHead(
          min_level=min_level,
          max_level=max_level,
          num_classes=num_classes,
          attribute_heads=attribute_heads,
          num_anchors_per_location=num_anchors_per_location)
      generator = detection_generator.MultilevelDetectionGenerator(
          max_num_detections=10,
          nms_version='v1',
          use_cpu_nms=soft_nms_sigma is not None,
          soft_nms_sigma=soft_nms_sigma)
      model = retinanet_model.RetinaNetModel(
          backbone=backbone,
          decoder=decoder,
          head=head,
          detection_generator=generator)
      model_outputs = model(
          images,
          image_shape,
          anchor_boxes,
          output_intermediate_features=output_intermediate_features,
          training=training)
    if training:
      # Training mode: raw per-level class/box maps are returned.
      cls_outputs = model_outputs['cls_outputs']
      box_outputs = model_outputs['box_outputs']
      for level in range(min_level, max_level + 1):
        self.assertIn(str(level), cls_outputs)
        self.assertIn(str(level), box_outputs)
        self.assertAllEqual([
            2,
            image_size[0] // 2**level,
            image_size[1] // 2**level,
            num_classes * num_anchors_per_location
        ], cls_outputs[str(level)].numpy().shape)
        self.assertAllEqual([
            2,
            image_size[0] // 2**level,
            image_size[1] // 2**level,
            4 * num_anchors_per_location
        ], box_outputs[str(level)].numpy().shape)
        if has_att_heads:
          att_outputs = model_outputs['attribute_outputs']
          for att in att_outputs.values():
            self.assertAllEqual([
                2, image_size[0] // 2**level, image_size[1] // 2**level,
                1 * num_anchors_per_location
            ], att[str(level)].numpy().shape)
    else:
      # Inference mode: post-NMS detections are returned.
      self.assertIn('detection_boxes', model_outputs)
      self.assertIn('detection_scores', model_outputs)
      self.assertIn('detection_classes', model_outputs)
      self.assertIn('num_detections', model_outputs)
      self.assertAllEqual(
          [2, 10, 4], model_outputs['detection_boxes'].numpy().shape)
      self.assertAllEqual(
          [2, 10], model_outputs['detection_scores'].numpy().shape)
      self.assertAllEqual(
          [2, 10], model_outputs['detection_classes'].numpy().shape)
      self.assertAllEqual(
          [2,], model_outputs['num_detections'].numpy().shape)
      if has_att_heads:
        self.assertIn('detection_attributes', model_outputs)
        self.assertAllEqual(
            [2, 10, 1],
            model_outputs['detection_attributes']['depth'].numpy().shape)
    if output_intermediate_features:
      # Backbone features are exposed for levels 2-5, decoder features for
      # the configured FPN levels.
      for l in range(2, 6):
        self.assertIn('backbone_{}'.format(l), model_outputs)
        self.assertAllEqual([
            2, image_size[0] // 2**l, image_size[1] // 2**l,
            backbone.output_specs[str(l)].as_list()[-1]
        ], model_outputs['backbone_{}'.format(l)].numpy().shape)
      for l in range(min_level, max_level + 1):
        self.assertIn('decoder_{}'.format(l), model_outputs)
        self.assertAllEqual([
            2, image_size[0] // 2**l, image_size[1] // 2**l,
            decoder.output_specs[str(l)].as_list()[-1]
        ], model_outputs['decoder_{}'.format(l)].numpy().shape)
  def test_serialize_deserialize(self):
    """Validate the network can be serialized and deserialized."""
    num_classes = 3
    min_level = 3
    max_level = 7
    num_scales = 3
    aspect_ratios = [1.0]
    num_anchors_per_location = num_scales * len(aspect_ratios)
    backbone = resnet.ResNet(model_id=50)
    decoder = fpn.FPN(
        input_specs=backbone.output_specs,
        min_level=min_level,
        max_level=max_level)
    head = dense_prediction_heads.RetinaNetHead(
        min_level=min_level,
        max_level=max_level,
        num_classes=num_classes,
        num_anchors_per_location=num_anchors_per_location)
    generator = detection_generator.MultilevelDetectionGenerator(
        max_num_detections=10)
    model = retinanet_model.RetinaNetModel(
        backbone=backbone,
        decoder=decoder,
        head=head,
        detection_generator=generator,
        min_level=min_level,
        max_level=max_level,
        num_scales=num_scales,
        aspect_ratios=aspect_ratios,
        anchor_size=3)
    config = model.get_config()
    new_model = retinanet_model.RetinaNetModel.from_config(config)
    # Validate that the config can be forced to JSON.
    _ = new_model.to_json()
    # If the serialization was successful, the new config should match the old.
    self.assertAllEqual(model.get_config(), new_model.get_config())
if __name__ == '__main__':
  # Run all test cases in this module.
  tf.test.main()
|
# -*- coding: utf-8 -*-
import datetime
import json
import uuid
import requests
from catalog.model.product import Product, ProductSchema
def test_get_products():
    """
    Test for get_products

    GETs /products from the locally running catalog service and expects
    HTTP 200.
    :return:
    """
    try:
        products = requests.get('http://127.0.0.1:5000/products')
        assert products.status_code == 200
    except Exception as e:
        print(e)
        # Include the underlying error in the failure message instead of a
        # bare 'assert False' that hides the cause.
        assert False, "test_get_products failed: {}".format(e)
def test_add_product():
    """
    Test for add_product

    Logs in to obtain a token, then POSTs a new product as JSON with the
    token in the 'x-access-tokens' header; expects HTTP 200.
    :return:
    """
    try:
        login = requests.post('http://127.0.0.1:5000/login', data={}, auth=('<EMAIL>', '<PASSWORD>!'))
        # login = requests.post('http://127.0.0.1:5000/login', data={}, auth=('<EMAIL>', '<PASSWORD>'))
        if login.status_code == 200:
            token = login.json()['token']
            print(token)
            data = Product(uuid.uuid4().__str__(), 'Silla', 50, 'Luuna',
                           datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S'))
            json_string = {
                "name": data.name,
                "price": data.price,
                "brand": data.brand
            }
            date_str = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S').__str__()
            date_format = datetime.datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S')
            print(date_format)
            headers = {'Content-type': 'application/json', 'Accept': 'text/plain', 'x-access-tokens': token}
            response = requests.post('http://127.0.0.1:5000/products', data=json.dumps(json_string), headers=headers)
            assert response.status_code == 200
        else:
            # Previously a failed login made this test pass vacuously.
            assert False, "login failed with status {}".format(login.status_code)
    except Exception as e:
        print(e)
        # Surface the underlying error in the failure message.
        assert False, "test_add_product failed: {}".format(e)
def test_update_product():
    """
    Test for update_product: fetch the first product, change its price,
    PUT it back, expect 200.
    :return:
    """
    try:
        products = requests.get('http://127.0.0.1:5000/products')
        login = requests.post('http://127.0.0.1:5000/login', data={}, auth=('<EMAIL>', '<PASSWORD>!'))
    except requests.RequestException as e:
        print(e)
        assert False, 'request failed: {}'.format(e)
    # Bug fix: previously non-200 responses skipped all assertions and the
    # test passed vacuously.
    assert products.status_code == 200
    assert login.status_code == 200
    token = login.json()['token']
    print(token)
    schema = ProductSchema(many=True)
    all_product = schema.loads(products.text)
    product_update = all_product[0]
    product = Product(product_update['sku'], product_update['name'], product_update['price'],
                      product_update['brand'], product_update['created'])
    # Bug fix: Product is a plain object, not a mapping; the previous
    # `product['price'] = 1000` raised TypeError (which the old broad
    # except turned into an uninformative failure). Set the attribute.
    product.price = 1000
    json_string = product.to_json()
    headers = {'Content-type': 'application/json', 'Accept': 'text/plain', 'x-access-tokens': token}
    response = requests.put('http://127.0.0.1:5000/products', data=json_string, headers=headers)
    assert response.status_code == 200
def test_delete_products():
    """
    Test for delete_products: delete the first listed product, expect 200.
    :return:
    """
    try:
        products = requests.get('http://127.0.0.1:5000/products')
        login = requests.post('http://127.0.0.1:5000/login', data={}, auth=('<EMAIL>', '<PASSWORD>!'))
    except requests.RequestException as e:
        print(e)
        assert False, 'request failed: {}'.format(e)
    # Bug fix: previously non-200 responses skipped all assertions and the
    # test passed vacuously.
    assert products.status_code == 200
    assert login.status_code == 200
    token = login.json()['token']
    print(token)
    json_data = json.loads(products.text)
    first_sku = json_data[0]['sku']
    headers = {'Content-type': 'application/json', 'Accept': 'text/plain', 'x-access-tokens': token}
    response = requests.delete('http://127.0.0.1:5000/products/{}'.format(first_sku), headers=headers)
    assert response.status_code == 200
|
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class AzureParams(object):
    """Implementation of the 'AzureParams' model.

    Specifies various resources when converting and deploying a VM to Azure.

    Attributes:
        availability_set_id (long|int): Id of the Availability set in which
            the VM is to be restored.
        data_disk_type (DataDiskTypeEnum): Disk type used by the data disks:
            'kPremiumSSD' (SSD-backed, high performance / low latency),
            'kStandardSSD' (more consistent than HDD) or 'kStandardHDD'
            (HDD-backed, cost effective).
        instance_id (long|int): Type of VM (e.g. small, medium, large) when
            cloning the VM in Azure.
        network_resource_group_id (long|int): Resource group id of the
            selected virtual network.
        os_disk_type (OsDiskTypeEnum): Disk type used by the OS disk; same
            choices as `data_disk_type`.
        resource_group (long|int): Id of the Azure resource group; globally
            unique within Azure.
        storage_account (long|int): Id of the storage account that will hold
            the container for the VHD blob of the cloned VM.
        storage_container (long|int): Id of the storage container within the
            above storage account.
        storage_resource_group_id (long|int): Resource group id of the
            selected storage account.
        subnet_id (long|int): Id of the subnet within the virtual network.
        temp_vm_resource_group_id (long|int): Resource group for the
            temporary VM.
        temp_vm_storage_account_id (long|int): Storage account for the
            temporary VM.
        temp_vm_storage_container_id (long|int): Storage container for the
            temporary VM.
        temp_vm_subnet_id (long|int): Subnet for the temporary VM.
        temp_vm_virtual_network_id (long|int): Virtual network for the
            temporary VM.
        virtual_network_id (long|int): Id of the Virtual Network.
    """

    # Mapping from Python attribute names to API (wire) property names.
    # This single table drives both construction and deserialization.
    _names = {
        "availability_set_id": 'availabilitySetId',
        "data_disk_type": 'dataDiskType',
        "instance_id": 'instanceId',
        "network_resource_group_id": 'networkResourceGroupId',
        "os_disk_type": 'osDiskType',
        "resource_group": 'resourceGroup',
        "storage_account": 'storageAccount',
        "storage_container": 'storageContainer',
        "storage_resource_group_id": 'storageResourceGroupId',
        "subnet_id": 'subnetId',
        "temp_vm_resource_group_id": 'tempVmResourceGroupId',
        "temp_vm_storage_account_id": 'tempVmStorageAccountId',
        "temp_vm_storage_container_id": 'tempVmStorageContainerId',
        "temp_vm_subnet_id": 'tempVmSubnetId',
        "temp_vm_virtual_network_id": 'tempVmVirtualNetworkId',
        "virtual_network_id": 'virtualNetworkId'
    }

    def __init__(self,
                 availability_set_id=None,
                 data_disk_type=None,
                 instance_id=None,
                 network_resource_group_id=None,
                 os_disk_type=None,
                 resource_group=None,
                 storage_account=None,
                 storage_container=None,
                 storage_resource_group_id=None,
                 subnet_id=None,
                 temp_vm_resource_group_id=None,
                 temp_vm_storage_account_id=None,
                 temp_vm_storage_container_id=None,
                 temp_vm_subnet_id=None,
                 temp_vm_virtual_network_id=None,
                 virtual_network_id=None):
        """Constructor for the AzureParams class"""
        # Every constructor argument maps 1:1 onto an attribute of the same
        # name, and `_names` enumerates exactly that set — so assign in a
        # loop instead of sixteen hand-written statements.
        supplied = locals()
        for attribute in self._names:
            setattr(self, attribute, supplied[attribute])

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description; missing keys default to None.

        Returns:
            object: An instance of this structure class, or None when
            `dictionary` is None.
        """
        if dictionary is None:
            return None
        # Pull each wire-named value out of the payload and hand the lot to
        # the constructor as keyword arguments.
        return cls(**{attribute: dictionary.get(api_name)
                      for attribute, api_name in cls._names.items()})
|
## Animate fractals from an iterated function system
## by <NAME>
## <EMAIL>
## earlbellinger.com
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from tqdm import tqdm
from easing import easing
from PIL import ImageColor # for rgb
from numba import jit
from joblib import Parallel, delayed
import os
# Number of parallel rendering workers; taken from the scheduler's
# environment (raises KeyError when OMP_NUM_THREADS is unset — intended
# to run under the batch wrapper below).
n_jobs = int(os.environ['OMP_NUM_THREADS'])
# maybe_sub.sh -p 20 python3 fractal_anim.py
N = 10**5 # number of points to generate
n_frames = 100 # number of frames in between each keyframe
width = 6 # inches wide
n = 3 # degree of the easing polynomial
n_first = 3 # in case we want a different easing for the first keyframe
#ns = [1, 1, 3, 3, 3, 3, 3, 3, 3, 1] # in case we want a list of easings
# Draw the random map choices ONCE, module-wide: every frame reuses the
# same sequence, so the attractor morphs smoothly between keyframes.
randints = np.random.randint(0, 2, N)
keyframes = [ # parameters alpha, beta, gamma, delta
    [0.5 - 0.5j, 0, 0.5 - 0.5j, 0], # dragon
    [0.4614 + 0.4614j, 0, 0.622 - 0.196j, 0], # shell
    [0.5 - 0.5j, 0, 0.5 + 0.5j, 0], # flex
    [0.7 - 0.4614j, 0, 0, 0 - 0.5j], # bush
    [0.7 - 0.4614j, 0, 0, 0 + 0.45j], # static
    [0, 0.5 + 0.5j, 0, -0.5 + 0.5j], # stars
    [0, 0.3 + 0.3j, 0, 0.82], # leaves
    [0, 0.5 + 0.5j, 0.5, 0], # sierp.
    [0, 0.5 + 0.5j, 0, 0.4 - 0.4j], # tri.
    [0.4614 + 0.4614j, 0, 0, 0.2896 - 0.585j], # static2
]
# One two-color palette per keyframe; interpolated alongside the params.
colors = [
    ["#0B86A7", "#183E56"], # blues
    ["#003049", "#D62828"], # blue and red
    ["#0B86A7", "#183E56"], # blues
    ["#679436", "#3C4600"], # foresty colors
    ["#D62828", "#003049"], # red and blue
    ["#353c16", "#562512"], # foliage
    ["#679436", "#3C4600"], # foresty colors
    ["#003049", "#D62828"], # blue and red
    ["#0B86A7", "#183E56"], # blues
    ["#D62828", "#003049"], # red and blue
]
# Flatten each keyframe's four complex parameters into 8 reals so the
# easing library can interpolate them componentwise.
keyframes = np.array([
    np.array([[x.real, x.imag] for x in keyframe]).flatten()
    for keyframe in keyframes])
keyframes = np.vstack((keyframes, keyframes[0])) # for looping
@jit(nopython=True)
def F1(z, alpha, beta):
    """First IFS map: the R-linear contraction alpha*z + beta*conj(z)."""
    conjugate_part = beta * np.conjugate(z)
    return alpha * z + conjugate_part
@jit(nopython=True)
def F2(z, gamma, delta):
    """Second IFS map: contraction of z (and conj(z)) toward the point 1."""
    shifted = gamma * (z - 1)
    return shifted + delta * (np.conjugate(z) - 1) + 1
def contractions(alpha_re=0.4614, alpha_im=0.4614,
                 beta_re=0, beta_im=0,
                 gamma_re=0, gamma_im=0,
                 delta_re=0.2896, delta_im=-0.585):
    """Generate N points of the IFS attractor via the chaos game.

    The eight parameters are the real/imaginary parts of the four complex
    coefficients of maps F1 and F2. The module-global `randints` fixes
    which map is applied at each step, so output is deterministic for a
    given parameter set.
    """
    alpha = complex(alpha_re, alpha_im)
    beta = complex(beta_re, beta_im)
    gamma = complex(gamma_re, gamma_im)
    delta = complex(delta_re, delta_im)
    points = np.empty(N, dtype=complex)
    points[0] = 0.  # seed point of the iteration
    for step in range(1, N):
        previous = points[step - 1]
        if randints[step]:
            points[step] = F1(previous, alpha, beta)
        else:
            points[step] = F2(previous, gamma, delta)
    return points
def save_frame(frame,
               filename='test.png',
               cols=colors[0],
               xlim=None,
               ylim=None):
    """Render one frame of the fractal animation and save it to `filename`.

    Parameters
    ----------
    frame : sequence of 8 floats
        (alpha_re, alpha_im, beta_re, beta_im,
         gamma_re, gamma_im, delta_re, delta_im).
    filename : str
        Output image path.
    cols : sequence of 2 colors
        One color per contraction map.
    xlim, ylim : pair of floats or None
        Optional fixed axis limits (interpolated across frames so the
        camera moves smoothly).
    """
    alpha_re, alpha_im, beta_re, beta_im, \
        gamma_re, gamma_im, delta_re, delta_im = frame
    # Reassembled only for the caption; contractions() unpacks *frame itself.
    alpha = alpha_re + alpha_im*1j
    beta = beta_re + beta_im*1j
    gamma = gamma_re + gamma_im*1j
    delta = delta_re + delta_im*1j
    x = contractions(*frame)
    fig = plt.figure()
    # Golden-ratio landscape canvas.
    fig.set_size_inches(int(width * (1 + 5 ** 0.5) / 2), width)
    ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect=1)
    ax.set_xticks([])
    ax.set_yticks([])
    if xlim is not None:
        ax.set_xlim(xlim)
    if ylim is not None:
        ax.set_ylim(ylim)
    # Skip the arbitrary seed point x[0]. Bug fix: point ii (ii >= 1) is
    # produced by the map chosen via randints[ii] (see contractions), so
    # the color list is randints[1:]; the previous randints[:-1] shifted
    # every point's color by one step.
    plt.scatter(x.real[1:], x.imag[1:], alpha=0.8, s=2.5, marker='.',
                c=[cols[r] for r in randints[1:]])
    mpl.rcParams['font.size'] = 18
    # Bug fix: matplotlib 3.3 removed annotate's deprecated `s=` keyword;
    # the text argument is named `text`.
    plt.annotate(text=r'$\{ z \rightarrow \alpha z + \beta \bar z,\quad' + \
                      r'z \rightarrow \gamma (z-1) + \delta (\bar z - 1) + 1 \}$',
                 xy=(192, 572),
                 xycoords='figure pixels',
                 c='#1669BA', alpha=0.3, zorder=0)
    mpl.rcParams['font.size'] = 22
    plt.annotate(text=r'<NAME>',
                 xy=(577, 10),
                 xycoords='figure pixels',
                 c='#1669BA', alpha=0.3, zorder=0)
    mpl.rcParams['font.size'] = 18
    plt.annotate(text=r'$\alpha = ' + f'{alpha:.2f}' + ',$' \
                      + r'$\beta = ' + f'{beta:.2f}' + ',$\n' \
                      + r'$\gamma = ' + f'{gamma:.2f}' + ',$' \
                      + r'$\delta = ' + f'{delta:.2f}' + '$',
                 xy=(5, 10),
                 xycoords='figure pixels',
                 c='#1669BA', zorder=0, alpha=0.3)
    plt.savefig(filename)
    plt.close()
#save_frame(keyframes[5], xlim=[-0.5, 1.5], ylim=[-1, 1], cols=colors[5])
#quit()
# Walk consecutive keyframe pairs, easing parameters, axis limits and
# colors between them, and render the interpolated frames in parallel.
k = 0
first = None   # point cloud of the very first keyframe (for looping back)
prev = None    # point cloud of the previous segment's end keyframe
for ii in tqdm(range(len(keyframes)-1)):
    a = keyframes[ii]
    b = keyframes[ii+1]
    col_a = colors[ii % len(colors)]
    col_b = colors[(ii+1) % len(colors)]
    n_ = n
    if prev is None:
        # First segment: compute the start cloud and ease differently.
        a_frame = contractions(*a)
        first = a_frame
        n_ = n_first
    else:
        # Reuse the previous segment's end cloud instead of recomputing.
        a_frame = prev
    # NOTE(review): ii ranges over 0..len(keyframes)-2, so this condition
    # is ALWAYS true and the else branch below is dead — presumably
    # `len(keyframes) - 2` was intended. Harmless here because the last
    # keyframe was appended as a copy of the first (see "for looping"),
    # so contractions(*b) equals `first` anyway.
    if ii < len(keyframes) - 1:
        b_frame = contractions(*b)
    else:
        b_frame = first
        n_ = n_first
        col_b = colors[0]
    prev = b_frame
    #n_ = ns[ii]
    #frames = np.linspace(a, b, num=n_frames)
    # Ease the 8 real parameters between the two keyframes; drop the final
    # sample so consecutive segments do not duplicate a frame.
    frames = easing.Eased(np.vstack((a,b))).power_ease(n=n_,
        smoothness=n_frames)[:n_frames-1]
    # plot limits (eased too, so the "camera" pans/zooms smoothly)
    a_xlim = [min(a_frame.real)-0.02, max(a_frame.real)+0.02]
    b_xlim = [min(b_frame.real)-0.02, max(b_frame.real)+0.02]
    a_ylim = [min(a_frame.imag)-0.19, max(a_frame.imag)+0.10]
    b_ylim = [min(b_frame.imag)-0.19, max(b_frame.imag)+0.10]
    xlims = easing.Eased(np.vstack((a_xlim, b_xlim))).power_ease(n=n_,
        smoothness=n_frames)[:n_frames-1]
    ylims = easing.Eased(np.vstack((a_ylim, b_ylim))).power_ease(n=n_,
        smoothness=n_frames)[:n_frames-1]
    # colors: ease the flattened RGB components, then convert back to hex
    rgb_a = np.array([ImageColor.getrgb(col) for col in col_a]).flatten()
    rgb_b = np.array([ImageColor.getrgb(col) for col in col_b]).flatten()
    rgb_cols = easing.Eased(np.vstack((rgb_a, rgb_b))).power_ease(n=n_,
        smoothness=n_frames)[:n_frames-1]
    hexs = []
    for jj in range(len(rgb_cols)):
        rgb = np.array(rgb_cols[jj], dtype=int)
        # Regroup the flat 6-vector into two (r, g, b) triples.
        # NOTE(review): the slice index `a` shadows the keyframe array
        # above; safe only because `a` is not used again this iteration.
        rgb = [tuple(rgb[a:a+3]) for a in range(0, len(rgb), 3)]
        hexs += [['#%02x%02x%02x' % a for a in rgb]]
    # now calculate! — frame indices are globally numbered for ffmpeg
    k = ii * len(frames)
    Parallel(n_jobs=n_jobs)(delayed(save_frame)(frame,
        filename='plots/'+str(k+jj).zfill(5)+'.png',
        cols=hexs[jj],
        xlim=xlims[jj],
        ylim=ylims[jj])
        for jj, frame in enumerate(frames))
"""
for jj, frame in enumerate(frames):
    save_frame(frame,
        filename='plots/'+str(k+jj).zfill(5)+'.png',
        cols=hexs[jj],
        xlim=xlims[jj],
        ylim=ylims[jj])
"""
# ffmpeg -y -framerate 55 -i plots/%05d.png -ab 128k -r 30 -vcodec libx264 -crf 18 -preset veryslow fractals.avi
# maybe_sub.sh -p 1 ffmpeg -y -framerate 30 -i plots/%05d.png -ab 128k -r 30 -vcodec libx264 -crf 18 -preset veryslow fractals.avi
|
# Copyright (c) 2020 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
import unittest
import os
import io
import warnings
import tempfile
import subprocess
from itertools import chain
from ddt import ddt, data
import garnett
import numpy as np
from tempfile import TemporaryDirectory
import base64
from garnett.posfilewriter import DEFAULT_SHAPE_DEFINITION
# Repository root relative to the installed garnett package; some tests
# need the sample files that live there.
PATH = os.path.join(garnett.__path__[0], '..')
IN_PATH = os.path.abspath(PATH) == os.path.abspath(os.getcwd())
# Feature-detect HOOMD at import time. Three outcomes:
#   HOOMD=False                     — neither hoomd nor hoomd_script found
#   HOOMD=True, HOOMD_v1=True       — legacy hoomd_script API available
#   HOOMD=True, HOOMD_v1=False      — modern hoomd (v2) API available
try:
    try:
        from hoomd import context
        import hoomd
    except ImportError:
        # v2 import failed; fall back to the v1 package (may also raise
        # ImportError, which the outer try converts to HOOMD=False).
        from hoomd_script import context
        HOOMD_v1 = True
    else:
        HOOMD_v1 = False
        # Silence hoomd's status output for the test run (v2-only API).
        hoomd.util.quiet_status()
except ImportError:
    HOOMD = False
else:
    HOOMD = True
# The HPMC plugin lives in a different place under each API version.
if HOOMD:
    try:
        if HOOMD_v1:
            from hoomd_plugins import hpmc
        else:
            from hoomd import hpmc
    except ImportError:
        HPMC = False
    else:
        HPMC = True
else:
    HPMC = False
class BasePosFileReaderTest(unittest.TestCase):
    """Shared helpers for tests that parse pos-file trajectories."""

    def read_trajectory(self, stream, precision=None):
        """Parse and return a trajectory from a text stream."""
        return garnett.reader.PosFileReader(precision=precision).read(stream)

    def assert_raise_attribute_error(self, frame):
        """Assert that none of the optional per-particle properties exist."""
        for prop in ('velocity', 'charge', 'diameter',
                     'moment_inertia', 'angmom', 'image'):
            with self.assertRaises(AttributeError):
                getattr(frame, prop)
class BasePosFileWriterTest(BasePosFileReaderTest):
    """Adds pos-file writing helpers on top of the reader test base."""

    def dump_trajectory(self, trajectory):
        """Serialize a trajectory and return the pos-format string."""
        return garnett.writer.PosFileWriter().dump(trajectory)

    def write_trajectory(self, trajectory, file, rotate=False):
        """Write a trajectory to an open file object."""
        return garnett.writer.PosFileWriter(rotate=rotate).write(trajectory, file)

    def _assert_optional_close(self, a, b, prop, atol):
        # Optional per-particle property: comparison is skipped when the
        # attribute is absent on either frame (AttributeError).
        try:
            self.assertTrue(np.allclose(getattr(a, prop),
                                        getattr(b, prop), atol=atol))
        except AttributeError:
            pass

    def assert_approximately_equal_frames(self, a, b,
                                          decimals=6, atol=1e-5,
                                          ignore_orientation=False):
        """Compare two frames up to rounding (box) and tolerance (arrays)."""
        self.assertEqual(a.box.round(decimals), b.box.round(decimals))
        self.assertEqual(a.types, b.types)
        self.assertTrue(np.allclose(a.position, b.position, atol=atol))
        self._assert_optional_close(a, b, 'velocity', atol)
        if not ignore_orientation:
            self._assert_optional_close(a, b, 'orientation', atol)
        self.assertEqual(a.data, b.data)
        # Keys from either frame must agree (missing keys raise KeyError).
        for key in chain(a.shapedef, b.shapedef):
            self.assertEqual(a.shapedef[key], b.shapedef[key])
class PosFileReaderTest(BasePosFileReaderTest):
    """Read tests for the pos-file dialects shipped as garnett samples."""

    def _assert_monotype_sample(self, sample_text):
        """Shared body of the dialect tests (previously copy-pasted four
        times): every frame holds a single type 'A' in a 10x10x10 box and
        none of the optional per-particle properties."""
        traj = self.read_trajectory(io.StringIO(sample_text))
        box_expected = garnett.trajectory.Box(Lx=10, Ly=10, Lz=10)
        for frame in traj:
            N = len(frame)
            self.assertEqual(frame.types, ['A'])
            self.assertTrue(all(frame.typeid == [0] * N))
            self.assertEqual(frame.box, box_expected)
            self.assert_raise_attribute_error(frame)
        traj.load_arrays()
        self.assert_raise_attribute_error(traj)

    def test_read_empty(self):
        """An empty stream is a parser error."""
        empty_sample = io.StringIO("")
        with self.assertRaises(garnett.errors.ParserError):
            self.read_trajectory(empty_sample)

    def test_read_garbage(self):
        """Random bytes are a parser error."""
        garbage_sample = io.StringIO(str(os.urandom(1024 * 100)))
        with self.assertRaises(garnett.errors.ParserError):
            self.read_trajectory(garbage_sample)

    def test_hpmc_dialect(self):
        self._assert_monotype_sample(garnett.samples.POS_HPMC)

    def test_incsim_dialect(self):
        self._assert_monotype_sample(garnett.samples.POS_INCSIM)

    def test_monotype_dialect(self):
        self._assert_monotype_sample(garnett.samples.POS_MONOTYPE)

    def test_injavis_dialect(self):
        self._assert_monotype_sample(garnett.samples.POS_INJAVIS)

    def test_default(self):
        """A GSD file without shape info converts to pos with the default
        shape definition for every type."""
        with TemporaryDirectory() as tmp_dir:
            gsdfile = os.path.join(tmp_dir, 'testfile.gsd')
            posfile = os.path.join(tmp_dir, 'testfile.pos')
            with open(gsdfile, "wb") as f:
                f.write(base64.b64decode(garnett.samples.GSD_BASE64))
            with garnett.read(gsdfile) as traj:
                # The GSD source carries no shape definitions at all.
                with self.assertRaises(AttributeError):
                    traj[-1].shapedef
                garnett.write(traj, posfile)
            with garnett.read(posfile) as traj:
                for frame in traj:
                    for name in frame.shapedef.keys():
                        self.assertEqual(frame.shapedef[name],
                                         DEFAULT_SHAPE_DEFINITION)
@unittest.skipIf(not HPMC, 'requires HPMC')
class HPMCPosFileReaderTest(BasePosFileReaderTest):
    """End-to-end tests: run a short HPMC simulation, let HOOMD dump a
    pos file, and verify garnett parses the shape definitions back.

    Each test supports both the HOOMD v1 (hoomd_script) and v2 APIs,
    selected by the module-level HOOMD_v1 flag.
    """

    def setUp(self):
        # One temporary directory per test holds the dumped pos file.
        self.tmp_dir = TemporaryDirectory()
        self.addCleanup(self.tmp_dir.cleanup)
        self.fn_pos = os.path.join(self.tmp_dir.name, 'test.pos')

    def del_system(self):
        # Cleanup callback: drop the HOOMD system reference before the
        # context is reset/reinitialized.
        del self.system

    def del_mc(self):
        # Cleanup callback: drop the integrator reference.
        del self.mc

    def test_sphere(self):
        """Sphere shape class and diameter survive the pos dump."""
        if HOOMD_v1:
            from hoomd_script import init, sorter, data, dump, run
            self.system = init.create_empty(N=2, box=data.boxdim(
                L=10, dimensions=2), particle_types=['A'])
            self.addCleanup(init.reset)
        else:
            from hoomd import init, data, run, context, lattice
            from hoomd.update import sort as sorter
            from hoomd.deprecated import dump
            self.system = init.create_lattice(
                unitcell=lattice.sq(10), n=(2, 1))
            self.addCleanup(context.initialize, "--mode=cpu")
            hoomd.option.set_notice_level(0)
        self.addCleanup(self.del_system)
        self.mc = hpmc.integrate.sphere(seed=10)
        self.mc.shape_param.set("A", diameter=1.0)
        self.addCleanup(self.del_mc)
        # Two well-separated particles with identity orientation.
        self.system.particles[0].position = (0, 0, 0)
        self.system.particles[0].orientation = (1, 0, 0, 0)
        self.system.particles[1].position = (2, 0, 0)
        self.system.particles[1].orientation = (1, 0, 0, 0)
        if HOOMD_v1:
            sorter.set_params(grid=8)
        else:
            context.current.sorter.set_params(grid=8)
        # NOTE(review): unlike the other tests, no setup_pos_writer call
        # here — presumably sphere shape info is dumped without it;
        # confirm against the HPMC docs.
        dump.pos(filename=self.fn_pos, period=1)
        run(10, quiet=True)
        with io.open(self.fn_pos, 'r', encoding='utf-8') as posfile:
            traj = self.read_trajectory(posfile)
            shape = traj[0].shapedef['A']
            assert shape.shape_class == 'sphere'
            assert np.isclose(shape.diameter, float(1.0))

    def test_ellipsoid(self):
        """An ellipsoid-integrator dump parses without error."""
        if HOOMD_v1:
            from hoomd_script import init, sorter, data, dump, run
            self.system = init.create_empty(N=2, box=data.boxdim(
                L=10, dimensions=2), particle_types=['A'])
            self.addCleanup(init.reset)
        else:
            from hoomd import init, data, run, context, lattice
            from hoomd.update import sort as sorter
            from hoomd.deprecated import dump
            self.system = init.create_lattice(
                unitcell=lattice.sq(10), n=(2, 1))
            self.addCleanup(context.initialize, "--mode=cpu")
            hoomd.option.set_notice_level(0)
        self.addCleanup(self.del_system)
        self.mc = hpmc.integrate.ellipsoid(seed=10)
        # Semi-axes of the ellipsoid.
        a = 0.5
        b = 0.25
        c = 0.125
        self.mc.shape_param.set("A", a=a, b=b, c=c)
        self.addCleanup(self.del_mc)
        self.system.particles[0].position = (0, 0, 0)
        self.system.particles[0].orientation = (1, 0, 0, 0)
        self.system.particles[1].position = (2, 0, 0)
        self.system.particles[1].orientation = (1, 0, 0, 0)
        if HOOMD_v1:
            sorter.set_params(grid=8)
        else:
            context.current.sorter.set_params(grid=8)
        # The integrator must be registered with the writer so shape
        # parameters end up in the pos file.
        pos_writer = dump.pos(filename=self.fn_pos, period=1)
        self.mc.setup_pos_writer(pos_writer)
        run(10, quiet=True)
        with io.open(self.fn_pos, 'r', encoding='utf-8') as posfile:
            self.read_trajectory(posfile)

    def test_convex_polyhedron(self):
        """Polyhedron vertices survive the pos dump as 'poly3d'."""
        if HOOMD_v1:
            from hoomd_script import init, sorter, data, dump, run
            from hoomd_plugins import hpmc
            self.system = init.create_empty(N=2, box=data.boxdim(
                L=10, dimensions=2), particle_types=['A'])
            self.addCleanup(init.reset)
        else:
            from hoomd import init, data, run, hpmc, context, lattice
            from hoomd.update import sort as sorter
            from hoomd.deprecated import dump
            self.system = init.create_lattice(
                unitcell=lattice.sq(10), n=(2, 1))
            self.addCleanup(context.initialize, "--mode=cpu")
            hoomd.option.set_notice_level(0)
        self.addCleanup(self.del_system)
        self.mc = hpmc.integrate.convex_polyhedron(seed=10)
        self.addCleanup(self.del_mc)
        # A 4x2x2 box-shaped polyhedron.
        shape_vertices = np.array([[-2, -1, -1], [-2, 1, -1], [-2, -1, 1],
                                   [-2, 1, 1], [2, -1, -1], [2, 1, -1],
                                   [2, -1, 1], [2, 1, 1]])
        self.mc.shape_param.set("A", vertices=shape_vertices)
        self.system.particles[0].position = (0, 0, 0)
        self.system.particles[0].orientation = (1, 0, 0, 0)
        self.system.particles[1].position = (2, 0, 0)
        self.system.particles[1].orientation = (1, 0, 0, 0)
        if HOOMD_v1:
            sorter.set_params(grid=8)
        else:
            context.current.sorter.set_params(grid=8)
        pos_writer = dump.pos(filename=self.fn_pos, period=1)
        self.mc.setup_pos_writer(pos_writer)
        run(10, quiet=True)
        with io.open(self.fn_pos, 'r', encoding='utf-8') as posfile:
            traj = self.read_trajectory(posfile)
            shape = traj[0].shapedef['A']
            assert shape.shape_class == 'poly3d'
            assert np.array_equal(shape.vertices, shape_vertices)
@ddt
class PosFileWriterTest(BasePosFileWriterTest):
    """Write tests: serialize parsed trajectories back to pos format and
    verify the round trip."""

    def _assert_roundtrip(self, sample_text):
        """Shared body of the dialect tests (previously copy-pasted four
        times): read a sample, write it out, re-read it, and require the
        two trajectories to compare equal."""
        traj = self.read_trajectory(io.StringIO(sample_text))
        dump = io.StringIO()
        self.write_trajectory(traj, dump)
        dump.seek(0)
        traj_cmp = self.read_trajectory(dump)
        self.assertEqual(traj, traj_cmp)

    def test_hpmc_dialect(self):
        self._assert_roundtrip(garnett.samples.POS_HPMC)

    def test_incsim_dialect(self):
        self._assert_roundtrip(garnett.samples.POS_INCSIM)

    def test_monotype_dialect(self):
        self._assert_roundtrip(garnett.samples.POS_MONOTYPE)

    def test_injavis_dialect(self):
        self._assert_roundtrip(garnett.samples.POS_INJAVIS)

    def test_arrows(self):
        """Arrow shapes survive a round trip; the 4th quaternion component
        is zeroed first since arrows carry 2d orientations."""
        from garnett.shapes import ArrowShape
        sample = io.StringIO(garnett.samples.POS_INJAVIS)
        traj = self.read_trajectory(sample)
        traj.load_arrays()
        for frame in traj:
            frame.shapedef = {'A': ArrowShape()}
            frame.orientation.T[3] = 0
        dump = io.StringIO()
        self.write_trajectory(traj, dump)
        dump.seek(0)
        traj_cmp = self.read_trajectory(dump)
        self.assertEqual(traj, traj_cmp)
        for frame in traj_cmp:
            self.assertTrue(isinstance(
                frame.shapedef['A'], ArrowShape))

    def test_ellipsoid(self):
        """Ellipsoid semi-axes survive a round trip."""
        from garnett.shapes import EllipsoidShape
        sample = io.StringIO(garnett.samples.POS_INJAVIS)
        traj = self.read_trajectory(sample)
        traj.load_arrays()
        a = 0.5
        b = 0.25
        c = 0.125
        for frame in traj:
            frame.shapedef = {'A': EllipsoidShape(a=a, b=b, c=c)}
        dump = io.StringIO()
        self.write_trajectory(traj, dump)
        dump.seek(0)
        traj_cmp = self.read_trajectory(dump)
        self.assertEqual(traj, traj_cmp)
        for frame in traj_cmp:
            self.assertTrue(isinstance(
                frame.shapedef['A'], EllipsoidShape))

    @unittest.skipIf(not IN_PATH, 'tests not executed from repository root')
    @data(
        'hpmc_sphere',
        'hpmc_sphere_rotated',
        'FeSiUC',
        'Henzie_lithium_cubic_uc',
        'Henzie_lithium_triclinic',
        'cubic_onep',
        'cubic_twop',
        # 'hex_onep',  # These tests are deactivated, because we currently
        # 'hex_twop',  # do not have a solution to keep the reference orientation
        # 'rand_test', # the same. The systems are otherwise identical.
        'scc',
        'switch_FeSiUC',
        'switch_scc',
        'pos_2d')
    def test_read_write_read(self, name):
        """Round-trip the on-disk sample files without rotation."""
        fn = os.path.join(PATH, 'samples', name + '.pos')
        with open(fn) as samplefile:
            traj0 = self.read_trajectory(samplefile)
        with tempfile.NamedTemporaryFile('w', suffix='.pos') as tmpfile:
            self.write_trajectory(traj0, tmpfile, rotate=False)
            tmpfile.flush()
            with open(tmpfile.name) as tmpfile_read:
                traj1 = self.read_trajectory(tmpfile_read)
                for f0, f1 in zip(traj0, traj1):
                    self.assert_approximately_equal_frames(f0, f1)

    @unittest.skipIf(not IN_PATH, 'tests not executed from repository root')
    @data(
        'hpmc_sphere',
        'hpmc_sphere_rotated',
        'xtalslice3_small',
        'FeSiUC',
        # For the following two, the box has a different sign...
        # 'xtalslice3_small_rotated',
        # 'switch_FeSiUC',
    )
    def test_read_write_read_rotated(self, name):
        """Round-trip with rotate=True; orientations are not compared
        because the writer re-orients shapes."""
        fn = os.path.join(PATH, 'samples', name + '.pos')
        with open(fn) as samplefile:
            traj0 = self.read_trajectory(samplefile)
        with tempfile.NamedTemporaryFile('w', suffix='.pos') as tmpfile:
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                self.write_trajectory(traj0, tmpfile, rotate=True)
            tmpfile.flush()
            with open(tmpfile.name) as tmpfile_read:
                traj1 = self.read_trajectory(tmpfile_read)
                for f0, f1 in zip(traj0, traj1):
                    self.assert_approximately_equal_frames(
                        f0, f1, decimals=4, atol=1e-6,
                        ignore_orientation=True  # The shapes themselves are differently oriented
                    )
@unittest.skip("injavis is currently not starting correctly.")
class InjavisReadWriteTest(BasePosFileWriterTest):
    """Round-trip pos files through the external `injavis` viewer binary.

    Skipped entirely while injavis fails to start (see class decorator).
    Requires `injavis` on the PATH when re-enabled.
    """

    def read_write_injavis(self, sample_file):
        # Parse the in-memory sample; then write it, have injavis rewrite
        # it, and compare the last frames of both trajectories.
        sample_file = io.StringIO(sample_file)
        # account for low injavis precision
        traj0 = self.read_trajectory(sample_file, precision=7)
        with tempfile.NamedTemporaryFile('w', suffix='.pos') as tmpfile0:
            with tempfile.NamedTemporaryFile('r', suffix='.pos') as tmpfile1:
                self.write_trajectory(traj0, tmpfile0)
                tmpfile0.flush()
                subprocess.check_call(
                    ['injavis', tmpfile0.name, '-o', tmpfile1.name])
                traj1 = self.read_trajectory(tmpfile1)
        # Injavis only writes last frame
        frame0 = traj0[-1]
        frame1 = traj1[-1]
        # Injavis apparently ignores the color specification when writing
        for frame in (frame0, frame1):
            for name, shapedef in frame.shapedef.items():
                shapedef.color = None
        self.assertEqual(frame0, frame1)

    def test_hpmc_dialect(self):
        self.read_write_injavis(garnett.samples.POS_HPMC)

    def test_incsim_dialect(self):
        self.read_write_injavis(garnett.samples.POS_INCSIM)

    def test_monotype_dialect(self):
        self.read_write_injavis(garnett.samples.POS_MONOTYPE)

    def test_injavis_dialect(self):
        self.read_write_injavis(garnett.samples.POS_INJAVIS)

    def test_hpmc_dialect_2d(self):
        self.read_write_injavis(garnett.samples.POS_HPMC_2D)

    def test_incsim_dialect_2d(self):
        self.read_write_injavis(garnett.samples.POS_INCSIM_2D)

    def test_monotype_dialect_2d(self):
        self.read_write_injavis(garnett.samples.POS_MONOTYPE_2D)
if __name__ == '__main__':
    # Bug fix: `context` and `hoomd` are only bound when the module-level
    # feature detection succeeded; calling them unconditionally raised
    # NameError on machines without HOOMD. The v2-only `hoomd.option`
    # call is additionally guarded, since the v1 path never imports
    # the `hoomd` module itself.
    if HOOMD:
        context.initialize("--mode=cpu")
        if not HOOMD_v1:
            hoomd.option.set_notice_level(0)
    unittest.main()
|
<gh_stars>0
from abc import ABC, abstractmethod
import numbers
import numexpr as ne
import numpy as np
from .. import metrics
from .. import minimizers
class Refocus(ABC):
def __init__(self, field, wavelength, pixel_size, medium_index=1.3333,
distance=0, kernel="helmholtz", padding=True):
r"""
Parameters
----------
field: 2d complex-valued ndarray
Input field to be refocused
wavelength: float
Wavelength of the used light [m]
pixel_size: float
Pixel size of the input image [m]
medium_index: float
Refractive index of the medium, defaults to water
(1.3333 at 21.5°C)
distance: float
Initial focusing distance [m]
kernel: str
Propagation kernel, one of
- "helmholtz": the optical transfer function
:math:`\exp\left(id\left(\sqrt{k_\mathrm{m}^2 - k_\mathrm{x}^2
- k_\mathrm{y}^2} - k_\mathrm{m}\right)\right)`
- "fresnel": paraxial approximation
:math:`\exp(-id(k_\mathrm{x}^2+k_\mathrm{y}^2)/2k_\mathrm{m})`
padding: bool
Whether to perform boundary-padding with linear ramp
"""
super(Refocus, self).__init__()
self.wavelength = wavelength
self.pixel_size = pixel_size
self.medium_index = medium_index
self.distance = distance
self.kernel = kernel
self.padding = padding
self.origin = field
self.fft_origin = self._init_fft(field, padding)
@property
def shape(self):
"""Shape of the padded input field or Fourier transform"""
return self.fft_origin.shape
@abstractmethod
def _init_fft(self, field, padding):
"""Initialize Fourier transform for propagation
This is where you would compute the initial Fourier transform.
E.g. for FFTW, you would do planning here.
Parameters
----------
field: 2d complex-valued ndarray
Input field to be refocused
padding: bool
Whether to perform boundary-padding with linear ramp
Returns
-------
fft_field0: 2d complex-valued ndarray
Fourier transform the initial field
Notes
-----
Any subclass should perform padding with
:func:`nrefocus.pad.padd_add` during initialization.
"""
def autofocus(self, interval, metric="average gradient", minimizer="lmfit",
roi=None, minimizer_kwargs=None, ret_grid=False,
ret_field=False):
"""Autofocus the initial field
Parameters
----------
interval: tuple of floats
Approximate interval to search for optimal focus [m]
metric: str
- "average gradient" : average gradient metric of amplitude
- "rms contrast" : RMS contrast of phase data
- "spectrum" : sum of filtered Fourier coefficients
minimizer: str
- "legacy": custom nrefocus minimizer
- "lmfit": lmfit-based minimizer (uses :func:`lmfit.minimize
<lmfit.minimizer.minimize>`)
roi: list or tuple or slice or ndarray
Region of interest for which the metric will be minimized.
This can be either a list [x1, y1, x2, y2], a tuple or
list of slices or a numpy indexing array. If not given,
the entire field will be used.
minimizer_kwargs: dict
Any additional keyword arguments for the minimizer
ret_grid: bool
return focus positions and metric values of the coarse
grid search
ret_field: bool
return the optimal refocused field for user convenience
Returns
-------
af_distance: float
Autofocusing distance
(d_grid, metrid_grid): ndarray
Coarse grid search values (only if `ret_grid` is True)
af_field: ndarray
Autofocused field (only if `ret_field` is True)
[other]:
Any other objects returned by `minimizer`; may be definable
via `minimizer_kwargs` (depends on minimizer)
"""
if minimizer_kwargs is None:
minimizer_kwargs = {}
# flip interval for user convenience
if interval[0] > interval[1]:
interval = (interval[1], interval[0])
# construct the correct ROI
if (isinstance(roi, (list, tuple))
and isinstance(roi[0], numbers.Number)):
# We have a list of [x1, y1, x2, y2]
if len(roi) == 2:
roi = slice(roi[0], roi[1])
elif len(roi) == 4:
roi = (slice(roi[0], roi[2]), slice(roi[1], roi[3]))
else:
raise ValueError(f"Unexpected valud for `roi`: '{roi}'")
elif roi is None:
# Use all the data
roi = slice(None, None)
metric_func = metrics.METRICS[metric]
minimize_func = minimizers.MINIMIZERS[minimizer]
af_data = minimize_func(
rf=self,
metric_func=metric_func,
interval=interval,
roi=roi,
ret_grid=ret_grid,
ret_field=ret_field,
**minimizer_kwargs)
return af_data
    def get_kernel(self, distance):
        """Return the Fourier-space propagation kernel for `distance`.

        The kernel type `self.kernel` is used
        (see :func:`Refocus.__init__`).

        Parameters
        ----------
        distance: float
            Absolute propagation distance [m]; the kernel is built for
            the relative distance `distance - self.distance`.

        Returns
        -------
        fstemp: 2d ndarray (complex)
            Propagation kernel matching the shape of `self.fft_origin`.
        """
        nm = self.medium_index
        # wavelength expressed in pixels
        res = self.wavelength / self.pixel_size
        # propagation distance relative to the current focus, in pixels
        d = (distance - self.distance) / self.pixel_size
        twopi = 2 * np.pi
        # wavenumber in the medium (pixel units)
        km = twopi * nm / res
        # angular spatial frequencies along both axes (column / row vectors)
        kx = (np.fft.fftfreq(self.fft_origin.shape[0]) * twopi).reshape(-1, 1)
        ky = (np.fft.fftfreq(self.fft_origin.shape[1]) * twopi).reshape(1, -1)
        # `ne` is numexpr (imported elsewhere in this module); used here to
        # evaluate the large elementwise expressions efficiently.
        if self.kernel == "helmholtz":
            # unnormalized: exp(i*d*sqrt(km²-kx²-ky²))
            root_km = ne.evaluate("km ** 2 - kx**2 - ky**2",
                                  local_dict={"kx": kx,
                                              "ky": ky,
                                              "km": km})
            # rt0 masks evanescent components (negative radicand)
            rt0 = ne.evaluate("root_km > 0")
            # multiply by rt0 (filter in Fourier space)
            fstemp = ne.evaluate(
                "exp(1j * d * (sqrt(root_km * rt0) - km)) * rt0",
                local_dict={"root_km": root_km,
                            "rt0": rt0,
                            "km": km,
                            "d": d}
            )
        elif self.kernel == "fresnel":
            # unnormalized: exp(i*d*(km-(kx²+ky²)/(2*km))
            fstemp = ne.evaluate("exp(-1j * d * (kx**2 + ky**2) / (2 * km))",
                                 local_dict={"kx": kx,
                                             "ky": ky,
                                             "km": km,
                                             "d": d})
        else:
            raise KeyError(f"Unknown propagation kernel: '{self.kernel}'")
        return fstemp
    @abstractmethod
    def propagate(self, distance):
        """Propagate the initial field to a certain distance.

        Parameters
        ----------
        distance: float
            Absolute focusing distance [m]

        Returns
        -------
        refocused_field: 2d ndarray
            Initial field refocused at `distance`

        Notes
        -----
        Any subclass should perform padding with
        :func:`nrefocus.pad.pad_rem` during initialization.
        """
# repo: tango4j/loss-balance
"""Loss-balancing utilities: KL/min-variance mixing-weight estimation and
siamese training/evaluation loops."""
import copy
import operator
import warnings

import numpy as np
import torch
import ipdb
import matplotlib
# Backend must be selected BEFORE pyplot is imported, otherwise the call
# has no effect (the original code called it after the pyplot import).
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib as mpl
from pylab import logspace

from utils import *
from losses import ContrastiveLoss_mod as const_loss

warnings.filterwarnings("ignore")
def cp(x):
    """Deep-copy shorthand: return an independent copy of *x*."""
    duplicate = copy.deepcopy(x)
    return duplicate
def minmax_norm(row):
    """Scale *row* linearly into [0, 1].

    Input row: 1-d numpy array of values.
    Output: (row - min) / (max - min); an all-zero array when the row is
            constant (the naive formula divides 0/0 and yields NaN).
    """
    row = np.asarray(row)
    lo = np.min(row)
    span = np.max(row) - lo
    if span == 0:
        # Constant input: every element maps to the bottom of the range.
        return np.zeros_like(row, dtype=float)
    return (row - lo) / span
def get_pdf(losses, bins=None, n_bins=20):
    '''
    Estimate a pdf from a histogram of loss values.

    Input losses: 1-d numpy array of loss values
    Input bins: optional precomputed bin edges (ndarray of length n_bins+1);
                when not given, edges span [0, max + max/n_bins]
    Input n_bins: number of histogram bins (used only when bins is None)
    Output pdf: histogram normalized by the total count
    Output hist_raw: raw (unnormalized) histogram counts
    Output bins: the bin edges actually used, for later reuse
    '''
    # Idiomatic ndarray check (original compared type() objects); edges are
    # only derived from the data when no reusable edges were supplied.
    if not isinstance(bins, np.ndarray):
        max_L = np.max(losses)
        # one extra bin-width of headroom past the maximum value
        max_h = max_L + max_L / n_bins
        bins = np.linspace(0, max_h, n_bins + 1)
    hist_raw, bins = np.histogram(losses, bins)
    pdf = hist_raw / np.sum(hist_raw)
    assert len(pdf) == n_bins == len(bins) - 1, "The length of given pdf, bins and n_bins is not consistent."
    return pdf, hist_raw, bins
def get_KL_div(p, q):
    '''
    Kullback-Leibler divergence D(p || q) between two discrete pdfs.

    Input p: 1-d numpy array (target distribution)
    Input q: 1-d numpy array (reference distribution)
    Output: scalar KL divergence
    '''
    assert len(p) == len(q)
    # Small additive constant keeps both the ratio and the log finite
    # when either distribution has empty bins.
    eps = 1e-10 * np.ones_like(p)
    log_ratio = np.log((p + eps) / (q + eps))
    return np.sum(np.dot(p, log_ratio))
def run_pretrain_task(index_tup, loss_tup, batch_hist_list):
    '''
    Tasks performed at the pre-training epoch (epoch -1): build per-batch
    pdfs of both losses and store them for later KL comparisons.

    Input index_tup: (epoch, batch_idx)
    Input loss_tup: (per-sample contrastive losses, per-sample CE losses),
        both 1-d numpy arrays
    Input batch_hist_list: [batch_pdf_cst, batch_pdf_ce, mw_batch_list,
        prev_weight] bookkeeping container (mutated in place)
    Output batch_pdf_cst: {epoch: {batch_idx: (pdf, hist, bins, mixed_pdf,
        mixed_bins, (epoch, batch_idx))}} table for the contrastive loss
    Output batch_pdf_ce: same table for the cross-entropy loss
    Output batch_hist_list: rebuilt bookkeeping list
    Output (max_KL_mw, KL_val, prev_weight): current weight estimate tuple

    NOTE(review): `batch_idx >= 0` is always true for enumerate() indices,
    so the else-branch (KL estimation against the previous batch) is
    unreachable; compare with the commented-out `== 0` variant in
    run_epoch_0_task — likely the intended condition. Confirm before use.
    '''
    epoch, batch_idx = index_tup
    iter_loss_cst, iter_loss_ce = loss_tup
    batch_pdf_cst, batch_pdf_ce, mw_batch_list, prev_weight = batch_hist_list
    pdf_cst, hist_cst, org_bins_cst = get_pdf(iter_loss_cst, bins=None)
    pdf_ce, hist_ce, org_bins_ce = get_pdf(iter_loss_ce, bins=None)
    if batch_idx >= 0:
        # Defaults: no KL signal yet, equal weighting.
        KL_val, max_KL_mw, prev_weight = 0, 0.5, 0.5
        org_mixed_pdf, mixed_hist_raw, org_mixed_bins = get_weighted_pdfs(iter_loss_cst, iter_loss_ce, mixed_bins=None)
        pdf_cst, hist_cst, new_bins_cst = get_pdf(iter_loss_cst, bins=org_mixed_bins)
        pdf_ce, hist_ce, new_bins_ce = get_pdf(iter_loss_ce, bins=org_mixed_bins)
        batch_pdf_cst[epoch][batch_idx] = ( pdf_cst, hist_cst, org_bins_cst, org_mixed_pdf, org_mixed_bins, (cp(epoch), cp(batch_idx)) )
        batch_pdf_ce[epoch][batch_idx] = ( pdf_ce, hist_ce, org_bins_ce, org_mixed_pdf, org_mixed_bins, (cp(epoch), cp(batch_idx)) )
    else:
        # NOTE(review): dead code under the current condition (see above).
        if batch_pdf_cst[epoch][batch_idx-1] != []:
            (_, org_hist_cst, org_bins_cst, org_mixed_pdf, org_mixed_bins, (ref_epoch, ref_batch_idx)) = batch_pdf_cst[epoch][batch_idx-1]
            (_, org_hist_ce, org_bins_ce, org_mixed_pdf, org_mixed_bins, (ref_epoch, ref_batch_idx)) = batch_pdf_ce[epoch][batch_idx-1]
            max_KL_mw, KL_val = get_max_KL_mw(iter_loss_cst, iter_loss_ce, org_mixed_bins, org_mixed_pdf)
        org_mixed_pdf, mixed_hist_raw, org_mixed_bins = get_weighted_pdfs(iter_loss_cst, iter_loss_ce, mixed_bins=None)
        batch_pdf_cst[epoch][batch_idx] = ( pdf_cst, hist_cst, org_bins_cst, org_mixed_pdf, org_mixed_bins, (cp(epoch), cp(batch_idx)) )
        batch_pdf_ce[epoch][batch_idx] = ( pdf_ce, hist_ce, org_bins_ce, org_mixed_pdf, org_mixed_bins, (cp(epoch), cp(batch_idx)) )
    batch_hist_list = [batch_pdf_cst, batch_pdf_ce, mw_batch_list, prev_weight]
    return batch_pdf_cst, batch_pdf_ce, batch_hist_list, (max_KL_mw, KL_val, prev_weight)
def run_epoch_0_task(index_tup, loss_tup, trInst):
    '''
    Tasks performed at epoch 0: build per-batch pdfs of both losses,
    compute the lookahead mixed pdf and the KL-selected mixing weight,
    store results in trInst and dump a 2-d loss histogram to disk.

    Input index_tup: (epoch, batch_idx)
    Input loss_tup: (per-sample contrastive losses, per-sample CE losses)
    Input trInst: TrInst state object (mutated in place)
    Output trInst: the updated state object
    Output var_init_tup: (max_KL_mw, KL_val) — note max_KL_mw comes from
        get_max_KL_mw_lh, which actually returns the argmin weight.
    '''
    epoch, batch_idx = index_tup
    iter_loss_cst, iter_loss_ce = loss_tup
    # batch_pdf_cst, batch_pdf_ce, mw_batch_list, prev_weight = batch_hist_list
    pdf_cst, hist_cst, org_bins_cst = get_pdf(iter_loss_cst, bins=None)
    pdf_ce, hist_ce, org_bins_ce = get_pdf(iter_loss_ce, bins=None)
    # Earlier per-batch KL scheme kept for reference:
    # if batch_idx == 0:
    #     KL_val, max_KL_mw, prev_weight = 0, 0.5, 0.5
    #     org_mixed_pdf, mixed_hist_raw, org_mixed_bins = get_weighted_pdfs(iter_loss_cst, iter_loss_ce, mixed_bins=None)
    #     # lh_loss_cst, lh_loss_ce = get_lookahead_pdfs(index_tup, loss_tup, trInst)
    #     # org_mixed_pdf, mixed_hist_raw, org_mixed_bins = get_weighted_pdfs(lh_loss_cst, lh_loss_ce, mixed_bins=org_mixed_bins)
    # else:
    #     if trInst.batch_pdf_cst[epoch][batch_idx-1] != []:
    #         (_, org_hist_cst, org_bins_cst, org_mixed_pdf, org_mixed_bins, (ref_epoch, ref_batch_idx)) = trInst.batch_pdf_cst[epoch][batch_idx-1]
    #         (_, org_hist_ce, org_bins_ce, org_mixed_pdf, org_mixed_bins, (ref_epoch, ref_batch_idx)) = trInst.batch_pdf_ce[epoch][batch_idx-1]
    #     max_KL_mw, KL_val = get_max_KL_mw(iter_loss_cst, iter_loss_ce, org_mixed_bins, org_mixed_pdf)
    # Current scheme: compare against the lookahead (margin-perturbed) losses.
    lh_loss_cst, lh_loss_ce = get_lookahead_pdfs(index_tup, loss_tup, trInst)
    org_mixed_pdf, lh_hist_raw, org_mixed_bins = get_weighted_pdfs(lh_loss_cst, lh_loss_ce, mixed_bins=None)
    max_KL_mw, KL_val = get_max_KL_mw_lh(iter_loss_cst, iter_loss_ce, org_mixed_bins, org_mixed_pdf)
    trInst.batch_pdf_cst[epoch][batch_idx] = ( pdf_cst, hist_cst, org_bins_cst, org_mixed_pdf, org_mixed_bins, (cp(epoch), cp(batch_idx)) )
    trInst.batch_pdf_ce[epoch][batch_idx] = ( pdf_ce, hist_ce, org_bins_ce, org_mixed_pdf, org_mixed_bins, (cp(epoch), cp(batch_idx)) )
    # Side effect: writes a PNG per batch (see save_hist's hard-coded path).
    save_hist(iter_loss_cst, iter_loss_ce, epoch, batch_idx)
    var_init_tup = (max_KL_mw, KL_val)
    return trInst, var_init_tup
def run_epoch_1_task(index_tup, loss_tup, trInst):
    '''
    Tasks performed from epoch 1 onward: compare the current batch's loss
    distributions against reference pdfs recorded in earlier epochs (plus a
    lookahead pdf) and pick the mixing weight maximizing the combined KL
    objective.

    Input index_tup: (epoch, batch_idx)
    Input loss_tup: (per-sample contrastive losses, per-sample CE losses)
    Input trInst: TrInst state object (mutated in place)
    Output trInst: the updated state object
    Output var_rest_tup: (max_KL_mw, KL_val)

    NOTE(review): epoch_ceil is pinned to 1, so only epoch 0's stored pdfs
    are used as references; org_bins_*/org_mixed_bins keep the values from
    the loop's final iteration.
    '''
    epoch, batch_idx = index_tup
    iter_loss_cst, iter_loss_ce = loss_tup
    # batch_pdf_cst, batch_pdf_ce, mw_batch_list, prev_weight = batch_hist_list
    prev_mixed_pdf_list = []
    # epoch_ceil = epoch
    epoch_ceil = 1
    for epoch_idx in range(0, epoch_ceil):
        (_, org_hist_cst, org_bins_cst, org_mixed_pdf, org_mixed_bins, (ref_epoch, ref_batch_idx)) = trInst.batch_pdf_cst[epoch_idx][batch_idx]
        (_, org_hist_ce, org_bins_ce, org_mixed_pdf, org_mixed_bins, (ref_epoch, ref_batch_idx)) = trInst.batch_pdf_ce[epoch_idx][batch_idx]
        prev_mixed_pdf_list.append(org_mixed_pdf)
    lh_loss_cst, lh_loss_ce = get_lookahead_pdfs(index_tup, loss_tup, trInst)
    lh_mixed_pdf, lh_hist_raw, lh_mixed_bins = get_weighted_pdfs(lh_loss_cst, lh_loss_ce, mixed_bins=org_mixed_bins)
    ### Get max_KL_mw from two loss vectors and ref_mixed_bins
    # max_KL_mw, KL_val = get_max_KL_mw(iter_loss_cst, iter_loss_ce, org_mixed_bins, org_mixed_pdf)
    # max_KL_mw, KL_val = get_max_KL_mw_lh(iter_loss_cst, iter_loss_ce, org_mixed_bins, org_mixed_pdf)
    # max_KL_mw, KL_val = get_max_KL_mw_from_list(iter_loss_cst, iter_loss_ce, org_mixed_bins, prev_mixed_pdf_list)
    max_KL_mw, KL_val = get_max_KL_mw_from_list_and_lh(iter_loss_cst, iter_loss_ce, org_mixed_bins, prev_mixed_pdf_list, lh_mixed_pdf)
    ### Get pdfs
    pdf_cst, hist_cst, new_bins_cst = get_pdf(iter_loss_cst, bins=org_bins_cst)
    pdf_ce, hist_ce, new_bins_ce = get_pdf(iter_loss_ce, bins=org_bins_ce)
    ### Get weighted pdfs for reference pdfs
    org_mixed_pdf, mixed_hist_raw, org_mixed_bins = get_weighted_pdfs(iter_loss_cst, iter_loss_ce, mixed_bins=org_mixed_bins)
    trInst.batch_pdf_cst[epoch][batch_idx] = (pdf_cst, hist_cst, org_bins_cst, org_mixed_pdf, org_mixed_bins, (epoch, batch_idx))
    trInst.batch_pdf_ce[epoch][batch_idx] = (pdf_ce, hist_ce, org_bins_ce, org_mixed_pdf, org_mixed_bins, (epoch, batch_idx))
    ### For the circulation of mw values
    # mv_mw_sum, kl_mw_sum, total_samples, prev_weight = mw_batch_list
    # batch_hist_list = [batch_pdf_cst, batch_pdf_ce, mw_batch_list, prev_weight]
    ### Compare reference pdf and current distribution
    # print("Ref {}-{}:".format(ref_epoch, ref_batch_idx), org_hist_cst,"\nCur {}-{}:".format(epoch, batch_idx), hist_cst)
    # print("Ref {}-{}:".format(ref_epoch, ref_batch_idx), org_hist_ce, "\nCur {}-{}:".format(epoch, batch_idx), hist_ce)
    # Side effect: writes a PNG per batch (see save_hist).
    save_hist(iter_loss_cst, iter_loss_ce, epoch, batch_idx)
    var_rest_tup = (max_KL_mw, KL_val)
    return trInst, var_rest_tup
def save_hist(iter_loss_cst, iter_loss_ce, epoch, batch_idx,
              path_template="/sam/home/inctrl/Dropbox/Special Pics/research_pics/loss_bal/test_e{}_i{}.png"):
    """
    Save a 2-d log-density histogram of (CE loss, contrastive loss) pairs.

    Input iter_loss_cst: 1-d array of per-sample contrastive losses (y axis)
    Input iter_loss_ce: 1-d array of per-sample cross-entropy losses (x axis)
    Input epoch, batch_idx: fill the two slots of the output file name
    Input path_template: format string for the output path; parameterized so
        the previously hard-coded personal path can be overridden by callers
    """
    path = path_template.format(epoch, batch_idx)
    # axis = logspace(-1e10,0, 10)
    axis = 100  # number of histogram bins per axis
    plt.hist2d(iter_loss_ce, iter_loss_cst, bins=(axis, axis),
               norm=mpl.colors.LogNorm(), cmap='BuGn',
               range=[[0, 8.0], [0.0, 1.8]])
    plt.xlabel("Cross Entropy")
    # Typo fix: label previously read "Constrasive Loss".
    plt.ylabel("Contrastive Loss")
    plt.savefig(path)
    # Fully reset pyplot state so repeated per-batch calls do not leak figures.
    plt.cla()
    plt.clf()
    plt.close()
def tonp(x):
    """Convert a (possibly GPU-resident, grad-tracking) tensor to numpy."""
    detached = x.detach()
    return detached.cpu().numpy()
def get_feedback(target, distance, thres = 1.0):
    """
    Per-sample correctness feedback for a distance-threshold classifier.

    Input target: tuple/list whose first element is the ground-truth array
    Input distance: array of pairwise distances
    Input thres: decision threshold; distance < thres predicts "same"
    Output: list of booleans, True where the prediction matches the target
    """
    truth = target[0]
    predictions = distance.ravel() < thres
    return list(predictions == truth)
def get_lookahead_pdfs(index_tup, loss_tup, trInst):
    '''
    Estimate "lookahead" per-sample losses: nudge each embedding pair toward
    their mean for same-class pairs, or apart for different-class pairs, by
    trInst.margin_LH, re-run the model heads on the perturbed embeddings and
    re-evaluate both losses without gradients.

    Input index_tup: (epoch, batch_idx) — currently unused here
    Input loss_tup: (current contrastive losses, current CE losses), used
        only for the diagnostic "did the lookahead loss increase?" prints
    Input trInst: TrInst carrying model, data, outputs, labels, loss fns
    Output: (lookahead contrastive losses, lookahead CE1+CE2 losses) as
        numpy arrays

    NOTE(review): trInst.model.eval() is called here but train mode is not
    restored before returning; the caller's epoch loop only re-enables
    train() once per epoch — confirm this is intended.
    '''
    with torch.no_grad():
        (loss_fn, loss_fn_ce) = trInst.loss_fn_tup
        (outraw1, outraw2) = trInst.outputs
        lh = trInst.margin_LH
        lh = torch.tensor(lh).cuda(trInst.gpu)
        label1, label2 = trInst.label1, trInst.label2
        target = trInst.target
        target_float = trInst.target.float()
        # One column per embedding; target_rep broadcasts over the pair.
        target_rep= target_float.repeat(2,1).t()
        outraw_mean = 0.5 * (outraw1 + outraw2)
        # Same-class pairs (target=1): pull toward the pair mean.
        # Different-class pairs (target=0): push away from the pair mean.
        outraw1_lh = target_rep *(lh*outraw_mean + (1-lh)*outraw1) + (1-target_rep)*(outraw1 + lh*(outraw1-outraw_mean))
        outraw2_lh = target_rep *(lh*outraw_mean + (1-lh)*outraw2) + (1-target_rep)*(outraw2 + lh*(outraw2-outraw_mean))
        # ipdb.set_trace()
        data_lh = trInst.data + (outraw1_lh, outraw2_lh)
        trInst.model.eval()
        output1, output2, score1, score2 = trInst.model(*data_lh)
        # loss_preprocessing_args = (output1, output2, score1, score2, trInst.label1, trInst.label2, trInst.target)
        outputs = (outraw1_lh, outraw2_lh)
        outputs_ce1 = (score1,)
        outputs_ce2 = (score2,)
        if type(outputs) not in (tuple, list):
            outputs = (outputs,)
        loss_inputs_cst = outputs
        if target is not None:
            target = (target,)
            loss_inputs_cst += target
        loss_inputs_ce1 = outputs_ce1
        loss_inputs_ce2 = outputs_ce2
        if label1 is not None and label2 is not None:
            loss_inputs_ce1 += (label1,)
            loss_inputs_ce2 += (label2,)
        # loss_inputs_cst, loss_inputs_ce1, loss_inputs_ce2, _ = loss_input_process(*loss_preprocessing_args)
        loss_outputs, distance, losses_const = loss_fn(*loss_inputs_cst)
        loss_outputs_ce1, losses_ce1 = loss_fn_ce(*loss_inputs_ce1)
        loss_outputs_ce2, losses_ce2 = loss_fn_ce(*loss_inputs_ce2)
        # Diagnostics: the lookahead step is expected to reduce both losses.
        if torch.sum(losses_const).detach().cpu().numpy() > np.sum(loss_tup[0]):
            print("Old CST loss", np.sum(loss_tup[0]), "New CST loss:", torch.sum(losses_const) )
        if torch.sum(losses_ce1+losses_ce2).detach().cpu().numpy() > np.sum(loss_tup[1]):
            print("Old CE_ loss", np.sum(loss_tup[1]), "New CE_ loss:", torch.sum(losses_ce1+losses_ce2) )
        # ipdb.set_trace()
        return tonp(losses_const), tonp(losses_ce1 + losses_ce2)
def get_weighted_pdfs(losses1, losses2, mixed_bins=None, n_bins=500):
    '''
    Build pdfs of the mixture mw*losses1 + (1-mw)*losses2 over a grid of
    n_bins+1 mixing weights mw in [0, 1].

    Input losses1: 1-d numpy array of loss values
    Input losses2: 1-d numpy array of loss values
    Input mixed_bins: optional dict of per-mw histogram bin edges (as
        returned by a previous call); when given, each mw's pdf is computed
        on its reference edges so KL divergences between calls compare
        like-for-like histograms
    Input n_bins: number of mixing-weight grid steps (NOT histogram bins)
    Output mixed_pdf: dict mw -> pdf of the mixed losses
    Output mixed_hist_raw: dict mw -> unnormalized histogram counts
    Output out_bins: dict mw -> bin edges used, for future reuse
    '''
    itv = 1.0 / n_bins
    max_ceil = 1.0 + itv
    # Bug fix: the original rebound `mixed_bins` to a fresh output dict here,
    # shadowing the parameter — caller-supplied reference bins were silently
    # ignored and every call recomputed edges from its own data.
    mixed_pdf, mixed_hist_raw, out_bins = {}, {}, {}
    for mw in np.arange(0, max_ceil, itv):
        loss_mix = mw * losses1 + (1 - mw) * losses2
        ref_bins = None if mixed_bins is None else mixed_bins.get(mw)
        mixed_pdf[mw], mixed_hist_raw[mw], out_bins[mw] = get_pdf(loss_mix, bins=ref_bins)
    return mixed_pdf, mixed_hist_raw, out_bins
def get_KL_values_from_pdfs(p_pdf_list, q_pdf_list):
    '''
    KL divergence for each mixing weight "mw".

    Input p_pdf_list: dict mw -> target pdf
    Input q_pdf_list: dict mw -> reference pdf
    Output: dict mw -> KL(p || q)
    '''
    assert len(p_pdf_list.items()) == len(q_pdf_list.items()), "Two dictionaries have different size."
    return {mw: get_KL_div(p_pdf, q_pdf_list[mw])
            for mw, p_pdf in p_pdf_list.items()}
def get_KL_values_from_list_of_pdfs(p_pdf_list, q_pdf_list_from_epoches):
    '''
    Like get_KL_values_from_pdfs() but accumulates KL(p || q) over several
    reference pdf dictionaries (one per epoch).

    Input p_pdf_list: dict mw -> target pdf
    Input q_pdf_list_from_epoches: list of dicts mw -> reference pdf
    Output: dict mw -> KL divergence summed over all reference dicts
    '''
    assert len(p_pdf_list.items()) == len(q_pdf_list_from_epoches[0].items()), "Two dictionaries have different size."
    KL_values_dict = {}
    for q_pdf_list in q_pdf_list_from_epoches:
        for mw, p_pdf in p_pdf_list.items():
            contribution = get_KL_div(p_pdf, q_pdf_list[mw])
            KL_values_dict[mw] = KL_values_dict.get(mw, 0.0) + contribution
    return KL_values_dict
def get_KL_values_from_list_of_pdfs_and_lh(p_pdf_list, q_pdf_list_from_epoches, r_pdf_list):
    '''
    Like get_KL_values_from_list_of_pdfs() but each epoch's contribution is
    penalized by the KL divergence of the lookahead pdf from the target.

    Input p_pdf_list: dict mw -> target pdf
    Input q_pdf_list_from_epoches: list of dicts mw -> reference pdf
    Input r_pdf_list: dict mw -> lookahead pdf
    Output: dict mw -> sum over epochs of KL(p || q) - KL(r_lh || p)
    '''
    assert len(p_pdf_list.items()) == len(q_pdf_list_from_epoches[0].items()), "Two dictionaries have different size."
    KL_values_dict = {}
    for q_pdf_list in q_pdf_list_from_epoches:
        for mw, p_pdf in p_pdf_list.items():
            delta = get_KL_div(p_pdf, q_pdf_list[mw]) - get_KL_div(r_pdf_list[mw], p_pdf)
            KL_values_dict[mw] = KL_values_dict.get(mw, 0.0) + delta
    return KL_values_dict
def get_argmax_KL_dict(KL_values_dict):
    '''
    Return (mw, value) for the largest value in KL_values_dict.
    '''
    assert len(KL_values_dict.items()) != 0, "KL_values_dict is empty."
    best_mw = max(KL_values_dict, key=KL_values_dict.get)
    return best_mw, KL_values_dict[best_mw]
def get_argmin_KL_dict(KL_values_dict):
    '''
    Return (mw, value) for the smallest value in KL_values_dict.
    '''
    assert len(KL_values_dict.items()) != 0, "KL_values_dict is empty."
    best_mw = min(KL_values_dict, key=KL_values_dict.get)
    return best_mw, KL_values_dict[best_mw]
def get_max_KL_mw(loss1, loss2, ref_mixed_bins, ref_mixed_pdf):
    '''
    Pick the mixing weight with the LARGEST KL divergence of the current
    mixed-loss pdfs against the reference pdfs.

    Output: (max_KL_mw, KL_val)
    '''
    candidate_pdfs, _, _ = get_weighted_pdfs(loss1, loss2, ref_mixed_bins)
    kl_by_mw = get_KL_values_from_pdfs(candidate_pdfs, ref_mixed_pdf)
    return get_argmax_KL_dict(kl_by_mw)
def get_max_KL_mw_lh(loss1, loss2, ref_mixed_bins, ref_mixed_pdf):
    '''
    Lookahead variant: despite the "max" in the name, this selects the
    mixing weight with the SMALLEST KL divergence against the reference
    (lookahead) pdfs — presumably intentional; confirm with the author.

    Output: (min_KL_mw, KL_val)
    '''
    candidate_pdfs, _, _ = get_weighted_pdfs(loss1, loss2, ref_mixed_bins)
    kl_by_mw = get_KL_values_from_pdfs(candidate_pdfs, ref_mixed_pdf)
    return get_argmin_KL_dict(kl_by_mw)
def get_max_KL_mw_from_list(loss1, loss2, ref_mixed_bins, ref_mixed_pdf_from_epoches):
    '''
    Pick the mixing weight maximizing the KL divergence accumulated over a
    list of per-epoch reference pdf dictionaries.

    Output: (max_KL_mw, KL_val)
    '''
    candidate_pdfs, _, _ = get_weighted_pdfs(loss1, loss2, ref_mixed_bins)
    kl_by_mw = get_KL_values_from_list_of_pdfs(candidate_pdfs, ref_mixed_pdf_from_epoches)
    return get_argmax_KL_dict(kl_by_mw)
def get_max_KL_mw_from_list_and_lh(loss1, loss2, ref_mixed_bins, ref_mixed_pdf_from_epoches, ref_mixed_pdf_lh):
    '''
    Pick the mixing weight maximizing the lookahead-penalized KL objective
    accumulated over per-epoch reference pdfs.

    Output: (max_KL_mw, KL_val)
    '''
    candidate_pdfs, _, _ = get_weighted_pdfs(loss1, loss2, ref_mixed_bins)
    kl_by_mw = get_KL_values_from_list_of_pdfs_and_lh(candidate_pdfs, ref_mixed_pdf_from_epoches, ref_mixed_pdf_lh)
    return get_argmax_KL_dict(kl_by_mw)
def define_vars_for_MW_est(length_of_data_loader, max_epoch=20, initial_weight=0.5):
    """
    Allocate the bookkeeping containers used for mixing-weight estimation.

    Output: [batch_pdf_cst, batch_pdf_ce, mw_batch_list, prev_weight], where
    the first two are {epoch: {batch_idx: []}} nested dicts, mw_batch_list
    is a 4-slot placeholder list and prev_weight starts at initial_weight.
    """
    def _empty_table():
        # One empty slot per (epoch, batch) pair.
        return {e: {b: [] for b in range(length_of_data_loader)}
                for e in range(max_epoch)}
    return [_empty_table(), _empty_table(), [None] * 4, initial_weight]
class TrInst:
    """Mutable container for per-run training state: current batch tensors,
    per-(epoch, batch) pdf history tables and the running sums behind the
    minimum-variance / KL mixing-weight estimates."""
    def __init__(self,gpu, seed, loss_fn_tup, length_of_data_loader, initial_weight):
        # Run identity / hardware
        self.gpu = gpu
        self.seed = seed
        # Placeholders the training loop fills in every iteration
        self.model = None
        self.data = None
        self.outputs = None
        self.target = None
        self.metric_instances = None
        # Loss functions and lookahead perturbation margin
        self.loss_fn_tup = loss_fn_tup
        self.margin_LH = 0.01
        # Running sums for the weight estimators
        self.max_epoch = 20
        self.total_samples = 0
        self.mv_mw_sum = 0
        self.kl_mw_sum = 0
        self.cum_MV_weight = initial_weight
        self.cum_KL_weight = initial_weight
        # Mixing-weight state carried between epochs
        self.initial_weight = initial_weight
        self.prev_weight = self.initial_weight
        self.mw_batch_list = [None] * 4
        # Per-(epoch, batch) pdf history tables
        self.batch_pdf_cst = {e: {b: [] for b in range(length_of_data_loader)}
                              for e in range(self.max_epoch)}
        self.batch_pdf_ce = {e: {b: [] for b in range(length_of_data_loader)}
                             for e in range(self.max_epoch)}
'''
Minimum Variance Method
'''
def min_var(X):
    '''
    Minimum-variance weights for M stacked series.

    Input X: M x N array (M series of N samples each)
    Output: length-M array of weights, w = C⁻¹1 / (1ᵀC⁻¹1), summing to 1,
            where C is the sample covariance of the rows.
    '''
    ones = np.ones((X.shape[0], 1))
    cov = np.cov(X)
    cov_inv = np.linalg.inv(cov)
    numerator = np.matmul(cov_inv, ones)
    denominator = np.matmul(np.matmul(ones.T, cov_inv), ones)
    weights = (numerator / denominator).T
    return weights[0]
def get_min_var_result(loss1, loss2):
    '''
    Minimum-variance mixing weights for two per-sample loss vectors.

    Each vector is max-normalized before the solve; the resulting weights
    are mapped back to the original loss scales and renormalized to sum
    to one.

    Input loss1 and loss2: 1-d numpy arrays of per-sample losses
    Output: [w1, w2] with w1 + w2 == 1
    '''
    scale1, scale2 = np.max(loss1), np.max(loss2)
    stacked = np.vstack((loss1 / scale1, loss2 / scale2))
    raw_w = min_var(stacked)
    # Undo the per-row normalization, then renormalize to the simplex.
    rescaled = [raw_w[0] / scale1, raw_w[1] / scale2]
    total = sum(rescaled)
    return [rescaled[0] / total, rescaled[1] / total]
def loss_input_process(*args):
    """
    Assemble the argument tuples for the three loss functions from raw
    network outputs.

    args: (output1, output2, score1, score2, label1, label2, target)
    Output: (loss_inputs_cst, loss_inputs_ce1, loss_inputs_ce2,
             (outputs, outputs_ce1, outputs_ce2))
    """
    output1, output2, score1, score2, label1, label2, target = args
    outputs = (output1, output2)
    outputs_ce1 = (score1,)
    outputs_ce2 = (score2,)
    # Defensive wrap kept from the original (outputs is always a tuple here).
    if type(outputs) not in (tuple, list):
        outputs = (outputs,)
    cst_inputs = outputs
    if target is not None:
        cst_inputs = cst_inputs + (target,)
    ce1_inputs = outputs_ce1
    ce2_inputs = outputs_ce2
    if label1 is not None and label2 is not None:
        ce1_inputs = ce1_inputs + (label1,)
        ce2_inputs = ce2_inputs + (label2,)
    return cst_inputs, ce1_inputs, ce2_inputs, (outputs, outputs_ce1, outputs_ce2)
def fit_siam(gpu, train_loader, val_loader, model_org_pack, model, loss_fn_tup, optimizer, scheduler, n_epochs, cuda, log_interval, mix_weight, MVLW, metric_classes=[], seed=0, start_epoch=0):
    """
    Train and validate the siamese model for n_epochs, re-estimating the
    loss-mixing weight per epoch when MVLW is truthy.

    Output: (write_var, mix_weight, mix_weight_list) — a CSV-ish result line
    for the final epoch, the final mixing weight, and the (seed, weight,
    cum-KL-weight) string tuple from the last training epoch.

    NOTE(review): `metric_classes=[]` is a mutable default argument; it is
    only iterated here, but prefer None + fallback.
    NOTE(review): when MVLW, mix_weight is set to 1.0 and immediately
    overwritten with 0.5; `batch_hist_list` built below is shadowed by the
    trInst returned from train_siam_epoch (its 6th return value is trInst).
    """
    model_org, optimizer_org, scheduler_org = model_org_pack
    # Fast-forward both schedulers to start_epoch before resetting it.
    for epoch in range(0, start_epoch):
        scheduler.step()
        scheduler_org.step()
    batch_hist_list = define_vars_for_MW_est(len(train_loader), max_epoch=20, initial_weight=0.5)
    start_epoch = 0
    if MVLW:
        mix_weight = 1.0
        mix_weight = 0.5
    trInst = TrInst(gpu=gpu,
                    seed=seed,
                    loss_fn_tup=loss_fn_tup,
                    length_of_data_loader=len(train_loader),
                    initial_weight=mix_weight)
    for epoch in range(start_epoch, n_epochs):
        # NOTE(review): scheduler.step() before the epoch — older PyTorch
        # ordering; newer versions expect it after optimizer steps.
        scheduler.step()
        scheduler_org.step()
        # Train stage
        np.random.seed(seed)
        print("\nTraining... ")
        train_loss, vcel1, vcel2, metrics, mix_weight, batch_hist_list, mix_weight_list = train_siam_epoch(gpu, train_loader, epoch, model_org_pack, model, loss_fn_tup, optimizer, cuda, log_interval, metric_classes, trInst, mix_weight, MVLW)
        message = '[seed: {} mixw: {:.4f}] Epoch: {}/{}. Train set: Const loss: {:.4f} CE-loss1 {:.4f} CE-loss2 {:.4f}'.format(trInst.seed, mix_weight, epoch + 1, n_epochs, train_loss, vcel1, vcel2)
        for metric in metrics:
            message += '\t{}: {}'.format(metric.name(), metric.value())
        print("\nTesting... ")
        val_loss, vcel1, vcel2, metrics = test_siam_epoch(gpu, val_loader, epoch, model_org_pack, model, loss_fn_tup, cuda, metric_classes, trInst, MVLW)
        val_loss /= len(val_loader)
        message += '\n[seed: {} mixw: {:.4f}] Epoch: {}/{}. Validation set: Const loss: {:.4f} CE-loss1 {:.4f} CE-loss2 {:.4f}'.format(trInst.seed, mix_weight, epoch + 1, n_epochs, val_loss, vcel1, vcel2)
        write_var = "{}, {:.4f}, {}, {}, {:.5f}, {:.4f}, {:.4f}".format(trInst.seed, mix_weight, epoch + 1, n_epochs, val_loss, vcel1, vcel2)
        for metric in metrics:
            message += ' {}: {}'.format(metric.name(), metric.value())
            write_var += ', {}'.format(metric.value())
        print(message)
    return write_var, mix_weight, mix_weight_list
def train_siam_epoch(gpu, train_loader, epoch, model_org_pack, model, loss_fn_tup, optimizer, cuda, log_interval, metric_classes, trInst, mix_weight, MVLW=0):
    """
    Run one training epoch of the siamese model with dynamic loss mixing.

    train_loader yields (data, target, label1, label2) batches.
    model_org_pack is (model_org, optimizer_org, scheduler_org); model_org
    produces the embeddings actually fed to the contrastive loss, while the
    siamese `model` supplies the classification scores.
    loss_fn_tup is (contrastive loss fn, cross-entropy loss fn); each returns
    the batch loss followed by per-sample values.
    mix_weight weights the contrastive term; (1 - mix_weight) weights CE.
    MVLW: when truthy, re-estimate the mixing weight each batch via the
    minimum-variance and KL criteria.

    Output: (total_loss, ce_loss1, ce_loss2, metric_instances, mix_weight,
    trInst, mix_weight_list). The 6th element is the mutated trInst object
    (the caller binds it under the name batch_hist_list).
    """
    # Fresh metric objects for this epoch
    metric_instances=[]
    for metric_class in metric_classes:
        metric_instance = metric_class()
        metric_instances.append(metric_instance)
    model_org, optimizer_org, scheduler_org = model_org_pack
    model_org.train()
    model.train()
    losses = []
    total_loss, ce_loss1, ce_loss2= 0, 0, 0
    loss_list=[ [], [] ]
    KL_cum_list = []
    # NOTE(review): the same name is bound twice; the second binding was
    # probably meant to be a different variable (e.g. org_mixed_pdf).
    org_mixed_bins, org_mixed_bins = [], []
    for batch_idx, (data, target, label1, label2) in enumerate(train_loader):
        iter_loss_list = [ [], [] ]  # NOTE(review): written but never read
        target = target if len(target) > 0 else None
        if not type(data) in (tuple, list):
            data = (data,)
        if cuda:
            data = tuple(d.cuda(trInst.gpu) for d in data)
            if target is not None:
                target = target.cuda(trInst.gpu)
                label1 = label1.cuda(trInst.gpu)
                label2 = label2.cuda(trInst.gpu)
        # The mixing weight participates in the loss but must not train.
        mix_weight = torch.tensor(mix_weight).cuda(trInst.gpu)
        mix_weight.requires_grad = False
        optimizer_org.zero_grad()
        optimizer.zero_grad()
        data_siam = data + (None, None)
        output1, output2, score1, score2 = model(*data_siam)
        # NOTE(review): the siamese embeddings are immediately replaced by
        # model_org's — only score1/score2 from `model` are kept.
        output1, output2 = model_org(*data)
        # Assemble loss inputs (same shape as loss_input_process).
        outputs = (output1, output2)
        outputs_ce1 = (score1,)
        outputs_ce2 = (score2,)
        if type(outputs) not in (tuple, list):
            outputs = (outputs,)
        loss_inputs_cst = outputs
        if target is not None:
            target = (target,)
            loss_inputs_cst += target
        loss_inputs_ce1 = outputs_ce1
        loss_inputs_ce2 = outputs_ce2
        if label1 is not None and label2 is not None:
            loss_inputs_ce1 += (label1,)
            loss_inputs_ce2 += (label2,)
        ### Put data, target, output into trInst
        assert label1.shape[0] == score1.shape[0], "Label and score dimension should match."
        trInst.model, trInst.data, trInst.target, trInst.outputs, trInst.metric_instances, trInst.label1, trInst.label2= \
            (model, data, target[0], (output1, output2), metric_instances, label1, label2)
        loss_fn, loss_fn_ce = loss_fn_tup
        loss_outputs, distance, losses_const = loss_fn(*loss_inputs_cst)
        loss_outputs_ce1, losses_ce1 = loss_fn_ce(*loss_inputs_ce1)
        loss_outputs_ce2, losses_ce2 = loss_fn_ce(*loss_inputs_ce2)
        loss = loss_outputs[0] if type(loss_outputs) in (tuple, list) else loss_outputs
        losses = [loss_outputs.item(), loss_outputs_ce1.item(), loss_outputs_ce2.item()]
        total_loss += loss_outputs.item()
        ce_loss1 += loss_outputs_ce1.item()
        ce_loss2 += loss_outputs_ce2.item()
        # Per-sample losses on CPU for the weight estimators.
        lcst, lce1, lce2 = (losses_const.detach().cpu().numpy(), losses_ce1.detach().cpu().numpy(), losses_ce2.detach().cpu().numpy())
        if epoch <= 20 and MVLW:
            iter_loss_cst = lcst
            iter_loss_ce = lce1 + lce2
            # Minimum-variance weight for this batch.
            wm = get_min_var_result(iter_loss_cst, iter_loss_ce)
            if epoch == 0:
                trInst, var_init_tup = run_epoch_0_task((epoch, batch_idx), (iter_loss_cst, iter_loss_ce), trInst)
                (max_KL_mw, KL_val) = var_init_tup
            else:
                trInst, var_rest_tup = run_epoch_1_task((epoch, batch_idx), (iter_loss_cst, iter_loss_ce), trInst)
                (max_KL_mw, KL_val) = var_rest_tup
            ### decay=epoch for decaying values
            decay = 0 # decay=epoch for decaying values
            # Accumulate sample-weighted estimator sums (condition is
            # always true; kept for experimentation with decayed variants).
            if epoch >= 0 :
                trInst.mv_mw_sum += wm[0] * losses_const.shape[0] * np.exp(-1*decay)
                trInst.kl_mw_sum += max_KL_mw * losses_const.shape[0] * np.exp(-1*decay)
                trInst.total_samples += losses_const.shape[0] * np.exp(-1*decay)
            trInst.cum_MV_weight = round(float(trInst.mv_mw_sum/trInst.total_samples), 4)
            trInst.cum_KL_weight = round(float(trInst.kl_mw_sum/trInst.total_samples), 4)
            if epoch == 0:
                # Epoch 0 always applies the initial weight (several
                # alternative schemes were tried and removed here).
                mix_weight = cp(trInst.initial_weight)
            else:
                # Later epochs reuse the weight computed at epoch end below.
                mix_weight = cp(trInst.prev_weight)
            print("{}-{} [seed {}] Applied MW: {:.4f} Curr. KLMW:{:.4f} Curr. MVMW: {:.4f}".format(epoch, batch_idx, trInst.seed, mix_weight, max_KL_mw, wm[0]), "Cum. KLMW: {:.4f} MVMW: {:.4f} ".format(trInst.cum_KL_weight, trInst.cum_MV_weight))
            KL_cum_list.append(KL_val)
        # Combined objective: mix_weight * contrastive + (1-mix_weight) * CE.
        mix_weight = torch.tensor(mix_weight).cuda(trInst.gpu)
        mix_weight.requires_grad = False
        loss_mt = torch.mul(mix_weight, loss_outputs) + torch.mul(1-mix_weight, 1.0*(loss_outputs_ce1 + loss_outputs_ce2))
        loss_mt.backward()
        optimizer.step()
        # Metric 0 scores the contrastive pair, metric 1 the CE head.
        target_source = [target, (label1,)]
        output_sources = [outputs, outputs_ce1]
        for k, metric_instance in enumerate(metric_instances):
            met_target, met_outputs = target_source[k], output_sources[k]
            metric_instance.eval_score(met_outputs, met_target, distance)
        if batch_idx % log_interval == 0:
            message = '[SIAM mixw: {:.1f}] Train: [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(mix_weight.item(),
                batch_idx * len(data[0]), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), np.mean(losses))
            for metric in metric_instances:
                message += '\t{}: {}'.format(metric.name(), metric.value())
            losses = []
    print("\n===Epoch Level mix weight : w1:{:.4f} w2:{:.4f}".format(mix_weight, (1-mix_weight)))
    # End-of-epoch weight update toward the cumulative KL estimate; with
    # beta pinned to 1 this moves prev_weight fully to cum_KL_weight.
    if 0 <= epoch <= 500:
        beta = (0.75) ** (epoch)
        beta = 1
        delta_w = torch.abs(trInst.cum_KL_weight - mix_weight)
        sign_delta_w = torch.sign(trInst.cum_KL_weight - mix_weight)
        trInst.prev_weight = mix_weight + sign_delta_w * delta_w * beta
        print("delta_w:{:.4f} sign: {:.4f}".format(delta_w, sign_delta_w))
        print("====== mix_weight: {:.4f} beta: {:.4f} trInst.cum_KL_weight: {:.4f} result prev_weight: {:.4f}".format(mix_weight, beta, trInst.cum_KL_weight, trInst.prev_weight))
        # Reset the running sums for the next epoch.
        trInst.total_samples = 0
        trInst.kl_mw_sum = 0
        trInst.mv_mw_sum = 0
    mix_weight_list = (str(trInst.seed), str(round(mix_weight.item(),4)), str(trInst.cum_KL_weight))
    total_loss /= (batch_idx + 1)
    ce_loss1 /= (batch_idx + 1)
    ce_loss2 /= (batch_idx + 1)
    return total_loss, ce_loss1, ce_loss2, metric_instances, mix_weight, trInst, mix_weight_list
def test_epoch(val_loader, model, loss_fn, cuda, metric_classes):
gpu=0
with torch.no_grad():
# for metric in metrics:
# metric.reset()
metric_instances=[]
for metric_class in metric_classes:
metric_instance = metric_class()
metric_instances.append(metric_instance)
model.eval()
val_loss = 0
for batch_idx, (data, target) in enumerate(val_loader):
# for batch_idx, (data, target, _,_ ) in enumerate(val_loader):
target = target if len(target) > 0 else None
if not type(data) in (tuple, list):
data = (data,)
if cuda:
data = tuple(d.cuda(gpu) for d in data)
if target is not None:
target = target.cuda(gpu)
outputs = model(*data)
if type(outputs) not in (tuple, list):
outputs = (outputs,)
loss_inputs = outputs
if target is not None:
target = (target,)
loss_inputs += target
# loss_outputs, distance, losses_const = loss_fn(*loss_inputs)
if loss_fn.__class__.__name__ == 'CrossEntropy':
loss_mean, loss_outputs = loss_fn(*loss_inputs)
loss, distance = loss_mean, None
else:
loss_outputs, distance, losses_const = loss_fn(*loss_inputs)
loss = loss_outputs[0] if type(loss_outputs) in (tuple, list) else loss_outputs
for k, metric_instance in enumerate(metric_instances):
met_target, met_outputs = target, outputs
metric_instance.eval_score(met_outputs, met_target, distance)
val_loss += loss.item()
# for metric in metrics:
# metric(outputs, target, loss_outputs)
return val_loss, metric_instances
def test_siam_epoch(gpu, val_loader, epoch, model_org_pack, model, loss_fn_tup, cuda, metric_classes, trInst, MVLW):
    """Evaluate a siamese model (contrastive head + two CE heads) for one epoch.

    Parameters
    ----------
    gpu : device index argument; NOTE(review): the body actually uses
        ``trInst.gpu`` for all transfers, not this parameter.
    val_loader : iterable yielding ``(data, target, label1, label2)``.
    epoch : current epoch number (drives the loss-weighting branch).
    model_org_pack : ``(model_org, optimizer_org, scheduler_org)``; only
        unpacked here, the components are not otherwise used in this body.
    model : network returning ``(output1, output2, score1, score2)`` when
        called with ``data + (None, None)``.
    loss_fn_tup : ``(constrastive_loss_fn, cross_entropy_loss_fn)``.
    cuda : bool; move batches to ``trInst.gpu`` when true.
    metric_classes : metric classes, instantiated fresh for this call.
    trInst : mutable training-state holder (gpu index, cumulative KL/MV
        mixing-weight sums, per-batch tensors for downstream inspection).
    MVLW : minimum-variance loss-weighting flag; NOTE(review): it is
        unconditionally overwritten to ``False`` below, so the whole
        weighting branch is currently dead code — confirm if intentional.

    Returns
    -------
    (val_loss, ce_loss1, ce_loss2, metric_instances)
        ``val_loss`` is the SUM of contrastive losses over batches, but
        ``ce_loss1``/``ce_loss2`` are REASSIGNED each iteration and thus
        hold only the last batch's CE losses (unlike the train-side code,
        which accumulates and averages them).
    """
    with torch.no_grad():
        metric_instances=[]
        for metric_class in metric_classes:
            metric_instance = metric_class()
            metric_instances.append(metric_instance)
        model_org, optimizer_org, scheduler_org = model_org_pack
        model.eval()
        val_loss = 0
        losses = []
        total_loss, ce_loss1, ce_loss2= 0, 0, 0
        loss_list=[ [], [] ]
        KL_cum_list = []
        for batch_idx, (data, target, label1, label2) in enumerate(val_loader):
            target = target if len(target) > 0 else None
            if not type(data) in (tuple, list):
                data = (data,)
            if cuda:
                data = tuple(d.cuda(trInst.gpu) for d in data)
                if target is not None:
                    target = target.cuda(trInst.gpu)
                label1 = label1.cuda(trInst.gpu)
                label2 = label2.cuda(trInst.gpu)
            # The model's extra two inputs are unused at eval time.
            data_siam = data + (None, None)
            output1, output2, score1, score2 = model(*data_siam)
            outputs = (output1, output2)
            outputs_ce1 = (score1,)
            outputs_ce2 = (score2,)
            if type(outputs) not in (tuple, list):
                outputs = (outputs,)
            loss_inputs_cst = outputs
            if target is not None:
                target = (target,)
                loss_inputs_cst += target
            loss_inputs_ce1 = outputs_ce1
            loss_inputs_ce2 = outputs_ce2
            if label1 is not None and label2 is not None:
                loss_inputs_ce1 += (label1,)
                loss_inputs_ce2 += (label2,)
            ### Put data, target, output into trInst
            assert label1.shape[0] == score1.shape[0], "Label and score dimension should match."
            # Expose the current batch on trInst for downstream inspection.
            trInst.model, trInst.data, trInst.target, trInst.outputs, trInst.metric_instances, trInst.label1, trInst.label2= \
                (model, data, target, (output1, output2), metric_instances, label1, label2)
            loss_fn, loss_fn_ce = loss_fn_tup
            loss_outputs, distance, losses_const = loss_fn(*loss_inputs_cst)
            loss_outputs_ce1, losses_ce1 = loss_fn_ce(*loss_inputs_ce1)
            loss_outputs_ce2, losses_ce2 = loss_fn_ce(*loss_inputs_ce2)
            losses = [loss_outputs.item(), loss_outputs_ce1.item(), loss_outputs_ce2.item()]
            # val_loss = loss_outputs.item() + loss_outputs_ce1.item() + loss_outputs_ce2.item()
            val_loss += loss_outputs.item()
            # NOTE(review): '=' not '+=', so only the final batch survives.
            ce_loss1 = loss_outputs_ce1.item()
            ce_loss2 = loss_outputs_ce2.item()
            ### Loss processing
            lcst, lce1, lce2 = (losses_const.detach().cpu().numpy(), losses_ce1.detach().cpu().numpy(), losses_ce2.detach().cpu().numpy())
            # NOTE(review): this overrides the MVLW argument, disabling the
            # entire minimum-variance / KL weighting branch below.
            MVLW = False
            if epoch <= 20 and MVLW:
                iter_loss_cst = lcst
                iter_loss_ce = lce1 + lce2
                wm = get_min_var_result(iter_loss_cst, iter_loss_ce)
                if epoch == 0:
                    trInst, var_init_tup = run_epoch_0_task((epoch, batch_idx), (iter_loss_cst, iter_loss_ce), trInst)
                    (max_KL_mw, KL_val) = var_init_tup
                else:
                    trInst, var_rest_tup = run_epoch_1_task((epoch, batch_idx), (iter_loss_cst, iter_loss_ce), trInst)
                    (max_KL_mw, KL_val) = var_rest_tup
                decay = 0 # decay=epoch for decaying values
                if epoch >= 0 :
                    # Sample-count-weighted running sums of the mixing weights.
                    trInst.mv_mw_sum += wm[0] * losses_const.shape[0] * np.exp(-1*decay)
                    trInst.kl_mw_sum += max_KL_mw * losses_const.shape[0] * np.exp(-1*decay)
                    trInst.total_samples += losses_const.shape[0] * np.exp(-1*decay)
                # print("total_samples:", trInst.total_samples, "kl_mw_sum:", trInst.kl_mw_sum)
                trInst.cum_MV_weight = round(float(trInst.mv_mw_sum/trInst.total_samples), 4)
                trInst.cum_KL_weight = round(float(trInst.kl_mw_sum/trInst.total_samples), 4)
                # NOTE(review): both branches are identical; at eval time the
                # previously fixed weight is always applied.
                if epoch == 0:
                    mix_weight = cp(trInst.prev_weight)
                else:
                    mix_weight = cp(trInst.prev_weight)
                print("VAL {}-{} [seed {}] Applied MW: >>[{:.4f}]<< Current KLMW:{:.4f} ".format(epoch, batch_idx, trInst.seed, mix_weight, max_KL_mw), "Cum. KLMW: {:.4f} ".format(trInst.cum_KL_weight))
                mix_weight = torch.tensor(mix_weight).cuda(trInst.gpu)
                mix_weight.requires_grad = False
                KL_cum_list.append(KL_val)
            # Metric k=0 scores the contrastive outputs against `target`,
            # k=1 scores the first CE head against `label1`.  The loop
            # rebinds `target`/`outputs` (shadows the batch variables).
            target_source = [target, (label1,)]
            output_sources = [outputs, outputs_ce1]
            for k, metric_instance in enumerate(metric_instances):
                target, outputs = target_source[k], output_sources[k]
                metric_instance.eval_score(outputs, target, distance)
    return val_loss, ce_loss1, ce_loss2, metric_instances
def fit(train_loader, val_loader, model, loss_fn, optimizer, scheduler, n_epochs, cuda, log_interval, metrics=[], start_epoch=0):
    """
    Loaders, model, loss function and metrics should work together for a given task,
    i.e. The model should be able to process data output of loaders,
    loss function should process target output of loaders and outputs from the model
    Examples: Classification: batch loader, classification model, NLL loss, accuracy metric
    Siamese network: Siamese loader, siamese model, contrastive loss
    Online triplet learning: batch loader, embedding model, online triplet loss

    ``metrics`` is a list of metric CLASSES; train_epoch/test_epoch
    instantiate them internally and return populated instances, which are
    only used for reporting here.
    """
    # BUGFIX: the previous code rebound `metrics` to the metric INSTANCES
    # returned by train_epoch and then passed those instances to test_epoch,
    # which calls `metric_class()` on each entry — i.e. it called instances
    # as if they were classes.  Keep the original classes in their own name.
    metric_classes = list(metrics)
    # Fast-forward the scheduler when resuming from a checkpoint.
    for epoch in range(0, start_epoch):
        scheduler.step()
    for epoch in range(start_epoch, n_epochs):
        scheduler.step()
        # Train stage
        train_loss, train_metrics = train_epoch(train_loader, model, loss_fn, optimizer, cuda, log_interval, metric_classes)
        message = 'Epoch: {}/{}. Train set: Average loss: {:.4f}'.format(epoch + 1, n_epochs, train_loss)
        for metric in train_metrics:
            message += ' {}: {}'.format(metric.name(), metric.value())
        # Validation stage (test_epoch returns the SUMMED loss; average it)
        val_loss, val_metrics = test_epoch(val_loader, model, loss_fn, cuda, metric_classes)
        val_loss /= len(val_loader)
        message += '\nEpoch: {}/{}. Validation set: Average loss: {:.4f}'.format(epoch + 1, n_epochs,
                                                                                 val_loss)
        for metric in val_metrics:
            message += ' {}: {}'.format(metric.name(), metric.value())
        print(message)
def fit_org(train_loader, val_loader, model, loss_fn, optimizer, scheduler, n_epochs, cuda, log_interval, metric_classes=[], start_epoch=0):
    """
    Loaders, model, loss function and metrics should work together for a given task,
    i.e. The model should be able to process data output of loaders,
    loss function should process target output of loaders and outputs from the model
    Examples: Classification: batch loader, classification model, NLL loss, accuracy metric
    Siamese network: Siamese loader, siamese model, contrastive loss
    Online triplet learning: batch loader, embedding model, online triplet loss

    ``metric_classes`` is a list of metric CLASSES, instantiated inside
    train_epoch/test_epoch; the returned instances are used for reporting.
    """
    # Fast-forward the scheduler when resuming from a checkpoint.
    for epoch in range(0, start_epoch):
        scheduler.step()
    for epoch in range(start_epoch, n_epochs):
        scheduler.step()
        # Train stage
        train_loss, metrics = train_epoch(train_loader, model, loss_fn, optimizer, cuda, log_interval, metric_classes)
        message = 'Epoch: {}/{}. Train set: Average loss: {:.4f}'.format(epoch + 1, n_epochs, train_loss)
        for metric in metrics:
            # FIX: restore the separating space ('{}: {}' ran the loss and the
            # first metric together; every other report line uses ' {}: {}').
            message += ' {}: {}'.format(metric.name(), metric.value())
        # Validation stage (test_epoch returns the SUMMED loss; average it)
        val_loss, metrics = test_epoch(val_loader, model, loss_fn, cuda, metric_classes)
        val_loss /= len(val_loader)
        message += '\nEpoch: {}/{}. Validation set: Average loss: {:.4f}'.format(epoch + 1, n_epochs,
                                                                                 val_loss)
        for metric in metrics:
            message += ' {}: {}'.format(metric.name(), metric.value())
        print(message)
def train_epoch(train_loader, model, loss_fn, optimizer, cuda, log_interval, metric_classes):
    """Train ``model`` for one epoch over ``train_loader``.

    Parameters
    ----------
    train_loader : iterable yielding ``(data, target)`` batches; must also
        expose ``len()`` and ``.dataset`` for progress reporting.
    model : torch module; called as ``model(*data)``.
    loss_fn : callable; a loss whose class is named ``CrossEntropy`` must
        return ``(loss_mean, loss_outputs)``, any other loss must return
        ``(loss_outputs, distance, losses_const)``.
    optimizer : torch optimizer; stepped once per batch.
    cuda : bool; when true, tensors are moved to GPU index 0.
    log_interval : print a progress line every ``log_interval`` batches.
    metric_classes : iterable of metric classes instantiated here and
        updated per batch via ``eval_score``.

    Returns
    -------
    (total_loss, metric_instances) : mean loss over batches and the
        populated metric instances.
    """
    # for metric in metrics:
    #     metric.reset()
    gpu = 0  # hard-coded device index for all .cuda() transfers
    metric_instances=[]
    for metric_class in metric_classes:
        metric_instance = metric_class()
        metric_instances.append(metric_instance)
    model.train()
    losses = []  # window of losses since the last log line (reset on print)
    total_loss = 0
    for batch_idx, (data, target) in enumerate(train_loader):
    # for batch_idx, (data, target, _, _) in enumerate(train_loader):
        target = target if len(target) > 0 else None
        # Normalize a single tensor into a 1-tuple so model(*data) works
        # for both single-input and multi-input models.
        if not type(data) in (tuple, list):
            data = (data,)
        if cuda:
            data = tuple(d.cuda(gpu) for d in data)
            if target is not None:
                target = target.cuda(gpu)
        optimizer.zero_grad()
        outputs = model(*data)
        if type(outputs) not in (tuple, list):
            outputs = (outputs,)
        loss_inputs = outputs
        if target is not None:
            target = (target,)
            loss_inputs += target
        # loss_outputs = loss_fn(*loss_inputs)
        # ipdb.set_trace()
        # The two loss families have different return signatures; the class
        # name distinguishes them (see docstring).
        if loss_fn.__class__.__name__ == 'CrossEntropy':
            loss_mean, loss_outputs = loss_fn(*loss_inputs)
            loss, distance = loss_mean, None
        else:
            loss_outputs, distance, losses_const = loss_fn(*loss_inputs)
            loss = loss_outputs[0] if type(loss_outputs) in (tuple, list) else loss_outputs
        losses.append(loss.item())
        total_loss += loss.item()
        loss.backward()
        optimizer.step()
        # target_source = [target, (label1,)]
        # output_sources = [outputs, outputs_ce1]
        for k, metric_instance in enumerate(metric_instances):
            met_target, met_outputs = target, outputs
            metric_instance.eval_score(met_outputs, met_target, distance)
        if batch_idx % log_interval == 0:
            message = 'Train: [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                batch_idx * len(data[0]), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), np.mean(losses))
            for metric in metric_instances:
                message += '\t{}: {}'.format(metric.name(), metric.value())
            # for metric in metrics:
            #     metric(outputs, target, loss_outputs)
            # if batch_idx % log_interval == 0:
            #     message = 'Train: [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
            #         batch_idx * len(data[0]), len(train_loader.dataset),
            #         100. * batch_idx / len(train_loader), np.mean(losses))
            #     for metric in metrics:
            #         message += '\t{}: {}'.format(metric.name(), metric.value())
            print(message)
            losses = []
    # Average over batches; assumes the loader yielded at least one batch.
    total_loss /= (batch_idx + 1)
    return total_loss, metric_instances
|
import json
import logging
import csv
import pkg_resources
from oic.extension.token import JWTToken
from oic.utils.authn.authn_context import AuthnBroker
from oic.utils.authn.client import verify_client
from oic.utils.authz import AuthzHandling
from oic.utils.keyio import keyjar_init
from oic.utils.sdb import SessionDB
from oic.utils.userinfo import UserInfo
from otest.events import Events
from otest.rp.provider import Provider
logger = logging.getLogger(__name__)
__author__ = 'roland'
def read_uri_schemes(filename):
    """Read a URI-schemes CSV and map scheme -> "description reference".

    The first line of the file must be the CSV header naming (at least)
    the columns 'URI Scheme', 'Description' and 'Reference'.

    :param filename: path of the CSV file
    :return: dict mapping each URI scheme to '<description> <reference>'
    """
    # FIX: the original opened the file and never closed it; use a context
    # manager so the handle is released even on parse errors.
    with open(filename, 'r') as csvfile:
        fieldnames = csvfile.readline().strip().split(',')
        reader = csv.DictReader(csvfile, fieldnames)
        return {r['URI Scheme']: '{} {}'.format(r['Description'], r['Reference'])
                for r in reader}
def read_path2port_map(filename):
    """Parse a two-column CSV of (path, port) rows.

    Column 1 holds a path name and column 2 a port number; the mapping is
    returned inverted, i.e. keyed on the port.

    :param filename: path of the CSV file
    :return: dictionary with port as key and path as value
    """
    with open(filename, 'r') as csvfile:
        return {row[1]: row[0] for row in csv.reader(csvfile)}
def as_arg_setup(args, lookup, config):
    """Assemble the constructor arguments for the test OP (Provider).

    Derives the issuer URL from the CLI arguments (port / TLS / reverse-proxy
    path mapping), builds the authentication broker, user-info backend and
    session DB, and initialises the signing/encryption keyjar.

    :param args: parsed CLI namespace (port, tls, path2port, xport,
        insecure, ...)
    :param lookup: Mako template lookup used for rendered responses
    :param config: configuration module/object (baseurl, AUTHENTICATION,
        USERINFO, SYM_KEY, keys, ...)
    :return: (as_args, key_arg) — Provider keyword arguments and the key
        bundle ({} when config has no 'keys' attribute usable by keyjar_init)
    """
    # Resolve the port: explicit --port wins, otherwise the scheme default.
    if args.port:
        _port = args.port
    else:
        if args.tls:
            _port = 443
        else:
            _port = 80
    if args.path2port:
        # means there is a reverse proxy in front translating
        # path -> port
        p2p_map = read_path2port_map(args.path2port)
        # NOTE(review): the CSV reader keys this map with STRING port
        # values while _port may be an int here — confirm callers pass the
        # matching type, otherwise this raises KeyError.
        _path = p2p_map[_port]
        if args.xport:
            _issuer = "{base}:{port}/{path}".format(base=config.baseurl,
                                                    port=args.xport,
                                                    path=_path)
            _port = args.xport
        else:
            _issuer = "{base}/{path}".format(base=config.baseurl, path=_path)
    else:  # the old port based
        _path = ''
        _issuer = "{base}:{port}".format(base=config.baseurl, port=_port)
    if args.tls and _issuer.startswith('http://'):
        _issuer = _issuer.replace('http://', 'https://')
    cdb = {}  # client database, starts empty
    ac = AuthnBroker()
    # Register every configured authentication method with the broker.
    for authkey, value in list(config.AUTHENTICATION.items()):
        authn = None
        # if "UserPassword" == authkey:
        #     from oic.utils.authn.user import UsernamePasswordMako
        #     authn = UsernamePasswordMako(None, "login.mako", LOOKUP, PASSWD,
        #                                  "authorization")
        if "NoAuthn" == authkey:
            from oic.utils.authn.user import NoAuthn
            authn = NoAuthn(None, user=config.AUTHENTICATION[authkey]["user"])
        if authn is not None:
            ac.add(config.AUTHENTICATION[authkey]["ACR"], authn,
                   config.AUTHENTICATION[authkey]["WEIGHT"])
    # dealing with authorization
    authz = AuthzHandling()
    if config.USERINFO == "SIMPLE":
        # User info is a simple dictionary in this case statically defined in
        # the configuration file
        userinfo = UserInfo(config.USERDB)
    else:
        userinfo = None
    as_args = {
        "name": _issuer,
        'instance_path': _path,
        'instance_port': _port,
        "cdb": cdb,
        "authn_broker": ac,
        "userinfo": userinfo,
        "authz": authz,
        "client_authn": verify_client,
        "symkey": config.SYM_KEY,
        "template_lookup": lookup,
        "template": {"form_post": "form_response.mako"},
        "jwks_name": "./static/jwks_{}.json",
        'event_db': Events(),
    }
    # BEHAVIOR is optional in the configuration.
    try:
        as_args['behavior'] = config.BEHAVIOR
    except AttributeError:
        pass
    com_args = {
        "baseurl": config.baseurl,
    }
    # Subset of as_args shared with the throw-away key-setup Provider below.
    for arg in ['name', 'cdb', 'authn_broker', 'userinfo', 'authz', 'template',
                'jwks_name', 'client_authn', 'symkey', 'template_lookup']:
        com_args[arg] = as_args[arg]
    # Add own keys for signing/encrypting JWTs
    try:
        # a throw-away OP used to do the initial key setup
        _op = Provider(sdb=SessionDB(com_args["baseurl"]), **com_args)
        jwks = keyjar_init(_op, config.keys)
    except KeyError:
        key_arg = {}
    else:
        key_arg = {"jwks": jwks, "keys": config.keys}
        as_args['jwks_name'] = 'static/jwks.json'
        # Persist the public key set where clients can fetch it.
        f = open('static/jwks.json', 'w')
        f.write(json.dumps(jwks))
        f.close()
    # NOTE(review): if keyjar_init raised KeyError above, _op may be unbound
    # here — confirm config always provides 'keys' on supported paths.
    if args.insecure:
        _op.keyjar.verify_ssl = False
    else:
        _op.keyjar.verify_ssl = True
    as_args['keyjar'] = _op.keyjar
    # Session DB with JWT-based access/refresh token factories.
    as_args['sdb'] = SessionDB(
        com_args["baseurl"],
        token_factory=JWTToken('T', keyjar=_op.keyjar,
                               lt_pattern={'code': 3600, 'token': 900},
                               iss=com_args['baseurl'],
                               sign_alg='RS256'),
        refresh_token_factory=JWTToken(
            'R', keyjar=_op.keyjar, lt_pattern={'': 24 * 3600},
            iss=com_args['baseurl'])
    )
    return as_args, key_arg
def main_setup(args, lookup, config):
    """Build the full server setup: Provider args plus OP runtime options.

    Interpolates the port into the configured issuer/service URLs, delegates
    Provider argument construction to :func:`as_arg_setup`, then collects
    the operational options (cookies, debug, base URL, URI schemes,
    profiles) into ``op_arg``.

    NOTE: mutates ``config`` in place (issuer, SERVICE_URL, possibly
    baseurl) and returns it as well.

    :param args: parsed CLI namespace
    :param lookup: Mako template lookup
    :param config: configuration module/object
    :return: (as_args, op_arg, config)
    """
    # The configured URLs contain a '%s'/'%d' placeholder for the port.
    config.issuer = config.issuer % args.port
    config.SERVICE_URL = config.SERVICE_URL % args.port
    as_args, key_arg = as_arg_setup(args, lookup, config)
    kwargs = {
        "template_lookup": lookup,
        "template": {"form_post": "form_response.mako"},
    }
    # Should I care about verifying the certificates used by other entities
    if args.insecure:
        kwargs["verify_ssl"] = False
    else:
        kwargs["verify_ssl"] = True
    # op_arg starts from the key bundle and is extended with options below.
    op_arg = key_arg
    # Cookie settings are optional in the configuration.
    try:
        op_arg["cookie_ttl"] = config.COOKIETTL
    except AttributeError:
        pass
    try:
        op_arg["cookie_name"] = config.COOKIENAME
    except AttributeError:
        pass
    # print URLS
    if args.debug:
        op_arg["debug"] = True
    # # All endpoints the OpenID Connect Provider should answer on
    # add_endpoints(ENDPOINTS)
    # op_arg["endpoints"] = ENDPOINTS
    # Normalise the externally visible base URL (always '/'-terminated).
    if args.port == 80:
        _baseurl = config.baseurl
    else:
        if config.baseurl.endswith("/"):
            config.baseurl = config.baseurl[:-1]
        _baseurl = "%s:%d" % (config.baseurl, args.port)
    if not _baseurl.endswith("/"):
        _baseurl += "/"
    op_arg["baseurl"] = _baseurl
    logger.info('setup kwargs: {}'.format(kwargs))
    # multi_keys config is optional; silently skipped when absent.
    try:
        op_arg["marg"] = multi_keys(as_args, config.multi_keys)
    except AttributeError as err:
        pass
    # Load the bundled IANA URI-scheme registry shipped with otest.
    op_arg['uri_schemes'] = read_uri_schemes(
        pkg_resources.resource_filename('otest', 'uri-schemes-1.csv'))
    # Merge any JSON profile files named on the command line.
    if args.op_profiles:
        profiles = {}
        for p in args.op_profiles:
            profiles.update(json.loads(open(p).read()))
    else:
        profiles = {}
    op_arg['profiles'] = profiles
    logger.info("setup as_args: {}".format(as_args))
    logger.info(" -- op_arg: {}".format(op_arg))
    return as_args, op_arg, config
def multi_keys(as_args, key_conf):
    """Initialise an extra key bundle from ``key_conf``.

    Uses a temporary Provider only as the target for ``keyjar_init``; the
    'm%d' pattern names the generated key files.

    :param as_args: Provider keyword arguments (as built by as_arg_setup)
    :param key_conf: key configuration list for keyjar_init
    :return: {'jwks': <exported JWKS>, 'keys': key_conf}
    """
    # a throw-away OP used to do the initial key setup
    _op = Provider(**as_args)
    jwks = keyjar_init(_op, key_conf, "m%d")
    return {"jwks": jwks, "keys": key_conf}
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import blockchain_pb2 as blockchain__pb2
import common_pb2 as common__pb2
import controller_pb2 as controller__pb2
class RPCServiceStub(object):
    """Client-side stub for the controller.RPCService gRPC service.

    Generated code: each attribute is a unary-unary callable bound to one
    RPC of the service, with the matching protobuf (de)serializers.
    """

    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        self.GetBlockNumber = channel.unary_unary(
                '/controller.RPCService/GetBlockNumber',
                request_serializer=controller__pb2.Flag.SerializeToString,
                response_deserializer=controller__pb2.BlockNumber.FromString,
                )
        self.SendRawTransaction = channel.unary_unary(
                '/controller.RPCService/SendRawTransaction',
                request_serializer=controller__pb2.RawTransaction.SerializeToString,
                response_deserializer=common__pb2.Hash.FromString,
                )
        self.GetBlockByHash = channel.unary_unary(
                '/controller.RPCService/GetBlockByHash',
                request_serializer=common__pb2.Hash.SerializeToString,
                response_deserializer=blockchain__pb2.CompactBlock.FromString,
                )
        self.GetBlockByNumber = channel.unary_unary(
                '/controller.RPCService/GetBlockByNumber',
                request_serializer=controller__pb2.BlockNumber.SerializeToString,
                response_deserializer=blockchain__pb2.CompactBlock.FromString,
                )
        self.GetTransaction = channel.unary_unary(
                '/controller.RPCService/GetTransaction',
                request_serializer=common__pb2.Hash.SerializeToString,
                response_deserializer=controller__pb2.RawTransaction.FromString,
                )
        self.GetSystemConfig = channel.unary_unary(
                '/controller.RPCService/GetSystemConfig',
                request_serializer=common__pb2.Empty.SerializeToString,
                response_deserializer=controller__pb2.SystemConfig.FromString,
                )
        self.GetVersion = channel.unary_unary(
                '/controller.RPCService/GetVersion',
                request_serializer=common__pb2.Empty.SerializeToString,
                response_deserializer=controller__pb2.SoftwareVersion.FromString,
                )
        self.GetBlockHash = channel.unary_unary(
                '/controller.RPCService/GetBlockHash',
                request_serializer=controller__pb2.BlockNumber.SerializeToString,
                response_deserializer=common__pb2.Hash.FromString,
                )
        self.GetTransactionBlockNumber = channel.unary_unary(
                '/controller.RPCService/GetTransactionBlockNumber',
                request_serializer=common__pb2.Hash.SerializeToString,
                response_deserializer=controller__pb2.BlockNumber.FromString,
                )
        self.GetTransactionIndex = channel.unary_unary(
                '/controller.RPCService/GetTransactionIndex',
                request_serializer=common__pb2.Hash.SerializeToString,
                response_deserializer=controller__pb2.TransactionIndex.FromString,
                )
        self.GetPeerCount = channel.unary_unary(
                '/controller.RPCService/GetPeerCount',
                request_serializer=common__pb2.Empty.SerializeToString,
                response_deserializer=controller__pb2.PeerCount.FromString,
                )
class RPCServiceServicer(object):
    """Server-side base class for the controller.RPCService gRPC service.

    Generated code: subclass and override the methods below; each default
    implementation responds UNIMPLEMENTED.
    """

    def GetBlockNumber(self, request, context):
        """flag means latest or pending.
        true means pending, false means latest.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def SendRawTransaction(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetBlockByHash(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetBlockByNumber(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetTransaction(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetSystemConfig(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetVersion(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetBlockHash(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetTransactionBlockNumber(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetTransactionIndex(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetPeerCount(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_RPCServiceServicer_to_server(servicer, server):
    """Register a RPCServiceServicer implementation on a grpc.Server.

    Generated code: wires every RPC of controller.RPCService to the
    corresponding servicer method with its protobuf (de)serializers.
    """
    rpc_method_handlers = {
            'GetBlockNumber': grpc.unary_unary_rpc_method_handler(
                    servicer.GetBlockNumber,
                    request_deserializer=controller__pb2.Flag.FromString,
                    response_serializer=controller__pb2.BlockNumber.SerializeToString,
            ),
            'SendRawTransaction': grpc.unary_unary_rpc_method_handler(
                    servicer.SendRawTransaction,
                    request_deserializer=controller__pb2.RawTransaction.FromString,
                    response_serializer=common__pb2.Hash.SerializeToString,
            ),
            'GetBlockByHash': grpc.unary_unary_rpc_method_handler(
                    servicer.GetBlockByHash,
                    request_deserializer=common__pb2.Hash.FromString,
                    response_serializer=blockchain__pb2.CompactBlock.SerializeToString,
            ),
            'GetBlockByNumber': grpc.unary_unary_rpc_method_handler(
                    servicer.GetBlockByNumber,
                    request_deserializer=controller__pb2.BlockNumber.FromString,
                    response_serializer=blockchain__pb2.CompactBlock.SerializeToString,
            ),
            'GetTransaction': grpc.unary_unary_rpc_method_handler(
                    servicer.GetTransaction,
                    request_deserializer=common__pb2.Hash.FromString,
                    response_serializer=controller__pb2.RawTransaction.SerializeToString,
            ),
            'GetSystemConfig': grpc.unary_unary_rpc_method_handler(
                    servicer.GetSystemConfig,
                    request_deserializer=common__pb2.Empty.FromString,
                    response_serializer=controller__pb2.SystemConfig.SerializeToString,
            ),
            'GetVersion': grpc.unary_unary_rpc_method_handler(
                    servicer.GetVersion,
                    request_deserializer=common__pb2.Empty.FromString,
                    response_serializer=controller__pb2.SoftwareVersion.SerializeToString,
            ),
            'GetBlockHash': grpc.unary_unary_rpc_method_handler(
                    servicer.GetBlockHash,
                    request_deserializer=controller__pb2.BlockNumber.FromString,
                    response_serializer=common__pb2.Hash.SerializeToString,
            ),
            'GetTransactionBlockNumber': grpc.unary_unary_rpc_method_handler(
                    servicer.GetTransactionBlockNumber,
                    request_deserializer=common__pb2.Hash.FromString,
                    response_serializer=controller__pb2.BlockNumber.SerializeToString,
            ),
            'GetTransactionIndex': grpc.unary_unary_rpc_method_handler(
                    servicer.GetTransactionIndex,
                    request_deserializer=common__pb2.Hash.FromString,
                    response_serializer=controller__pb2.TransactionIndex.SerializeToString,
            ),
            'GetPeerCount': grpc.unary_unary_rpc_method_handler(
                    servicer.GetPeerCount,
                    request_deserializer=common__pb2.Empty.FromString,
                    response_serializer=controller__pb2.PeerCount.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'controller.RPCService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class RPCService(object):
    """Static client helpers for controller.RPCService (EXPERIMENTAL gRPC API).

    Generated code: each static method performs a one-shot unary-unary call
    without requiring the caller to build a channel/stub explicitly.
    """

    @staticmethod
    def GetBlockNumber(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/controller.RPCService/GetBlockNumber',
            controller__pb2.Flag.SerializeToString,
            controller__pb2.BlockNumber.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def SendRawTransaction(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/controller.RPCService/SendRawTransaction',
            controller__pb2.RawTransaction.SerializeToString,
            common__pb2.Hash.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetBlockByHash(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/controller.RPCService/GetBlockByHash',
            common__pb2.Hash.SerializeToString,
            blockchain__pb2.CompactBlock.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetBlockByNumber(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/controller.RPCService/GetBlockByNumber',
            controller__pb2.BlockNumber.SerializeToString,
            blockchain__pb2.CompactBlock.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetTransaction(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/controller.RPCService/GetTransaction',
            common__pb2.Hash.SerializeToString,
            controller__pb2.RawTransaction.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetSystemConfig(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/controller.RPCService/GetSystemConfig',
            common__pb2.Empty.SerializeToString,
            controller__pb2.SystemConfig.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetVersion(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/controller.RPCService/GetVersion',
            common__pb2.Empty.SerializeToString,
            controller__pb2.SoftwareVersion.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetBlockHash(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/controller.RPCService/GetBlockHash',
            controller__pb2.BlockNumber.SerializeToString,
            common__pb2.Hash.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetTransactionBlockNumber(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/controller.RPCService/GetTransactionBlockNumber',
            common__pb2.Hash.SerializeToString,
            controller__pb2.BlockNumber.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetTransactionIndex(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/controller.RPCService/GetTransactionIndex',
            common__pb2.Hash.SerializeToString,
            controller__pb2.TransactionIndex.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetPeerCount(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/controller.RPCService/GetPeerCount',
            common__pb2.Empty.SerializeToString,
            controller__pb2.PeerCount.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
class Consensus2ControllerServiceStub(object):
    """Client-side stub for the controller.Consensus2ControllerService.

    Generated code: each attribute is a unary-unary callable bound to one
    RPC of the service, with the matching protobuf (de)serializers.
    """

    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        self.GetProposal = channel.unary_unary(
                '/controller.Consensus2ControllerService/GetProposal',
                request_serializer=common__pb2.Empty.SerializeToString,
                response_deserializer=common__pb2.Hash.FromString,
                )
        self.CheckProposal = channel.unary_unary(
                '/controller.Consensus2ControllerService/CheckProposal',
                request_serializer=common__pb2.Hash.SerializeToString,
                response_deserializer=common__pb2.SimpleResponse.FromString,
                )
        self.CommitBlock = channel.unary_unary(
                '/controller.Consensus2ControllerService/CommitBlock',
                request_serializer=common__pb2.ProposalWithProof.SerializeToString,
                response_deserializer=common__pb2.Empty.FromString,
                )
class Consensus2ControllerServiceServicer(object):
    """Server-side base class for controller.Consensus2ControllerService.

    Generated code: subclass and override the methods below; each default
    implementation responds UNIMPLEMENTED.
    """

    def GetProposal(self, request, context):
        """Consensus request a Proposal to start consensus
        ret: proposal hash
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def CheckProposal(self, request, context):
        """when Consensus received a new proposal from other nodes, it will ask controller to check it
        args: proposal hash
        ret: ok or not
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def CommitBlock(self, request, context):
        """after Consensus, tell controller a proposal has committed
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_Consensus2ControllerServiceServicer_to_server(servicer, server):
    """Register a Consensus2ControllerServiceServicer on a grpc.Server.

    Generated code: wires every RPC of the service to the corresponding
    servicer method with its protobuf (de)serializers.
    """
    rpc_method_handlers = {
            'GetProposal': grpc.unary_unary_rpc_method_handler(
                    servicer.GetProposal,
                    request_deserializer=common__pb2.Empty.FromString,
                    response_serializer=common__pb2.Hash.SerializeToString,
            ),
            'CheckProposal': grpc.unary_unary_rpc_method_handler(
                    servicer.CheckProposal,
                    request_deserializer=common__pb2.Hash.FromString,
                    response_serializer=common__pb2.SimpleResponse.SerializeToString,
            ),
            'CommitBlock': grpc.unary_unary_rpc_method_handler(
                    servicer.CommitBlock,
                    request_deserializer=common__pb2.ProposalWithProof.FromString,
                    response_serializer=common__pb2.Empty.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'controller.Consensus2ControllerService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Consensus2ControllerService(object):
    """Static client helpers for controller.Consensus2ControllerService
    (EXPERIMENTAL gRPC API).

    Generated code: each static method performs a one-shot unary-unary call
    without requiring the caller to build a channel/stub explicitly.
    """

    @staticmethod
    def GetProposal(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/controller.Consensus2ControllerService/GetProposal',
            common__pb2.Empty.SerializeToString,
            common__pb2.Hash.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def CheckProposal(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/controller.Consensus2ControllerService/CheckProposal',
            common__pb2.Hash.SerializeToString,
            common__pb2.SimpleResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def CommitBlock(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/controller.Consensus2ControllerService/CommitBlock',
            common__pb2.ProposalWithProof.SerializeToString,
            common__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
#! /bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from gensim.models.word2vec import Word2Vec
from keras.preprocessing import sequence
import keras.utils
from keras import utils as np_utils
from keras.models import Sequential
from keras.models import model_from_yaml
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
from keras.layers.core import Dense, Dropout, Activation
from sklearn.cross_validation import train_test_split
import yaml
import sys
import multiprocessing
# Allow very deep recursion (used when pickling/saving large models).
sys.setrecursionlimit(1000000)
# Python 2 only: force the process-wide default str encoding to UTF-8.
reload(sys)
sys.setdefaultencoding('utf8')
np.random.seed()
# Hyper-parameter configuration
cpu_count = multiprocessing.cpu_count()  # number of CPUs (Word2Vec worker count)
voc_dim = 150  # word-vector dimensionality
min_out = 10  # minimum word occurrence count kept by Word2Vec
window_size = 7  # Word2Vec sliding-window size
lstm_input = 150  # LSTM input length
epoch_time = 10  # number of training epochs
batch_size = 16  # mini-batch size
def loadfile():
    """Load the positive and negative corpora.

    Reads ``../data/pos.txt`` and ``../data/neg.txt`` (one document per
    line, newline kept) and returns ``(X_Vec, y)`` where ``y`` is 1 for
    positive documents and 0 for negative ones.
    """
    # `with` already closes the files; the original also called f.close()
    # redundantly inside the with-block.
    with open('../data/pos.txt', 'r') as f:
        pos = [line for line in f]
    with open('../data/neg.txt', 'r') as f:
        neg = [line for line in f]
    X_Vec = np.concatenate((pos, neg))
    y = np.concatenate((np.ones(len(pos), dtype=int),
                        np.zeros(len(neg), dtype=int)))
    return X_Vec, y
def onecut(doc):
    """Split *doc* into single "characters".

    The input is expected to be a UTF-8 encoded byte string (Python 2
    ``str``): lead bytes in [0xC0, 0xF8) start a 2-4 byte character which
    is emitted as one token; bytes < 0x80 are ASCII and are accumulated
    into runs that end at a space, '.' or ';'. A byte in [0x80, 0xC0) is a
    UTF-8 continuation byte and must never appear in lead position, hence
    the assertion.
    """
    ret = []
    i = 0
    n = len(doc)
    while i < n:
        lead = ord(doc[i])
        # 0x80-0xBF are continuation bytes: invalid as the first byte of a
        # character. (The original printed the byte and then hit dead code
        # after an always-failing assert.)
        assert not (128 <= lead < 192), lead
        if 192 <= lead < 224:        # 2-byte UTF-8 character
            ret.append(doc[i] + doc[i + 1])
            i += 2
        elif 224 <= lead < 240:      # 3-byte UTF-8 character
            ret.append(doc[i] + doc[i + 1] + doc[i + 2])
            i += 3
        elif 240 <= lead < 248:      # 4-byte UTF-8 character
            ret.append(doc[i] + doc[i + 1] + doc[i + 2] + doc[i + 3])
            i += 4
        else:                        # ASCII run
            assert lead < 128
            c = ""
            while ord(doc[i]) < 128:
                c += doc[i]
                i += 1
                if i == n:
                    break
                # The original compared with `is`, which only worked
                # because CPython interns 1-char strings; use equality.
                if doc[i] == " " or doc[i] == "." or doc[i] == ";":
                    break
            ret.append(c)
    return ret
def one_seq(text):
    """Character-split every document in *text*, skipping very short ones.

    Newlines are stripped before splitting; documents shorter than 3
    characters are dropped.
    """
    split_docs = []
    for document in text:
        if len(document) >= 3:
            split_docs.append(onecut(document.replace('\n', '')))
    return split_docs
def word2vec_train(X_Vec):
    """Train a Word2Vec model on the tokenized corpus and build the
    embedding matrix.

    Returns (input_dim, embedding_weights, w2dic) where row 0 of
    embedding_weights is all zeros (the padding/OOV slot) and w2dic maps
    each vocabulary word to its 1-based row index.

    NOTE(review): uses the pre-4.0 gensim API (``size``, ``iter``,
    ``wv.vocab``) — confirm the pinned gensim version.
    """
    model_word = Word2Vec(size=voc_dim,
                     min_count=min_out,
                     window=window_size,
                     workers=cpu_count,
                     iter=5)
    model_word.build_vocab(X_Vec)
    model_word.train(X_Vec, total_examples=model_word.corpus_count, epochs=model_word.iter)
    model_word.save('../model/Word2Vec.pkl')
    # Index 0 is reserved for words that are out of vocabulary / padding.
    input_dim = len(model_word.wv.vocab.keys()) + 1
    embedding_weights = np.zeros((input_dim, voc_dim))
    w2dic={}
    for i in range(len(model_word.wv.vocab.keys())):
        embedding_weights[i+1, :] = model_word [model_word.wv.vocab.keys()[i]]
        w2dic[model_word.wv.vocab.keys()[i]]=i+1
    return input_dim,embedding_weights,w2dic
def data2inx(w2indx, X_Vec):
    """Map every word of every sentence to its vocabulary index.

    Words missing from *w2indx* map to 0, the padding/OOV slot. (The
    original used a bare ``except:`` for this; ``dict.get`` expresses the
    same fallback without swallowing unrelated errors.)
    """
    data = []
    for sentence in X_Vec:
        data.append([w2indx.get(word, 0) for word in sentence])
    return data
def train_lstm(input_dim, embedding_weights, x_train, y_train, x_test, y_test):
    """Build, train, evaluate and persist the LSTM sentiment classifier.

    Saves the architecture to ../model/lstm.yml and the weights to
    ../model/lstm.h5. Row 0 of the embedding is the padding slot
    (mask_zero=True skips it).
    """
    model = Sequential()
    model.add(Embedding(output_dim=voc_dim,
                        input_dim=input_dim,
                        mask_zero=True,
                        weights=[embedding_weights],
                        input_length=lstm_input))
    model.add(LSTM(128, activation='softsign'))
    model.add(Dropout(0.5))
    # NOTE(review): Dense(2) + sigmoid with binary_crossentropy on one-hot
    # 2-class targets — softmax + categorical_crossentropy is the usual
    # pairing; confirm this is intentional.
    model.add(Dense(2))
    model.add(Activation('sigmoid'))
    print 'Compiling the Model...'
    model.compile(loss='binary_crossentropy',#hinge
                  optimizer='adam', metrics=['mae', 'acc'])
    print "Train..." # batch_size=32
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epoch_time, verbose=1)
    print "Evaluate..."
    score = model.evaluate(x_test, y_test,
                           batch_size=batch_size)
    # Persist architecture (YAML) and weights (HDF5) separately.
    yaml_string = model.to_yaml()
    with open('../model/lstm.yml', 'w') as outfile:
        outfile.write(yaml.dump(yaml_string, default_flow_style=True))
    model.save_weights('../model/lstm.h5')
    print 'Test score:', score
# --- Training pipeline ---
X_Vec, y = loadfile()                    # raw documents + 0/1 labels
X_Vec = one_seq(X_Vec)                   # character-level tokenization
input_dim,embedding_weights,w2dic = word2vec_train(X_Vec)
index = data2inx(w2dic,X_Vec)            # words -> vocabulary indices
# Pad/truncate every sequence to a fixed length (0 = padding slot).
index2 = sequence.pad_sequences(index, maxlen=voc_dim )
x_train, x_test, y_train, y_test = train_test_split(index2, y, test_size=0.2)
# One-hot encode the binary labels for the 2-unit output layer.
y_train = keras.utils.to_categorical(y_train, num_classes=2)
y_test = keras.utils.to_categorical(y_test, num_classes=2)
train_lstm(input_dim, embedding_weights, x_train, y_train, x_test, y_test)
<reponame>scalasm/my-notes<filename>lambda/mynotes/core/notes.py
import logging
import uuid
from abc import ABC, abstractmethod
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from typing import List
from mynotes.core.architecture import (DataPage, DataPageQuery, DomainEntity, ObjectStore, ResourceNotFoundException, User,
wrap_exceptions)
from mynotes.core.utils.common import now
class NoteType(Enum):
    """Supported note types (free notes, interview notes and questions)."""
    FREE = "F"
    # The original line had a stray trailing comma ("I",), which made the
    # member's value the tuple ("I",) instead of the string "I".
    INTERVIEW = "I"
    QUESTION = "Q"
@dataclass
class Note(DomainEntity):
    """Entity class representing metadata associated to a Note entity."""
    # (The docstring above was originally placed *after* the fields, which
    # made it a dead string statement instead of the class __doc__.)

    author_id: str
    type: NoteType
    creation_time: datetime
    tags: List[str]
    version: int

    def __init__(self, id: str = None, author_id: str = None, type: NoteType = None, creation_time: datetime = None, tags: List[str] = None, version: int = None) -> None:
        # Explicit __init__ (dataclass does not overwrite one defined in the
        # class body) so missing values get domain defaults.
        super().__init__(id)
        self.author_id = author_id
        self.creation_time = creation_time
        # Default to a free-form note when no type is given.
        self.type = type or NoteType.FREE
        self.tags = tags
        # Collapses falsy versions (0) to None — presumably "not yet
        # persisted"; TODO confirm against the repository implementation.
        self.version = version or None
class NoteRepository(ABC):
    """Persistence interface (port) for Note entities."""

    @abstractmethod
    def save(self, note: Note) -> None:
        """Create or update *note* in the underlying store."""
        pass

    @abstractmethod
    def find_by_id(self, id: str) -> "Optional[Note]":
        """Return the note with the given id, or a falsy value when absent
        (callers test the result with ``if not note``)."""
        pass

    @abstractmethod
    def delete_by_id(self, id: str) -> None:
        """Delete the note with the given id."""
        pass

    @abstractmethod
    def find_all_by_type(self, note_type: NoteType, data_page_query: DataPageQuery) -> DataPage[Note]:
        """Return one page of notes of *note_type*, per *data_page_query*."""
        pass
@wrap_exceptions
class NoteUseCases:
    """
    Use cases supported for Notes.
    """

    bucket_adapter: ObjectStore
    note_repository: NoteRepository

    def __init__(self, bucket_adapter: ObjectStore, note_repository: NoteRepository) -> None:
        self.bucket_adapter = bucket_adapter
        self.note_repository = note_repository

    def create_note(self, author: User, content: str, tags: List[str] = None) -> Note:
        """
        Create a new note, based on Markdown standard.

        Args:
            author: the user who is creating this note
            content: the Markdown content of the note

        Returns:
            the Note instance representing the created note
        """
        new_note = Note(
            id=str(uuid.uuid4()),
            author_id=author.user_id,
            type=NoteType.FREE,
            creation_time=now(),
            tags=tags or [],
        )
        content_key = self._get_object_key_for_note(new_note.id)
        self.bucket_adapter.store(content_key, content.strip())
        self.note_repository.save(new_note)
        return new_note

    def find_note_by_id(self, note_id: str) -> Note:
        """
        Look up a note by its id.

        Args:
            note_id: the id of the wanted note

        Returns:
            the Note instance matching the required id

        Throws:
            a ResourceNotFoundException if there is no such note
        """
        found = self.note_repository.find_by_id(note_id)
        if found:
            return found
        raise ResourceNotFoundException("Note", note_id)

    def delete_note_by_id(self, note_id: str) -> Note:
        """
        Delete the note with the specified id, if present: removes both the
        repository entry and the stored Markdown content.

        Args:
            note_id: the id of the note to delete
        """
        self.note_repository.delete_by_id(note_id)
        content_key = self._get_object_key_for_note(note_id)
        self.bucket_adapter.delete(content_key)

    def _get_object_key_for_note(self, note_id: str) -> str:
        # Object-store location of a note's Markdown body.
        return f"notes/{note_id}.md"
|
import os
import argparse
import matplotlib.pyplot as plt
from numpy.core.multiarray import empty
import pandas as pd
import numpy as np
import sys
from keras.models import load_model
from keras.backend import clear_session
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.layers import *
from sklearn.preprocessing import MinMaxScaler
# Expect exactly 4 argument tokens: -d <dataset csv> -n <num time series>.
if len(sys.argv) < 5:
    print("Too few arguments")
    quit()
elif len(sys.argv) > 5:
    print("Too many arguments")
    quit()

# Read the arguments
for i in range(1, len(sys.argv)):
    if sys.argv[i] == "-d":
        dataset_name = sys.argv[i+1]
    elif sys.argv[i] == "-n":
        num_time_series = int(sys.argv[i+1])

# Resolve paths relative to this script's *directory*. The original joined
# onto abspath(__file__) itself ("script.py/../..") which only resolves on
# Windows, where ".." is collapsed lexically; on POSIX it fails with
# ENOTDIR. dirname() removes the filename, so one ".." is dropped.
script_dir = os.path.dirname(os.path.abspath(__file__))
csv_path = os.path.join(script_dir, "../../dir/")
csv_path = os.path.join(csv_path, dataset_name)
model_path = os.path.join(script_dir, "../../models/")
# Read the input file: tab-separated, first column is an identifier,
# remaining columns are the time-series values (one series per row).
df = pd.read_csv(csv_path, header=None, delimiter='\t')
file_ids = df.iloc[:, [0]].values
df = df.drop(df.columns[0], axis=1)
# Transpose so each column is one time series, rows are time steps.
df = df.transpose()
# Training data: 80%, Test data: 20%
train_size = int(len(df) * 0.80)
test_size = len(df) - train_size
# Scale values into [0, 1] before feeding the LSTM.
sc = MinMaxScaler(feature_range = (0, 1))
# For each time series get a prediction
for step in range(0, num_time_series):
    # Build supervised windows for this series: 60 past values -> next value.
    X_train = []
    y_train = []
    training_set = df.iloc[:train_size, [step]].values
    training_set_scaled = sc.fit_transform(training_set)
    for i in range(60, train_size):
        X_train.append(training_set_scaled[i-60:i, 0])
        y_train.append(training_set_scaled[i, 0])
    X_train, y_train = np.array(X_train), np.array(y_train)
    # LSTM expects (samples, timesteps, features).
    X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))

    # The test windows need the last 60 training values as context.
    dataset_train = df.iloc[:train_size, [step]]
    dataset_test = df.iloc[train_size:, [step]]
    dataset_total = pd.concat((dataset_train, dataset_test), axis=0)
    inputs = dataset_total[len(dataset_total) - len(dataset_test) - 60:].values
    inputs = inputs.reshape(-1, 1)
    inputs = sc.transform(inputs)
    X_test = []
    for i in range(60, test_size + 60):
        X_test.append(inputs[i-60:i, 0])
    X_test = np.array(X_test)
    X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))

    # Load the pre-trained model for this time series and predict.
    model = load_model(os.path.join(model_path, "Forecast_model" + str(step) + ".h5"))
    predicted_stock_price = model.predict(X_test)
    predicted_stock_price = sc.inverse_transform(predicted_stock_price)

    # Visualising the results
    plt.plot(dataset_test.values, color = 'red', label = 'Real values')
    plt.plot(predicted_stock_price, color = 'blue', label = 'Predicted values')
    plt.title('Time Series Prediction')
    plt.xlabel('Time')
    plt.ylabel('Value')
    plt.legend()
    fig = plt.gcf()
    fig.set_size_inches(18.5, 10.5)
    # Resolve the export dir from the script's directory: the original
    # joined onto abspath(__file__) itself ("script.py/.."), which fails
    # with ENOTDIR on POSIX and only works on Windows.
    export_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              "../../dir/exports/q1/var1/")
    plt.savefig(os.path.join(export_dir, "graph" + str(step) + ".png"))
    plt.clf()

    # Drop the model and clear the Keras session so memory does not grow
    # across iterations.
    del model
    clear_session()
<filename>hyp/base.py
"""Base classes for the ``Responders`` and ``Adapters``.
"""
import json
from .collector import Collector
from six import iteritems
from .helpers import *
from .constants import JSONAPI_VERSION_DICT
class NonCompliantException(Exception):
    """Raised when a response cannot be built in a spec-compliant way."""
    pass
class BaseResponder(object):
    """Builds JSON:API response documents for one resource TYPE.

    Subclasses configure:
      TYPE       -- the resource type string used in documents
      SERIALIZER -- passed to the ADAPTER on construction
      LINKS      -- dict: relationship name -> {'responder': ..., 'key': ...}
      ADAPTER    -- adapter class turning an instance into an attribute dict
    """

    TYPE = None
    SERIALIZER = None
    LINKS = None
    ADAPTER = None

    # Responder can override this if the key to obtain the ID is
    # something other than "id". responses always use the key "id" as per spec
    id_access_key = "id"

    def __init__(self):
        if not self.ADAPTER:
            raise NotImplementedError('Responder must define ADAPTER class variable')
        self.adapter = self.ADAPTER(self.SERIALIZER)

    @classmethod
    def build(cls, *args, **kwargs):
        """Build the response document and return it as a plain dict."""
        return cls()._respond(*args, **kwargs)

    @classmethod
    def respond(cls, *args, **kwargs):
        """Build the response document and return it as a JSON string."""
        return json.dumps(cls()._respond(*args, **kwargs))

    def _respond(self, instance_or_instances, meta=None, error=False, links=None, related=None, collect=False, compound=True):
        """Assemble the top-level document (jsonapi/meta/errors/data/...)."""
        links = self.links(links, related)
        document = {
            "jsonapi": JSONAPI_VERSION_DICT
        }
        if meta is not None:
            document['meta'] = build_meta(meta)
        if error:
            # Assumes the error object is ok since it's pretty loosely spec-ed.
            if isinstance(instance_or_instances, (list, tuple)):
                document['errors'] = list(instance_or_instances)
            else:
                # Wrap a single error object. (The original called
                # list(obj) here, which turns a dict into a list of its
                # keys instead of [obj].)
                document['errors'] = [instance_or_instances]
        else:
            data = {}
            collector = Collector()
            if related is not None:
                self.collect_included(collector, related)
            if links is not None:
                self.collect_links(collector, links)
            if collect:
                links = list(self.LINKS.keys())
                data = self.build_resources(instance_or_instances, links, collector)
            else:
                data = self.build_resources(instance_or_instances, links)
            if compound:
                document['included'] = collector.get_included_resources()
                document['links'] = collector.get_links_dict()
            document['data'] = data
        # Filter out empty values (empty lists/dicts, None).
        return {k: d for k, d in document.items() if d}

    def links(self, links, included):
        # When related instances are supplied, the link keys mirror them.
        if included is not None:
            links = list(included.keys())
        return links

    def collect_links(self, collector, links):
        # Use the collector to build the links structure.
        for key in links:
            collector.link(self, key)

    def collect_included(self, collector, included):
        """Register already-fetched related instances as included resources."""
        for key, instances in iteritems(included):
            link = self.LINKS[key]
            responder = link['responder']()
            for instance in instances:
                resource = responder.build_resource(instance)
                collector.include(responder.TYPE,
                                  responder.pick(instance, responder.id_access_key),
                                  resource)

    def build_resources(self, instance_or_instances, links=None, collector=None):
        builder = lambda instance: self.build_resource(instance, links, collector)
        return self.apply_to_object_or_list(builder, instance_or_instances)

    def build_resource(self, instance, links=None, collector=None, meta=None, selflink=True):
        """Build one resource object: type/id, attributes, links, meta."""
        # pick() supports both mapping and attribute access (the original
        # only supported subscripting here).
        resource = build_resource_identifier(self.TYPE, self.pick(instance, self.id_access_key))
        resource["attributes"] = self.adapter(instance)
        # The ID lives at the resource level, not inside "attributes".
        # Membership test instead of truthiness: also strips falsy IDs
        # (e.g. 0) and tolerates adapters that do not emit the ID at all
        # (the original raised KeyError in that case).
        if self.id_access_key in resource["attributes"]:
            del resource["attributes"][self.id_access_key]
        if links is not None:
            resource['links'] = self.build_resource_links(instance, links, collector)
        if meta is not None:
            resource['meta'] = build_meta(meta)
        return resource

    def build_resource_links(self, instance, links, collector=None):
        """Build the "links" (relationships) mapping for one resource."""
        resource_links = {}
        for link in links:
            properties = self.LINKS[link]
            try:
                key = properties.get('key', link)
                associated = self.pick(instance, key)
                if collector:
                    collector.link(self, link)
            except KeyError:
                # Ignore links when not defined in the object.
                continue
            if isinstance(associated, list):
                # Drop None entries; skip the link entirely when empty.
                associated = [i for i in associated if i is not None]
                if len(associated) == 0:
                    continue
            else:
                if associated is None:
                    continue
            if collector is not None:
                responder = properties['responder']
                builder = lambda instance: self.collect(collector, responder, link, instance, responder.id_access_key)
            else:
                builder = lambda instance: self.pick(instance, self.id_access_key)
            resource_links[link] = self.apply_to_object_or_list(builder, associated)
        return resource_links

    def apply_to_object_or_list(self, func, object_or_list):
        # Map over a list, or apply directly to a single object.
        if isinstance(object_or_list, list):
            return list(map(func, object_or_list))
        else:
            return func(object_or_list)

    def collect(self, collector, responder, type, instance, key):
        """Include *instance* via *responder* and return its id."""
        responder_instance = responder()
        id = self.pick(instance, key)
        # (The original picked the id a second time for this test.)
        if responder.LINKS and id:
            responder_links = list(responder.LINKS.keys())
            resource = responder_instance.build_resource(instance, responder_links, collector)
            collector.include(responder.TYPE, id, resource)
        else:
            collector.include(responder.TYPE, id, instance)
        return id

    def pick(self, instance, key):
        """Read *key* from an object attribute or a mapping item."""
        try:
            return getattr(instance, key)
        except AttributeError:
            return instance[key]
class BaseAdapter(object):
    """Common interface for all ``Adapter`` implementations.

    Concrete adapters are callables that turn a model instance into a
    dictionary of Python primitives; subclasses must implement __call__.
    """
    def __call__(self, instance):
        """Serialize ``instance`` to a dictionary of Python primitives."""
        raise NotImplementedError('Adapter class must define __call__')
|
###
# Copyright (c) 2005,2008, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import time
import queue as Queue
import random
import threading
import supybot.utils as utils
import supybot.world as world
from supybot.commands import *
import supybot.ircmsgs as ircmsgs
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.schedule as schedule
import supybot.callbacks as callbacks
class SqlAlchemyMarkovDB(object):
    """Markov-chain storage backed by SQLAlchemy, one database per channel.

    Tables: ``firsts`` (chain-starting words), ``lasts`` (chain-ending
    words) and ``pairs`` ((first, second) -> follower, with an occurrence
    count used for weighted random choice).
    """
    def __init__(self, filename, engine):
        self.dbs = ircutils.IrcDict()
        self.filename = filename
        self.engine = engine

    def close(self):
        self.dbs.clear()

    def _getDb(self, channel, debug=False):
        """Return (engine, firsts, lasts, pairs) for *channel*, creating the
        schema on first use."""
        if channel in self.dbs:
            return self.dbs[channel]
        try:
            import sqlalchemy as sql
            self.sql = sql
        except ImportError:
            raise callbacks.Error('You need to have SQLAlchemy installed to use this '
                                  'plugin. Download it at <http://www.sqlalchemy.org/>')
        filename = plugins.makeChannelFilename(self.filename, channel)
        engine = sql.create_engine(self.engine + filename, echo=debug)
        metadata = sql.MetaData()
        firsts = sql.Table('firsts', metadata,
                           sql.Column('id', sql.Integer, primary_key=True),
                           sql.Column('first', sql.Text, unique=True),
                           sql.Column('count', sql.Integer, default=1),
                           )
        lasts = sql.Table('lasts', metadata,
                          sql.Column('id', sql.Integer, primary_key=True),
                          sql.Column('last', sql.Text, unique=True),
                          sql.Column('count', sql.Integer, default=1),
                          )
        # NOTE(review): default=sql.null passes the *function* as default;
        # sql.null() is presumably intended — confirm before changing the
        # schema of existing databases.
        pairs = sql.Table('pairs', metadata,
                          sql.Column('id', sql.Integer, primary_key=True),
                          sql.Column('first', sql.Text, default=sql.null),
                          sql.Column('second', sql.Text, default=sql.null),
                          sql.Column('follow', sql.Text, default=sql.null),
                          sql.Column('count', sql.Integer, default=1),
                          sql.UniqueConstraint('first', 'second', 'follow'),
                          )
        metadata.create_all(engine)
        self.dbs[channel] = (engine, firsts, lasts, pairs)
        return self.dbs[channel]

    def _addFirst(self, db, table, first):
        s = self.sql.select([table.c.count], table.c.first == first)
        r = db.execute(s).fetchone()
        if r is None:
            db.execute(table.insert(), first=first).close()
        else:
            # Restrict the update to this row: the original issued an
            # unfiltered UPDATE that bumped the count of *every* row.
            db.execute(table.update(table.c.first == first),
                       count=r[0] + 1).close()

    def _addLast(self, db, table, last):
        s = self.sql.select([table.c.count], table.c.last == last)
        r = db.execute(s).fetchone()
        if r is None:
            db.execute(table.insert(), last=last).close()
        else:
            # Same missing-WHERE fix as in _addFirst.
            db.execute(table.update(table.c.last == last),
                       count=r[0] + 1).close()

    def addPair(self, channel, first, second, follower, isFirst, isLast):
        """Record one (first, second) -> follower observation."""
        (db, firsts, lasts, pairs) = self._getDb(channel)
        if isFirst:
            self._addFirst(db, firsts, follower)
            return
        if isLast:
            self._addLast(db, lasts, second)
        whereclause = self.sql.and_(pairs.c.first == first,
                                    pairs.c.second == second,
                                    pairs.c.follow == follower)
        r = db.execute(self.sql.select([pairs.c.count], whereclause)).fetchone()
        if r is None:
            db.execute(pairs.insert(), first=first, second=second,
                       follow=follower).close()
        else:
            # Same missing-WHERE fix as in _addFirst/_addLast.
            db.execute(pairs.update(whereclause), count=r[0] + 1).close()

    def _weightedChoice(self, results):
        # Expand each row `count` times so a uniform choice is weighted.
        L = []
        for t in results:
            c = t[-1]
            while c > 0:
                c -= 1
                L.append(t[:-1])
        return utils.iter.choice(L)

    def getFirstPair(self, channel):
        """Return a weighted random (None, first-word) pair; KeyError if empty."""
        (db, _, _, pairs) = self._getDb(channel)
        s = self.sql.select([pairs.c.first, pairs.c.second, pairs.c.count],
                            pairs.c.first == None)
        results = db.execute(s)
        r = results.fetchall()
        results.close()
        if not r:
            raise KeyError
        return self._weightedChoice(r)

    def getFollower(self, channel, first, second):
        """Return (follower, isLast) for the pair; KeyError if unknown."""
        (db, _, _, pairs) = self._getDb(channel)
        s = self.sql.select([pairs.c.first, pairs.c.second,
                             pairs.c.follow, pairs.c.count],
                            self.sql.and_(pairs.c.first == first,
                                          pairs.c.second == second))
        results = db.execute(s)
        r = results.fetchall()
        results.close()
        if not r:
            raise KeyError
        # (Leftover debug prints removed here.)
        L = self._weightedChoice(r)
        isLast = not L[-1]
        return (L[-2], isLast)

    def firsts(self, channel):
        (db, firsts, _, _) = self._getDb(channel)
        results = db.execute(self.sql.select([firsts.c.count]))
        r = results.fetchall()
        results.close()
        if not r:
            return 0
        return sum([x[0] for x in r])

    def lasts(self, channel):
        (db, _, lasts, _) = self._getDb(channel)
        results = db.execute(self.sql.select([lasts.c.count]))
        r = results.fetchall()
        results.close()
        if not r:
            return 0
        return sum([x[0] for x in r])

    def pairs(self, channel):
        (db, _, _, pairs) = self._getDb(channel)
        results = db.execute(self.sql.select([pairs.c.count]))
        r = results.fetchall()
        results.close()
        if not r:
            return 0
        return sum([x[0] for x in r])

    def follows(self, channel):
        (db, _, _, pairs) = self._getDb(channel)
        s = self.sql.select([pairs.c.count],
                            self.sql.not_(pairs.c.follow == None))
        results = db.execute(s)
        r = results.fetchall()
        results.close()
        if not r:
            return 0
        return sum([x[0] for x in r])
class DbmMarkovDB(object):
    """Markov-chain storage backed by the stdlib ``dbm`` module.

    One dbm file per channel. The key "<first> <second>" maps to a
    space-separated list of followers; sentinel keys built from newline
    tokens hold the chain firsts and lasts (see _getDb's comment).
    """
    def __init__(self, filename):
        self.dbs = ircutils.IrcDict()
        self.filename = filename
    def close(self):
        for db in self.dbs.values():
            db.close()
    def _getDb(self, channel):
        import dbm
        if channel not in self.dbs:
            filename = plugins.makeChannelFilename(self.filename, channel)
            # To keep the code simpler for addPair, I decided not to make
            # self.dbs[channel]['firsts'] and ['lasts']. Instead, we'll pad
            # the words list being sent to addPair such that ['\n \n'] will be
            # ['firsts'] and ['\n'] will be ['lasts'].
            self.dbs[channel] = dbm.open(filename, 'c')
        return self.dbs[channel]
    def _flush(self, db):
        # Not every dbm backend exposes the same persistence hook.
        if hasattr(db, 'sync'):
            db.sync()
        if hasattr(db, 'flush'):
            db.flush()
    def _addPair(self, channel, pair, follow):
        db = self._getDb(channel)
        # EW! but necessary since not all dbm backends support
        # "combined in db"
        # NOTE(review): str keys vs. the encoded membership test appear to
        # address the same entry because dbm encodes str keys as UTF-8 —
        # confirm for the gdbm/ndbm backends in use.
        if pair.encode('utf-8') in db:
            db[pair] = b' '.join([db[pair], follow.encode('utf-8')])
        else:
            db[pair] = follow.encode('utf-8')
        self._flush(db)
    def _combine(self, first, second):
        # None sentinels become newline tokens (chain start/end markers).
        first = first or '\n'
        second = second or '\n'
        return '%s %s' % (first, second)
    def addPair(self, channel, first, second, follower, isFirst, isLast):
        # `isFirst` is unused here: firsts are represented by the padded
        # (None, None) pair itself.
        combined = self._combine(first, second)
        self._addPair(channel, combined, follower or '\n')
        if isLast:
            self._addPair(channel, '\n', second)
    def getFirstPair(self, channel):
        """Return (None, first-word) uniformly at random; KeyError if none."""
        db = self._getDb(channel)
        # NOTE(review): db values are bytes under Python 3, so the chosen
        # word here is bytes while other code paths use str — verify the
        # Python 3 port end to end.
        firsts = db['\n \n'].split()
        if firsts:
            return (None, utils.iter.choice(firsts))
        else:
            raise KeyError('No firsts for %s.' % channel)
    def getFollower(self, channel, first, second):
        """Return (follower, isLast) for the pair (KeyError if unseen)."""
        db = self._getDb(channel)
        followers = db[self._combine(first, second)]
        # NOTE(review): under Python 3 `followers` is bytes, so
        # .split(' ') (str separator) and the '\n' comparison below look
        # like str/bytes mismatches — confirm.
        follower = utils.iter.choice(followers.split(' '))
        last = False
        if follower == '\n':
            follower = None
            last = True
        return (follower, last)
    def firsts(self, channel):
        db = self._getDb(channel)
        if b'\n \n' in db:
            return len(set(db['\n \n'].split()))
        else:
            return 0
    def lasts(self, channel):
        db = self._getDb(channel)
        if b'\n' in db:
            return len(set(db['\n'].split()))
        else:
            return 0
    def pairs(self, channel):
        db = self._getDb(channel)
        # Real pairs are every key that is not a newline sentinel.
        pairs = [k for k in db.keys() if b'\n' not in k]
        return len(pairs)
    def follows(self, channel):
        db = self._getDb(channel)
        # dbm sucks in that we're not guaranteed to have .iteritems()
        # *cough*gdbm*cough*, so this has to be done the stupid way
        follows = [len([f for f in db[k].split() if f != b'\n'])
                   for k in db.keys() if b'\n' not in k]
        return sum(follows)
# Factory that selects the configured backend ('dbm' or 'sqlalchemy') for
# the per-channel Markov database.
MarkovDB = plugins.DB('Markov', {'dbm': DbmMarkovDB,
                                 'sqlalchemy': SqlAlchemyMarkovDB})
class MarkovWorkQueue(threading.Thread):
    """Serializes all Markov DB access onto a single daemon worker thread.

    Callers enqueue callables that take the DB as their only argument;
    die() enqueues a None sentinel that stops the worker and closes the DB.
    """
    def __init__(self, *args, **kwargs):
        name = 'Thread #%s (MarkovWorkQueue)' % world.threadsSpawned
        world.threadsSpawned += 1
        threading.Thread.__init__(self, name=name)
        self.db = MarkovDB(*args, **kwargs)
        self.q = Queue.Queue()
        self.killed = False
        # Thread.setDaemon() is deprecated (removed in modern Python);
        # assign the attribute instead.
        self.daemon = True
        self.start()
    def die(self):
        """Stop the worker after already-queued work drains."""
        self.killed = True
        self.q.put(None)
    def enqueue(self, f):
        """Schedule f(db) to run on the worker thread."""
        self.q.put(f)
    def run(self):
        while not self.killed:
            f = self.q.get()
            if f is not None:
                f(self.db)
        self.db.close()
# Supybot plugin: learns Markov chains from channel traffic and replays
# randomly-generated sentences on command or spontaneously (throttled).
class Markov(callbacks.Plugin):
    def __init__(self, irc):
        # All DB access is funneled through a single worker thread.
        self.q = MarkovWorkQueue()
        self.__parent = super(Markov, self)
        self.__parent.__init__(irc)
        self.lastSpoke = time.time()
    def die(self):
        self.q.die()
        self.__parent.die()
    def tokenize(self, m):
        """Split a privmsg into words; CTCP (non-action) yields nothing."""
        if ircmsgs.isAction(m):
            return ircmsgs.unAction(m).split()
        elif ircmsgs.isCtcp(m):
            return []
        else:
            return m.args[1].split()
    def doPrivmsg(self, irc, msg):
        if irc.isChannel(msg.args[0]):
            speakChan = msg.args[0]
            dbChan = plugins.getChannel(speakChan)
            canSpeak = False
            now = time.time()
            throttle = self.registryValue('randomSpeaking.throttleTime',
                                          speakChan)
            prob = self.registryValue('randomSpeaking.probability', speakChan)
            delay = self.registryValue('randomSpeaking.maxDelay', speakChan)
            # Only speak spontaneously when outside the throttle window.
            if now > self.lastSpoke + throttle:
                canSpeak = True
            if canSpeak and random.random() < prob:
                f = self._markov(speakChan, irc, prefixNick=False,
                                 to=speakChan, Random=True)
                # Run the generated chain later, on the DB worker thread.
                schedule.addEvent(lambda: self.q.enqueue(f), now + delay)
                self.lastSpoke = now + delay
            words = self.tokenize(msg)
            # This shouldn't happen often (CTCP messages being the possible
            # exception)
            if not words:
                return
            if self.registryValue('ignoreBotCommands', speakChan) and \
               callbacks.addressed(irc.nick, msg):
                return
            # Pad with None sentinels so every (first, second, follower)
            # window is well-defined at both chain ends.
            words.insert(0, None)
            words.insert(0, None)
            words.append(None)
            # Nested closure (shadows the method name on purpose): runs on
            # the worker thread with the DB as its argument.
            def doPrivmsg(db):
                for (first, second, follower) in utils.seq.window(words, 3):
                    db.addPair(dbChan, first, second, follower,
                               isFirst=(first is None and second is None),
                               isLast=(follower is None))
            self.q.enqueue(doPrivmsg)
    def _markov(self, channel, irc, word1=None, word2=None, **kwargs):
        """Return a closure that generates and replies with one chain."""
        def f(db):
            minLength = self.registryValue('minChainLength', channel)
            maxTries = self.registryValue('maxAttempts', channel)
            Random = kwargs.pop('Random', None)
            while maxTries > 0:
                maxTries -= 1
                # Seed the chain from the given word(s), or from a random
                # recorded first pair.
                if word1 and word2:
                    words = [word1, word2]
                    resp = [word1]
                    follower = word2
                elif word1 or word2:
                    words = [None, word1 or word2]
                    resp = []
                    follower = words[-1]
                else:
                    try:
                        # words is of the form [None, word]
                        words = list(db.getFirstPair(channel))
                        resp = []
                        follower = words[-1]
                    except KeyError:
                        irc.error(
                            format('I don\'t have any first pairs for %s.',
                                   channel))
                        return # We can't use raise here because the exception
                               # isn't caught and therefore isn't sent to the
                               # server
                last = False
                while not last:
                    resp.append(follower)
                    try:
                        (follower, last) = db.getFollower(channel, words[-2],
                                                          words[-1])
                    except KeyError:
                        irc.error('I found a broken link in the Markov chain. '
                                  ' Maybe I received two bad links to start '
                                  'the chain.')
                        return # ditto here re: Raise
                    words.append(follower)
                if len(resp) >= minLength:
                    irc.reply(' '.join(resp), **kwargs)
                    return
                else:
                    # Chain too short: retry until maxTries is exhausted.
                    continue
            if not Random:
                irc.error(
                    format('I was unable to generate a Markov chain at least '
                           '%n long.', (minLength, 'word')))
            else:
                self.log.debug('Not randomSpeaking. Unable to generate a '
                               'Markov chain at least %n long.',
                               (minLength, 'word'))
        return f
    def markov(self, irc, msg, args, channel, word1, word2):
        """[<channel>] [word1 [word2]]

        Returns a randomly-generated Markov Chain generated sentence from the
        data kept on <channel> (which is only necessary if not sent in the
        channel itself). If word1 and word2 are specified, they will be used
        to start the Markov chain.
        """
        f = self._markov(channel, irc, word1, word2,
                         prefixNick=False, Random=False)
        self.q.enqueue(f)
    markov = wrap(markov, ['channeldb', optional('something'),
                           additional('something')])
    def firsts(self, irc, msg, args, channel):
        """[<channel>]

        Returns the number of Markov's first links in the database for
        <channel>.
        """
        def firsts(db):
            irc.reply(
                format('There are %s firsts in my Markov database for %s.',
                       db.firsts(channel), channel))
        self.q.enqueue(firsts)
    firsts = wrap(firsts, ['channeldb'])
    def lasts(self, irc, msg, args, channel):
        """[<channel>]

        Returns the number of Markov's last links in the database for
        <channel>.
        """
        def lasts(db):
            irc.reply(
                format('There are %i lasts in my Markov database for %s.',
                       db.lasts(channel), channel))
        self.q.enqueue(lasts)
    lasts = wrap(lasts, ['channeldb'])
    def pairs(self, irc, msg, args, channel):
        """[<channel>]

        Returns the number of Markov's chain links in the database for
        <channel>.
        """
        def pairs(db):
            irc.reply(
                format('There are %i pairs in my Markov database for %s.',
                       db.pairs(channel), channel))
        self.q.enqueue(pairs)
    pairs = wrap(pairs, ['channeldb'])
    def follows(self, irc, msg, args, channel):
        """[<channel>]

        Returns the number of Markov's third links in the database for
        <channel>.
        """
        def follows(db):
            irc.reply(
                format('There are %i follows in my Markov database for %s.',
                       db.follows(channel), channel))
        self.q.enqueue(follows)
    follows = wrap(follows, ['channeldb'])
    def stats(self, irc, msg, args, channel):
        """[<channel>]

        Returns all stats (firsts, lasts, pairs, follows) for <channel>'s
        Markov database.
        """
        def stats(db):
            irc.reply(
                format('Firsts: %i; Lasts: %i; Pairs: %i; Follows: %i',
                       db.firsts(channel), db.lasts(channel),
                       db.pairs(channel), db.follows(channel)))
        self.q.enqueue(stats)
    stats = wrap(stats, ['channeldb'])
# Supybot loads the plugin through this module-level name.
Class = Markov

# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
import FWCore.ParameterSet.Config as cms
# This is used to modify parameters for Run 2 (see bottom of file)
#Global fast calorimetry parameters
from FastSimulation.Calorimetry.HcalResponse_cfi import *
from FastSimulation.Calorimetry.HSParameters_cfi import *
#from FastSimulation.Configuration.CommonInputs_cff import *
from FastSimulation.Calorimetry.ECALResponse_cfi import *
# Master FastSim calorimetry parameter block consumed by the FamosCalorimetry
# producers. Values below are tuning constants; do not reorder or reformat.
FamosCalorimetryBlock = cms.PSet(
    Calorimetry = cms.PSet(
        #ECALScaleBlock, # comment out to disable scaling
        HSParameterBlock,
        HCALResponseBlock,
        ECAL = cms.PSet(
            # See FastSimulation/CaloRecHitsProducer/python/CaloRecHits_cff.py
            Digitizer = cms.untracked.bool(False),
            # If set to true the simulation in ECAL would be done 1X0 by 1X0
            # this is slow but more adapted to detailed studies.
            # Otherwise roughly 5 steps are used.
            bFixedLength = cms.bool(False),
            # For the core 10% of the spots for
            CoreIntervals = cms.vdouble(100.0, 0.1),
            # change the radius of the tail of the shower
            RTFactor = cms.double(1.0),
            # change the radius of the core of the shower
            RCFactor = cms.double(1.0),
            # For the tail 10% of r<1RM. 100% otherwise
            TailIntervals = cms.vdouble(1.0, 0.1, 100.0, 1.0),
            FrontLeakageProbability = cms.double(1.0),
            GridSize = cms.int32(7),
            # change globally the Moliere radius
            ### changed after tuning - Feb - July - <NAME>
            #RadiusFactor = cms.double(1.096),
            RadiusFactorEB = cms.double(1.096),
            RadiusFactorEE = cms.double(1.25),
            ### changed after tuning - Feb - July - <NAME>
            RadiusPreshowerCorrections = cms.vdouble(0.137, 10.3), # default value for maxshower depth dependence-->works fine
            MipsinGeV = cms.vdouble(0.0001421,0.0000812), # increase in mipsinGeV by 75% only in layer1
            #SpotFraction < 0 <=> deactivated. In the case, CoreIntervals and
            #TailIntervals are used
            SpotFraction = cms.double(-1.0),
            GapLossProbability = cms.double(0.9),
            SimulatePreshower = cms.bool(True)
        ),
        ForwardCalorimeterProperties = cms.PSet(
            HadronicCalorimeterProperties= cms.PSet(
                HCAL_Sampling = cms.double(0.0035),
                # Watch out ! The following two values are defined wrt the electron shower simulation
                # They are not directly related to the detector properties
                HCAL_PiOverE = cms.double(0.2),
                # HCAL_PiOverE = cms.double(0.4)
                HCALAeff= cms.double(55.845),
                HCALZeff= cms.double(26),
                HCALrho= cms.double(7.87),
                HCALradiationLengthIncm= cms.double(1.757),
                HCALradLenIngcm2= cms.double(13.84),
                HCALmoliereRadius= cms.double(1.719),
                HCALcriticalEnergy= cms.double(21E-3),
                HCALinteractionLength= cms.double(16.77),
                HCALetatow=cms.vdouble( 0.000, 0.087, 0.174, 0.261, 0.348, 0.435, 0.522, 0.609, 0.696, 0.783, 0.870, 0.957, 1.044, 1.131, 1.218, 1.305, 1.392, 1.479, 1.566, 1.653, 1.740, 1.830, 1.930, 2.043, 2.172, 2.322, 2.500, 2.650, 2.853, 3.000, 3.139, 3.314, 3.489, 3.664, 3.839, 4.013, 4.191, 4.363, 4.538, 4.716, 4.889, 5.191),
                # HCALDepthLam=cms.vdouble( 8.930, 9.001, 9.132, 8.912, 8.104, 8.571, 8.852, 9.230, 9.732, 10.29, 10.95, 11.68, 12.49, 12.57, 12.63, 6.449, 5.806, 8.973, 8.934, 8.823, 8.727, 8.641, 8.565, 8.496, 8.436, 8.383, 8.346, 8.307, 8.298, 8.281, 9.442, 9.437, 9.432, 9.429, 9.432, 9.433, 9.430, 9.437, 9.442, 9.446, 9.435)
                HCALDepthLam=cms.vdouble(8.014, 8.078, 8.195, 7.998, 7.273, 7.692, 7.944, 8.283, 8.734, 9.235, 9.827, 10.482, 11.209, 11.281, 11.335, 5.788, 5.211, 8.053, 8.018, 7.918, 7.832, 7.755, 7.687, 7.625, 7.571, 7.523, 7.490, 7.455, 7.447, 7.432, 8.474, 8.469, 8.465, 8.462, 8.465, 8.466, 8.463, 8.469, 8.474, 8.477, 8.467)
            ),
        ),
        CalorimeterProperties = cms.PSet(
            # triplet for each p value: p, k_e(p), k_h(p) ...
            RespCorrP = cms.vdouble(1.0, 1.0, 1.0, 1000.0, 1.0, 1.0),
            PreshowerLayer2_thickness = cms.double(0.38), # layer2 thickness back to original
            ECALEndcap_LightCollection = cms.double(0.023),
            PreshowerLayer1_thickness = cms.double(1.65), # increase in thickness of layer 1 by 3%
            PreshowerLayer1_mipsPerGeV = cms.double(17.85), # 50% decrease in mipsperGeV
            PreshowerLayer2_mipsPerGeV = cms.double(59.5),
            ECALBarrel_LightCollection = cms.double(0.03),
            HadronicCalorimeterProperties= cms.PSet(
                HCAL_Sampling = cms.double(0.0035),
                # Watch out ! The following two values are defined wrt the electron shower simulation
                # They are not directly related to the detector properties
                HCAL_PiOverE = cms.double(0.2),
                # HCAL_PiOverE = cms.double(0.4)
                HCALAeff= cms.double(63.546),
                HCALZeff= cms.double(29.),
                HCALrho= cms.double(8.960),
                HCALradiationLengthIncm= cms.double(1.43),
                HCALradLenIngcm2= cms.double(12.86),
                HCALmoliereRadius= cms.double(1.712),
                HCALcriticalEnergy= cms.double(18.63E-3),
                HCALinteractionLength= cms.double(15.05),
                HCALetatow=cms.vdouble( 0.000, 0.087, 0.174, 0.261, 0.348, 0.435, 0.522, 0.609, 0.696, 0.783, 0.870, 0.957, 1.044, 1.131, 1.218, 1.305, 1.392, 1.479, 1.566, 1.653, 1.740, 1.830, 1.930, 2.043, 2.172, 2.322, 2.500, 2.650, 2.853, 3.000, 3.139, 3.314, 3.489, 3.664, 3.839, 4.013, 4.191, 4.363, 4.538, 4.716, 4.889, 5.191),
                HCALDepthLam=cms.vdouble( 8.930, 9.001, 9.132, 8.912, 8.104, 8.571, 8.852, 9.230, 9.732, 10.29, 10.95, 11.68, 12.49, 12.57, 12.63, 6.449, 5.806, 8.973, 8.934, 8.823, 8.727, 8.641, 8.565, 8.496, 8.436, 8.383, 8.346, 8.307, 8.298, 8.281, 9.442, 9.437, 9.432, 9.429, 9.432, 9.433, 9.430, 9.437, 9.442, 9.446, 9.435)
            ),
            BarrelCalorimeterProperties = cms.PSet(
                #====== Geometrical material properties ========
                # Light Collection efficiency
                lightColl = cms.double(0.03),
                # Light Collection uniformity
                lightCollUnif = cms.double(0.003),
                # Photostatistics (photons/GeV) in the homogeneous material
                photoStatistics = cms.double(50.E3),
                # Thickness of the detector in cm
                thickness = cms.double(23.0),
                #====== Global parameters of the material ========
                # Interaction length in cm
                interactionLength = cms.double(18.5),
                Aeff = cms.double(170.87),
                Zeff = cms.double(68.36),
                rho = cms.double(8.280),
                # Radiation length in g/cm^2
                radLenIngcm2 = cms.double(7.37),
                # ===== Those parameters might be entered by hand
                # or calculated out of the previous ones
                # Radiation length in cm. If value set to -1, FastSim uses internally the
                # formula radLenIngcm2/rho
                radLenIncm = cms.double(0.89),
                # Critical energy in GeV. If value set to -1, FastSim uses internally the
                # formula (2.66E-3*(x0*Z/A)^1.1): 8.74E-3 for ECAL EndCap
                criticalEnergy = cms.double(8.74E-3),
                # Moliere Radius in cm.If value set to -1, FastSim uses internally the
                # formula : Es/criticalEnergy*X0 with Es=sqrt(4*Pi/alphaEM)*me*c^2=0.0212 GeV
                # This value is known to be 2.190 cm for ECAL Endcap, but the formula gives 2.159 cm
                moliereRadius = cms.double(2.190),
                #====== Parameters for sampling ECAL ========
                # Sampling Fraction: Fs = X0eff/(da+dp) where X0eff is the average X0
                # of the active and passive media and da/dp their thicknesses
                Fs = cms.double(0.0),
                # e/mip for the calorimeter. May be estimated by 1./(1+0.007*(Zp-Za))
                ehat = cms.double(0.0),
                # a rough estimate of ECAL resolution sigma/E = resE/sqrt(E)
                # it is used to generate Nspots in radial profiles.
                resE = cms.double(1.),
                # the width in cm of the active layer
                da = cms.double(0.2),
                # the width in cm of the passive layer
                dp = cms.double(0.8),
                # Is a homogeneous detector?
                bHom = cms.bool(True),
                # Activate the LogDebug
                debug = cms.bool(False)
            ),
            # NOTE: identical to BarrelCalorimeterProperties except for `thickness`
            # (22.0 vs 23.0) and `lightColl` (0.023 vs 0.03).
            EndcapCalorimeterProperties = cms.PSet(
                #====== Geometrical material properties ========
                # Light Collection efficiency
                lightColl = cms.double(0.023),
                # Light Collection uniformity
                lightCollUnif = cms.double(0.003),
                # Photostatistics (photons/GeV) in the homogeneous material
                photoStatistics = cms.double(50.E3),
                # Thickness of the detector in cm
                thickness = cms.double(22.0),
                #====== Global parameters of the material ========
                # Interaction length in cm
                interactionLength = cms.double(18.5),
                Aeff = cms.double(170.87),
                Zeff = cms.double(68.36),
                rho = cms.double(8.280),
                # Radiation length in g/cm^2
                radLenIngcm2 = cms.double(7.37),
                # ===== Those parameters might be entered by hand
                # or calculated out of the previous ones
                # Radiation length in cm. If value set to -1, FastSim uses internally the
                # formula radLenIngcm2/rho
                radLenIncm = cms.double(0.89),
                # Critical energy in GeV. If value set to -1, FastSim uses internally the
                # formula (2.66E-3*(x0*Z/A)^1.1): 8.74E-3 for ECAL EndCap
                criticalEnergy = cms.double(8.74E-3),
                # Moliere Radius in cm.If value set to -1, FastSim uses internally the
                # formula : Es/criticalEnergy*X0 with Es=sqrt(4*Pi/alphaEM)*me*c^2=0.0212 GeV
                # This value is known to be 2.190 cm for ECAL Endcap, but the formula gives 2.159 cm
                moliereRadius = cms.double(2.190),
                #====== Parameters for sampling ECAL ========
                # Sampling Fraction: Fs = X0eff/(da+dp) where X0eff is the average X0
                # of the active and passive media and da/dp their thicknesses
                Fs = cms.double(0.0),
                # e/mip for the calorimeter. May be estimated by 1./(1+0.007*(Zp-Za))
                ehat = cms.double(0.0),
                # a rough estimate of ECAL resolution sigma/E = resE/sqrt(E)
                # it is used to generate Nspots in radial profiles.
                resE = cms.double(1.),
                # the width in cm of the active layer
                da = cms.double(0.2),
                # the width in cm of the passive layer
                dp = cms.double(0.8),
                # Is a homogeneous detector?
                bHom = cms.bool(True),
                # Activate the LogDebug
                debug = cms.bool(False)
            )
        ),
        Debug = cms.untracked.bool(False),
        useDQM = cms.untracked.bool(False),
        # EvtsToDebug = cms.untracked.vuint32(487),
        HCAL = cms.PSet(
            SimMethod = cms.int32(0), ## 0 - use HDShower, 1 - use HDRShower, 2 - GFLASH
            GridSize = cms.int32(7),
            #-- 0 - simple response, 1 - parametrized response + showering, 2 - tabulated response + showering
            SimOption = cms.int32(2),
            Digitizer = cms.untracked.bool(False),
            samplingHBHE = cms.vdouble(125.44, 125.54, 125.32, 125.13, 124.46,
                125.01, 125.22, 125.48, 124.45, 125.90,
                125.83, 127.01, 126.82, 129.73, 131.83,
                143.52, # HB
                210.55, 197.93, 186.12, 189.64, 189.63,
                190.28, 189.61, 189.60, 190.12, 191.22,
                190.90, 193.06, 188.42, 188.42), #HE
            samplingHF = cms.vdouble(0.383, 0.368),
            samplingHO = cms.vdouble(231.0, 231.0, 231.0, 231.0, 360.0,
                360.0, 360.0, 360.0, 360.0, 360.0,
                360.0, 360.0, 360.0, 360.0, 360.0),
            # ietaShift*/timeShift* pairs: first ieta tower index of each
            # subdetector and per-tower time offsets (ns).
            ietaShiftHB = cms.int32(1),
            timeShiftHB = cms.vdouble(6.9, 6.9, 7.1, 7.1, 7.3, 7.5, 7.9, 8.3, 8.7, 9.1, 9.5, 10.3, 10.9, 11.5, 12.3, 14.1),
            ietaShiftHE = cms.int32(16),
            timeShiftHE = cms.vdouble(16.9, 15.7, 15.3, 15.3, 15.1, 14.9, 14.7, 14.7, 14.5, 14.5, 14.3, 14.3, 14.5, 13.9),
            ietaShiftHO = cms.int32(1),
            timeShiftHO = cms.vdouble(13.7, 13.7, 13.9, 14.1, 15.1, 15.7, 16.5, 17.3, 18.1, 19.1, 20.3, 21.9, 23.3, 25.5, 26.1),
            ietaShiftHF = cms.int32(29),
            timeShiftHF = cms.vdouble(50.7, 52.5, 52.9, 53.9, 54.5, 55.1, 55.1, 55.7, 55.9, 56.1, 56.1, 56.1, 56.5),
        ),
        HFShower = cms.PSet(
            ProbMax = cms.double(1.0),
            CFibre = cms.double(0.5),
            OnlyLong = cms.bool(True)
        ),
        HFShowerLibrary = cms.PSet(
            useShowerLibrary = cms.untracked.bool(True),
            useCorrectionSL = cms.untracked.bool(True),
            # Run 1 default; run2_common swaps this file (see bottom of file).
            FileName = cms.FileInPath('SimG4CMS/Calo/data/HFShowerLibrary_oldpmt_noatt_eta4_16en_v3.root'),
            BackProbability = cms.double(0.2),
            TreeEMID = cms.string('emParticles'),
            TreeHadID = cms.string('hadParticles'),
            Verbosity = cms.untracked.bool(False),
            ApplyFiducialCut = cms.bool(True),
            BranchEvt = cms.untracked.string(''),
            BranchPre = cms.untracked.string(''),
            BranchPost = cms.untracked.string('')
        )
    ),
    GFlash = cms.PSet(
        GflashExportToFastSim = cms.bool(True),
        GflashHadronPhysics = cms.string('QGSP_BERT'),
        GflashEMShowerModel = cms.bool(False),
        GflashHadronShowerModel = cms.bool(True),
        GflashHcalOuter = cms.bool(False),
        GflashHistogram = cms.bool(False),
        GflashHistogramName = cms.string('gflash_histogram.root'),
        Verbosity = cms.untracked.int32(0),
        bField = cms.double(3.8),
        watcherOn = cms.bool(False),
        tuning_pList = cms.vdouble()
    )
)
# Enable digitization for both ECAL and HCAL, overriding the untracked
# defaults declared inside FamosCalorimetryBlock.
FamosCalorimetryBlock.Calorimetry.ECAL.Digitizer = True
FamosCalorimetryBlock.Calorimetry.HCAL.Digitizer = True
from Configuration.Eras.Modifier_run2_common_cff import run2_common
# Run 2: switch the HF shower library to the new-PMT file.
run2_common.toModify(FamosCalorimetryBlock.Calorimetry.HFShowerLibrary, FileName = 'SimG4CMS/Calo/data/HFShowerLibrary_npmt_noatt_eta4_16en_v4.root' )
|
import os
import glob
import nska_deserialize as nd
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows
def get_bundle_id_and_names_from_plist(library_plist_file_path):
    '''Parses Library.plist and returns a dictionary where Key=Bundle_ID, Value=Bundle_Name'''
    # NOTE(review): the plist's own key/value pairs are inverted here
    # (bundle_info[v] = k) — presumably the plist maps name -> id; confirm.
    bundle_info = {}
    # Fix: use a context manager so the file is closed even if
    # deserialize_plist raises (the old open/close pair leaked on error).
    with open(library_plist_file_path, 'rb') as f:
        plist = nd.deserialize_plist(f)
        for k, v in plist.items():
            bundle_info[v] = k
    return bundle_info
def get_bundle_info(files_found):
    '''Locate UserNotificationsServer/Library.plist among the found files and
    return its bundle map; empty dict when no such plist exists.'''
    for found in files_found:
        path = str(found)
        # Direct hit: the plist itself was returned by the search.
        if path.endswith('Library.plist') and os.path.dirname(path).endswith('UserNotificationsServer'):
            return get_bundle_id_and_names_from_plist(path)
        # Filesystem search may only return the top-level folder,
        # so look for the plist inside it.
        if path.endswith('UserNotificationsServer'):
            candidate = os.path.join(path, 'Library.plist')
            if os.path.exists(candidate):
                return get_bundle_id_and_names_from_plist(candidate)
    return {}
def get_notificationsXII(files_found, report_folder, seeker):
    '''Parse iOS 12+ DeliveredNotifications plists under the first found path
    and write HTML, TSV and timeline artifact reports.

    files_found: search results; the first entry is the root folder to walk.
    report_folder: destination folder for generated reports.
    seeker: unused here; kept for the standard artifact-plugin signature.
    '''
    bundle_info = get_bundle_info(files_found)
    data_list = []
    exportedbplistcount = 0
    pathfound = str(files_found[0])
    # logfunc(f'Posix to string is: {pathfound}')
    # Fix: pre-seed filepath so the report call below cannot raise NameError
    # when the glob yields no files at all.
    filepath = pathfound
    for filepath in glob.iglob(pathfound + "/**", recursive=True):
        if os.path.isfile(filepath): # filter dirs
            file_name = os.path.splitext(os.path.basename(filepath))[0]
            if filepath.endswith('DeliveredNotifications.plist'):
                # The parent folder name is the owning bundle id.
                bundle_id = os.path.basename(os.path.dirname(filepath))
                # Fix: context manager closes the handle even on the early
                # 'continue' below (the old code leaked it in that branch).
                with open(filepath, "rb") as p:
                    plist = nd.deserialize_plist(p)
                # Empty plist will be { 'root': None }
                if isinstance(plist, dict):
                    continue # skip it, it's empty
                # Good plist will be a list of dicts
                for item in plist:
                    creation_date = ''
                    title = ''
                    subtitle = ''
                    message = ''
                    other_dict = {}
                    bundle_name = bundle_info.get(bundle_id, bundle_id)
                    #if bundle_name == 'com.apple.ScreenTimeNotifications':
                    #    pass # has embedded plist!
                    for k, v in item.items():
                        if k == 'AppNotificationCreationDate': creation_date = str(v)
                        elif k == 'AppNotificationMessage': message = v
                        elif k == 'AppNotificationTitle': title = v
                        elif k == 'AppNotificationSubtitle': subtitle = v
                        else:
                            if isinstance(v, bytes):
                                logfunc(f'Found binary data, look into this one later k={k}!')
                            elif isinstance(v, dict):
                                pass # recurse look for plists #TODO
                            elif isinstance(v, list):
                                pass # recurse look for plists #TODO
                            other_dict[k] = str(v)
                    if subtitle:
                        title += f'[{subtitle}]'
                    data_list.append((creation_date, bundle_name, title, message, str(other_dict)))
            elif "AttachmentsList" in file_name:
                pass # future development
    description = 'iOS > 12 Notifications'
    # NOTE(review): 'Notificatons' typo below is kept byte-identical —
    # fixing it would change the generated report; confirm before changing.
    report = ArtifactHtmlReport('iOS Notificatons')
    report.start_artifact_report(report_folder, 'iOS Notifications', description)
    report.add_script()
    data_headers = ('Creation Time', 'Bundle', 'Title[Subtitle]', 'Message', 'Other Details')
    report.write_artifact_data_table(data_headers, data_list, filepath)
    report.end_artifact_report()
    logfunc("Total notifications processed:" + str(len(data_list)))
    #logfunc("Total exported bplists from notifications:" + str(exportedbplistcount))
    tsvname = 'Notifications'
    tsv(report_folder, data_headers, data_list, tsvname)
    tlactivity = 'Notifications'
    timeline(report_folder, tlactivity, data_list, data_headers)
    if len(data_list) == 0:
        logfunc("No notifications found.")
|
<reponame>camiloaruiz/goatools
"""Test TermCounts object used in Resnik and Lin similarity calculations."""
from __future__ import print_function
import os
import sys
from goatools.base import get_godag
from goatools.associations import dnld_assc
from goatools.semantic import TermCounts
from goatools.semantic import get_info_content
def test_semantic_similarity(usr_assc=None):
    """Computing basic semantic similarities between GO terms.

    Downloads each association file (or just *usr_assc* when given), builds
    TermCounts from it, and prints information-content values at three
    count thresholds.
    """
    go2obj = get_go2obj()
    # goids = go2obj.keys()
    # Association files to exercise; commented-out entries are known-problematic.
    associations = [
        'gene_association.GeneDB_Lmajor',
        'gene_association.GeneDB_Pfalciparum',
        'gene_association.GeneDB_Tbrucei',
        'gene_association.GeneDB_tsetse',
        'gene_association.PAMGO_Atumefaciens',
        'gene_association.PAMGO_Ddadantii',
        #'gene_association.PAMGO_Mgrisea', # TBD Resolve DB_Name containing '|'
        'gene_association.PAMGO_Oomycetes',
        'gene_association.aspgd',
        'gene_association.cgd',
        'gene_association.dictyBase',
        'gene_association.ecocyc',
        'gene_association.fb',
        'gene_association.gonuts',
        #'gene_association.gramene_oryza', # DB_Name
        'gene_association.jcvi',
        'gene_association.mgi',
        'gene_association.pombase',
        'gene_association.pseudocap',
        'gene_association.reactome',
        'gene_association.rgd',
        'gene_association.sgd',
        'gene_association.sgn',
        'gene_association.tair',
        'gene_association.wb',
        'gene_association.zfin',
        'goa_chicken.gaf',
        'goa_chicken_complex.gaf',
        'goa_chicken_isoform.gaf',
        'goa_chicken_rna.gaf',
        'goa_cow.gaf',
        'goa_cow_complex.gaf',
        'goa_cow_isoform.gaf',
        'goa_cow_rna.gaf',
        'goa_dog.gaf',
        'goa_dog_complex.gaf',
        'goa_dog_isoform.gaf',
        'goa_dog_rna.gaf',
        'goa_human.gaf',
        'goa_human_complex.gaf',
        'goa_human_isoform.gaf',
        'goa_human_rna.gaf',
        'goa_pdb.gaf',
        'goa_pig.gaf',
        'goa_pig_complex.gaf',
        'goa_pig_isoform.gaf',
        'goa_pig_rna.gaf',
        #'goa_uniprot_all.gaf',
        #'goa_uniprot_all_noiea.gaf',
    ]
    # A user-specified association replaces the whole default list.
    if usr_assc is not None:
        associations = [usr_assc]
    cwd = os.getcwd()
    for assc_name in associations: # Limit test numbers for speed
        # Get all the annotations from arabidopsis.
        assc_gene2gos = dnld_assc(os.path.join(cwd, assc_name), go2obj, prt=None)
        # Calculate the information content of the single term, GO:0048364
        #     "Information content (GO:0048364) = 7.75481392334
        # First get the counts of each GO term.
        termcounts = TermCounts(go2obj, assc_gene2gos)
        go_cnt = termcounts.gocnts.most_common()
        #print termcounts.gocnts.most_common()
        if go_cnt:
            print("\n{ASSC}".format(ASSC=assc_name))
            print(sorted(termcounts.aspect_counts.most_common()))
            gocnt_max = go_cnt[0][1]
            # Report IC at the highest count and at 1/2 and 1/10 of it.
            prt_info(termcounts, go_cnt, None)
            prt_info(termcounts, go_cnt, gocnt_max/2.0)
            prt_info(termcounts, go_cnt, gocnt_max/10.0)
def prt_info(termcounts, go_cnt, max_val):
    """Print the information content of a frequently used GO ID."""
    goid, count = get_goid(go_cnt, max_val)
    info = get_info_content(goid, termcounts)
    term_name = termcounts.go2obj[goid].name
    print('Information content ({GO} {CNT:7,}) = {INFO:8.6f} {NAME}'.format(
        GO=goid, CNT=count, INFO=info, NAME=term_name))
def get_goid(go_cnt, max_val):
    """Get frequently used GO ID."""
    if max_val is None:
        # No threshold: take the most common entry.
        return go_cnt[0][0], go_cnt[0][1]
    # First entry whose count drops below the threshold.
    for goid, count in go_cnt:
        if count < max_val:
            return goid, count
    # Nothing below threshold: fall back to the least common entry.
    return go_cnt[-1][0], go_cnt[-1][1]
def get_go2obj():
    """Read GODag and return go2obj."""
    obo_path = os.path.join(os.getcwd(), "go-basic.obo")
    dag = get_godag(obo_path, loading_bar=None)
    # Drop obsolete terms.
    return {goid: term for goid, term in dag.items() if not term.is_obsolete}
if __name__ == '__main__':
    # Optional CLI argument: a single association file name to test
    # instead of the full default list.
    ASSC_NAME = None if len(sys.argv) == 1 else sys.argv[1]
    test_semantic_similarity(ASSC_NAME)
|
from django.contrib import admin
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import Group
from common.constants import GROUP_WORKING_TYPE_CHOICES
from accounts.forms import GroupForm, UserCreateForm, UserSetPasswordForm, UserForm, ConfigurationForm, AuthorityForm, \
GroupInviteForm, AuthorityInviteForm
from accounts.models import User, UserDevice, Configuration, NearbyArea, Authority, GroupInvite, CustomPermission, \
RoleCustomPermission, Party, AuthorityInvite
# NOTE(review): this class deliberately shadows the imported django
# UserAdmin name; the base class is resolved before the rebinding, so it
# works, but a distinct name would be clearer.
class UserAdmin(UserAdmin):
    """Custom user admin: domain-aware creation form and extra profile fields."""
    # Fieldsets shown on the "add user" page only.
    add_fieldsets = (
        (None,
         {'fields': ('domain', 'username', 'password', 'email')}
         ),
        ('Personal Info',
         {'fields': ('first_name', 'last_name', 'contact', 'telephone', 'status')}
         ),
        ('Permissions',
         {'fields': ('is_superuser', 'is_staff', 'is_active')}
         ),
    )
    # Fieldsets shown on the change page; adds domains and date fields.
    fieldsets = (
        (None,
         {'fields': ('username', 'password', 'email')}
         ),
        ('Personal Info',
         {'fields': ('first_name', 'last_name', 'contact', 'telephone', 'status')}
         ),
        ('Permissions',
         {'fields': ('is_superuser', 'is_staff', 'is_active', 'domain', 'domains')}
         ),
        ('Important dates',
         {'fields': ('date_joined', 'last_login')}
         ),
    )
    form = UserForm
    add_form = UserCreateForm
    change_password_form = UserSetPasswordForm
    list_display = ('username', 'first_name', 'last_name', 'is_staff', 'date_joined')

    def get_form(self, request, obj=None, **kwargs):
        """Attach the requesting user to the form so it can record the creator."""
        form = super(UserAdmin, self).get_form(request, obj, **kwargs)
        form.created_by = request.user
        return form
class GroupAdmin(admin.ModelAdmin):
    """Group admin showing the human-readable working-type label."""
    form = GroupForm
    list_display = ('name', 'group_type', )
    list_filter = ('type', )

    def group_type(self, obj):
        # Second element of the matching choice pair is the display label.
        label = GROUP_WORKING_TYPE_CHOICES[obj.type][1]
        return "{}".format(label)
class ConfigurationAdmin(admin.ModelAdmin):
    """System configuration entries, searchable by system and key."""
    form = ConfigurationForm
    search_fields = ('system', 'key',)
    list_display = ('system', 'key', 'value')
class AuthorityAdmin(admin.ModelAdmin):
    """Authority admin; several relation fields are hidden from the form (see exclude)."""
    form = AuthorityForm
    search_fields = ('name', 'code',)
    readonly_fields = ('report_types',)
    exclude = ('tags', 'area', 'users', 'inherits', 'deep_subscribes', 'admins')
class AuthorityInviteAdmin(admin.ModelAdmin):
    """Authority invites; code and expiry are generated, hence read-only."""
    form = AuthorityInviteForm
    search_fields = ('authority__name',)
    readonly_fields = ('code', 'expired_at',)
    list_display = ('code', 'authority', 'status', 'created_at', 'expired_at')
class GroupInviteAdmin(admin.ModelAdmin):
    """Group invite admin listing invite name and code."""
    form = GroupInviteForm
    list_display = ('name', 'code')
class UserDeviceAdmin(admin.ModelAdmin):
    """Registered user devices."""
    list_display = ('user', 'device_id')
class RoleCustomPermissionAdmin(admin.ModelAdmin):
    """Per-role custom permission assignments."""
    list_display = ('role', 'role_custom_permissions')
class PartyAdmin(admin.ModelAdmin):
    """Party admin; join_code is generated, users managed elsewhere."""
    list_display = ('name',)
    readonly_fields = ('join_code',)
    exclude = ('users',)
# Wire up the admin site. The stock Group admin is unregistered; the custom
# GroupAdmin registration below is currently disabled.
admin.site.register(User, UserAdmin)
admin.site.register(UserDevice, UserDeviceAdmin)
admin.site.unregister(Group)
# admin.site.register(Group, GroupAdmin)
admin.site.register(Configuration, ConfigurationAdmin)
admin.site.register(NearbyArea)
admin.site.register(Authority, AuthorityAdmin)
admin.site.register(AuthorityInvite, AuthorityInviteAdmin)
admin.site.register(GroupInvite, GroupInviteAdmin)
admin.site.register(CustomPermission)
admin.site.register(RoleCustomPermission, RoleCustomPermissionAdmin)
admin.site.register(Party, PartyAdmin)
"""
Coupling Matrix
===============================================================================
>>> from techminer2 import *
>>> directory = "/workspaces/techminer2/data/"
>>> coupling_matrix(
... top_n=15,
... column='references',
... directory=directory,
... ).head()
document Chen T et al, 2016, PROC ACM SIGKDD INT CONF KNOW ... Saberi S et al, 2019, INT J PROD RES
global_citations 8178 ... 592
local_citations 1 ... 0
document global_citations local_citations ...
Chen T et al, 2016, PROC ACM SIGKDD INT CONF KNOW 8178 1 13 ... 0
Silver D et al, 2016, NATURE 6359 1 0 ... 0
Lundberg SM et al, 2017, ADV NEURAL INF PROCES ... 1949 2 0 ... 0
Geissdoerfer M et al, 2017, J CLEAN PROD 1600 0 0 ... 0
Young T et al, 2018, IEEE COMPUT INTELL MAG 1071 1 0 ... 0
<BLANKLINE>
[5 rows x 15 columns]
>>> coupling_matrix(
... column='author_keywords',
... min_occ=3,
... top_n=50,
... directory=directory,
... ).head()
document Schueffel P et al, 2016, J INNOV MANAG ... Kou G et al, 2021, FINANCIAL INNOV
global_citations 106 ... 14
local_citations 14 ... 0
document global_citations local_citations ...
Schueffel P et al, 2016, J INNOV MANAG 106 14 4 ... 0
Zavolokina L et al, 2016, FINANCIAL INNOV 43 7 1 ... 0
<NAME> et al, 2016, FINANCIAL INNOV 24 4 0 ... 0
<NAME> et al, 2016, FOUND MANAG 16 3 0 ... 0
<NAME> et al, 2017, NEW POLIT ECON 146 15 0 ... 0
<BLANKLINE>
[5 rows x 39 columns]
"""
from os.path import join
import numpy as np
import pandas as pd
from .load_all_documents import load_all_documents
from .load_filtered_documents import load_filtered_documents
from .records2documents import records2documents
from .tf_matrix import tf_matrix
# pylint: disable=C0103
# pylint: disable=too-many-arguments
# pylint: disable=invalid-name
def coupling_matrix(
    column,
    top_n=100,
    min_occ=1,
    metric="global_citations",
    directory="./",
):
    """Build a document-by-document coupling matrix.

    Couples through cited references when column == "references",
    otherwise through shared values of *column* (e.g. author_keywords).
    """
    if column == "references":
        documents = pd.read_csv(join(directory, "references.csv"))
        matrix = coupling_by_references_matrix_(
            top_n=top_n,
            metric=metric,
            directory=directory,
        )
    else:
        documents = load_all_documents(directory=directory)
        matrix = coupling_by_column_matrix_(
            column=column,
            min_occ=min_occ,
            top_n=top_n,
            metric=metric,
            directory=directory,
        )
    # Translate record numbers in the index/columns into document labels.
    return records2documents(matrix=matrix, documents=documents)
# ---------------------------------------------------------------------------------------
def coupling_by_references_matrix_(
    top_n=100,
    metric="global_citations",
    directory="./",
):
    """Coupling matrix through shared cited references for the top_n documents by *metric*."""
    # selects the top_n most cited documents
    documents = load_filtered_documents(directory=directory)
    documents = documents.sort_values(by=metric, ascending=False)
    documents = documents.head(top_n)
    record_no = documents.record_no.values.copy()
    # loads the cited references table
    cited_references = pd.read_csv(join(directory, "cited_references_table.csv"))
    # Keep only citing documents among the selected top_n.
    cited_references = cited_references.loc[
        cited_references.citing_id.map(lambda x: x in record_no)
    ]
    cited_references["value"] = 1
    cited_references = cited_references.drop_duplicates()
    # Binary incidence matrix: citing document x cited reference.
    matrix_in_columns = cited_references.pivot(
        index="citing_id", columns="cited_id", values="value"
    )
    matrix_in_columns = matrix_in_columns.fillna(0)
    # A @ A.T counts shared references for each document pair.
    matrix_values = np.matmul(
        matrix_in_columns.values, matrix_in_columns.transpose().values
    )
    # ---< index based on citations >----------------------------------------------------
    record_no2global_citations = dict(
        zip(documents.record_no, documents.global_citations)
    )
    record_no2local_citations = dict(
        zip(documents.record_no, documents.local_citations)
    )
    global_citations = [
        record_no2global_citations[record_no] for record_no in matrix_in_columns.index
    ]
    local_citations = [
        record_no2local_citations[record_no] for record_no in matrix_in_columns.index
    ]
    # (record_no, global_citations, local_citations) triples as MultiIndex levels.
    new_index = pd.MultiIndex.from_tuples(
        [
            (record_no, global_citation, local_citation)
            for record_no, global_citation, local_citation in zip(
                matrix_in_columns.index, global_citations, local_citations
            )
        ],
    )
    # -----------------------------------------------------------------------------------
    coupling_matrix = pd.DataFrame(
        matrix_values,
        columns=new_index,
        index=new_index,
    )
    # ---< remove rows and columns with no associations >---------------------------------
    coupling_matrix = coupling_matrix.loc[:, (coupling_matrix != 0).any(axis=0)]
    coupling_matrix = coupling_matrix.loc[(coupling_matrix != 0).any(axis=1), :]
    coupling_matrix = coupling_matrix.astype(int)
    return coupling_matrix
# ---------------------------------------------------------------------------------------
def coupling_by_column_matrix_(
    column,
    min_occ=1,
    top_n=100,
    metric="global_citations",
    sep="; ",
    directory="./",
):
    """Coupling matrix through shared values of *column* for the top_n documents by *metric*."""
    # Term-frequency matrix: document x column value.
    matrix_in_columns = tf_matrix(
        directory=directory,
        column=column,
        min_occ=min_occ,
        sep=sep,
    )
    documents = load_filtered_documents(directory=directory)
    documents = documents.sort_values(by=metric, ascending=False)
    record_no = documents.head(top_n).record_no.values.copy()
    # Restrict to the selected top_n documents present in the TF matrix.
    matrix_in_columns = matrix_in_columns.loc[
        matrix_in_columns.index.intersection(record_no)
    ]
    # A @ A.T counts shared column values for each document pair.
    matrix_values = np.matmul(
        matrix_in_columns.values, matrix_in_columns.transpose().values
    )
    # ---< index based on citations >----------------------------------------------------
    record_no2global_citations = dict(
        zip(documents.record_no, documents.global_citations)
    )
    record_no2local_citations = dict(
        zip(documents.record_no, documents.local_citations)
    )
    global_citations = [
        record_no2global_citations[record_no] for record_no in matrix_in_columns.index
    ]
    local_citations = [
        record_no2local_citations[record_no] for record_no in matrix_in_columns.index
    ]
    # (record_no, global_citations, local_citations) triples as MultiIndex levels.
    new_index = pd.MultiIndex.from_tuples(
        [
            (record_no, global_citation, local_citation)
            for record_no, global_citation, local_citation in zip(
                matrix_in_columns.index, global_citations, local_citations
            )
        ],
    )
    # -----------------------------------------------------------------------------------
    coupling_matrix = pd.DataFrame(
        matrix_values,
        columns=new_index,
        index=new_index,
    )
    # ---< remove rows and columns with no associations >---------------------------------
    coupling_matrix = coupling_matrix.loc[:, (coupling_matrix != 0).any(axis=0)]
    coupling_matrix = coupling_matrix.loc[(coupling_matrix != 0).any(axis=1), :]
    coupling_matrix = coupling_matrix.astype(int)
    return coupling_matrix
|
<filename>src/zsl/utils/deploy/js_model_generator.py
"""
:mod:`zsl.utils.deploy.js_model_generator`
------------------------------------------
.. moduleauthor:: <NAME>
"""
from __future__ import unicode_literals
from builtins import object, range
import hashlib
import importlib
import json
import sys
from typing import Union
import sqlalchemy.exc
from sqlalchemy.orm import class_mapper
from zsl.utils.deploy.integrator import integrate_to_file
from zsl.utils.string_helper import camelcase_to_underscore, underscore_to_camelcase
# Backbone model + collection definition, rendered once per SQLAlchemy model.
# Double braces escape literal JS braces for str.format().
model_tpl = """ {model_prefix}{model_name} = {model_fn}.extend({{
urlRoot: App.service_url + 'resource/{resource_name}',
schema: {schema}
}});
{collection_prefix}{model_name} = {collection_fn}.extend({{
model: {model_prefix}{model_name},
url: {model_prefix}{model_name}.prototype.urlRoot
}});
"""
# Options-loader callback for foreign-key select fields.
list_opts_tpl = """function(callback, field) {{
field.setOptions(new {collection_prefix}{model_name}([],{{limit: 'unlimited'}}));
}}"""
class ModelGenerator(object):
    """Generates Backbone.js model/collection snippets from SQLAlchemy models."""

    def __init__(self, module, model_prefix="", collection_prefix="", model_fn="Atteq.bb.Model",
                 collection_fn="Atteq.bb.Collection"):
        self.model_prefix = model_prefix
        self.collection_prefix = collection_prefix
        self.model_fn = model_fn
        self.collection_fn = collection_fn
        # Maps DB table name -> model class name; filled by _map_table_name.
        self.table_to_class = {}
        self.models = importlib.import_module(module)

    def _get_list_options(self, column):
        """Render the JS options-loader callback for a foreign-key column."""
        fk = list(column.foreign_keys)[0]
        table_name = underscore_to_camelcase(fk.column.table.name)
        return list_opts_tpl.format(collection_prefix=self.collection_prefix, model_name=table_name)

    def _map_table_name(self, model_names):
        """
        For foreign keys we need to resolve a model class from a table
        name, so build that mapping here.
        """
        for model in model_names:
            if isinstance(model, tuple):
                # (singular, plural) -> use the singular class name.
                model = model[0]
            try:
                model_cls = getattr(self.models, model)
                self.table_to_class[class_mapper(model_cls).tables[0].name] = model
            except AttributeError:
                # Name not present in the module; skip silently.
                pass

    def generate_model(self, model_name, model_plural=None):
        """Render the model+collection JS snippet for one SQLAlchemy model.

        model_plural defaults to model_name + 's' and becomes the REST
        resource name (underscored).
        """
        if model_name not in dir(self.models):
            raise ImportError(
                "Model [{name}] couldn't be found in {module}\n".format(name=model_name, module=self.models.__name__))
        if model_plural is None:
            model_plural = model_name + 's'
        model = getattr(self.models, model_name)
        schema = {}
        mapper = class_mapper(model)
        callbacks = []
        for column in mapper.columns:
            col_type = column.type.__class__.__name__
            attrs = {}
            if column.primary_key:
                # Primary keys are not part of the editable schema.
                continue
            if column.foreign_keys:
                try:
                    attrs['type'] = 'AtteqSelect'
                    # Placeholder; substituted with the rendered JS callback below.
                    attrs['options'] = '__CALLBACK__%d' % len(callbacks)
                    callbacks.append(self._get_list_options(column))
                    # TODO uf uf uuuuf
                    fk_table = list(column.foreign_keys)[0].target_fullname.split('.')[0]
                    if fk_table in self.table_to_class:
                        attrs['foreign_model'] = '%s%s' % (self.model_prefix, self.table_to_class[fk_table])
                except sqlalchemy.exc.NoReferencedTableError:
                    attrs['type'] = 'Text'
            elif col_type == 'TEXT':
                attrs['type'] = "TextArea"
            elif col_type == 'Enum':
                attrs['type'] = 'AtteqSelect' if column.nullable else 'Select'
                attrs['options'] = column.type.enums
            elif col_type == 'INTEGER':
                attrs['type'] = 'Number'
            else:
                attrs['type'] = "Text"
            if column.nullable:
                attrs['nullable'] = True
            schema[column.name] = attrs
        # Re-indent the JSON schema so it nests inside the JS template.
        schema = "\n ".join(json.dumps(schema, indent=4).split("\n"))
        # Replace quoted placeholders with raw (unquoted) JS callbacks.
        for i in range(len(callbacks)):
            schema = schema.replace('"__CALLBACK__%d"' % i, callbacks[i])
        return model_tpl.format(
            model_name=model_name,
            model_prefix=self.model_prefix,
            collection_prefix=self.collection_prefix,
            resource_name=camelcase_to_underscore(model_plural),
            model_fn=self.model_fn,
            collection_fn=self.collection_fn,
            schema=schema
        )

    def generate_models(self, models):
        """Render snippets for all *models* (names or (singular, plural) tuples)."""
        js_models = []
        self._map_table_name(models)
        for model in models:
            if isinstance(model, tuple):
                model_name = model[0]
                model_plural = model[1]
            else:
                model_name = model
                model_plural = None
            js_model = self.generate_model(model_name, model_plural)
            js_models.append(js_model)
        return js_models
def parse_model_arg(models):
    # type: (list[str]) -> list[str]
    """Parse the model argument definition.

    Entries of the form 'Singular/Plural' become tuples; plain names
    pass through unchanged.
    """
    parsed = []
    for entry in models:
        parsed.append(tuple(entry.split('/')) if '/' in entry else entry)
    return parsed
def generate_js_models(module, models, collection_prefix, model_prefix,
                       model_fn, collection_fn, marker, integrate, js_file):
    # type: (str, list[str], str, str, str, str, str, bool, str) -> Union[str, None]
    """Generate models for Backbone Javascript applications.

    :param module: module from which models are imported
    :param models: model names; an entry can be 'WineCountry/WineCountries' as singular/plural
    :param model_prefix: namespace prefix for models (app.models.)
    :param collection_prefix: namespace prefix for collection (App.collections.)
    :param model_fn: name of model constructor (MyApp.bb.Model)
    :param collection_fn: name of collection constructor (MyApp.bb.Collection)
    :param marker: marker to indicate the auto generated code
    :param integrate: integrate to file
    :param js_file: file to integrate
    :return: generated models or nothing if writing into a file
    """
    options = {
        'model_prefix': model_prefix,
        'collection_prefix': collection_prefix,
        'model_fn': model_fn,
        'collection_fn': collection_fn
    }
    # Pass only explicitly-set options so ModelGenerator defaults apply.
    generator = ModelGenerator(module,
                               **{o: options[o] for o in options if options[o] is not None})
    models = generator.generate_models(parse_model_arg(models))
    if integrate:
        sys.stderr.write("Integrate is really experimental")
        if not marker:
            # Fix: hashlib.md5 requires bytes; passing a str raised
            # TypeError on Python 3.
            marker = hashlib.md5("{0}{1}".format(module, models).encode('utf-8')).hexdigest()
        start = "// * -- START AUTOGENERATED %s -- * //\n" % marker
        end = "// * -- END AUTOGENERATED %s -- * //\n" % marker
        return integrate_to_file("\n".join(models), js_file, start, end)
    else:
        return "\n".join(models)
|
<filename>tictactoe/full_ttt.py
#!/usr/bin/env python3
import argparse
import copy
import sys
# --------------------------------------------------
def get_args():
    """Parse and return the command-line arguments."""
    parser = argparse.ArgumentParser(
        description='Two-player Tic-Tac-Toe',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    args = parser.parse_args()
    return args
# --------------------------------------------------
def print_board(state):
    """Print the 3x3 board; free cells ('-') show their 1-based position."""
    bar = '-------------'
    row_tmpl = '| {} | {} | {} |'
    labels = [pos + 1 if cell == '-' else cell
              for pos, cell in enumerate(state)]
    lines = [bar]
    for start in (0, 3, 6):
        lines.append(row_tmpl.format(*labels[start:start + 3]))
        lines.append(bar)
    print('\n'.join(lines))
# --------------------------------------------------
def has_won(cells):
    """Return 'X' or 'O' if that player owns a full line, else None."""
    lines = ((0, 1, 2), (3, 4, 5), (6, 7, 8),
             (0, 3, 6), (1, 4, 7), (2, 5, 8),
             (0, 4, 8), (2, 4, 6))
    for a, b, c in lines:
        trio = (cells[a], cells[b], cells[c])
        for mark in ('X', 'O'):
            if trio == (mark, mark, mark):
                return mark
    return None
# --------------------------------------------------
def main():
    """Run the interactive two-player Tic-Tac-Toe loop.

    Reads moves from stdin, tracks per-player win counts across games,
    and prints a summary when the user quits.
    """
    get_args()  # no options yet, but this still provides --help/usage
    players = ['X', 'O']
    initial_state = {'cells': list('-' * 9), 'player': players[0]}
    wins = {'X': 0, 'O': 0, 'Draw': 0}
    state = copy.deepcopy(initial_state)
    print_board(state['cells'])
    while True:
        move = input('Player {}: What is your move (q to quit)? '.format(
            state['player'])).rstrip()
        if move == 'q':
            break
        # Validate the move: must be a digit, in range, on a free cell.
        if not move.isdigit():
            print('Move ({}) is not a digit'.format(move))
            continue
        move = int(move)
        if move < 1 or move > 9:
            print('Move ({}) must be between 1 and 9'.format(move))
            continue
        if state['cells'][move - 1] != '-':
            print('Cell "{}" has already been chosen'.format(move))
            continue
        state['cells'][move - 1] = state['player']
        print_board(state['cells'])
        # Conditions for stopping play
        winning_player = has_won(state['cells'])
        board_is_full = '-' not in state['cells']
        if winning_player or board_is_full:
            if winning_player:
                wins[winning_player] += 1
                print('Player {} has won!'.format(winning_player))
            else:
                wins['Draw'] += 1
                print('No more valid moves')
            play_again = input('Play again? [yN] ')
            if play_again.lower() == 'y':
                state = copy.deepcopy(initial_state)
                print_board(state['cells'])
                continue
            break
        state['player'] = 'O' if state['player'] == 'X' else 'X'
    for player in players:
        num = wins[player]
        print('Player {} won {} time{}. {}'.format(
            player,
            num,
            '' if num == 1 else 's',
            '(Loser!)' if num == 0 else '',
        ))
    # Fix: the count and the verb were previously swapped, printing
    # e.g. "There 2 were draws." instead of "There were 2 draws."
    print('There {} {} draw{}.'.format(
        'was' if wins['Draw'] == 1 else 'were',
        wins['Draw'],
        '' if wins['Draw'] == 1 else 's'))
    print('Done.')
# --------------------------------------------------
# Entry point: run the game loop only when executed as a script.
if __name__ == '__main__':
    main()
|
<reponame>mindspore-ai/models<gh_stars>10-100
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Data_loader_online"""
import os
import numpy as np
import pandas as pd
from PIL import Image
import mindspore.dataset.vision.py_transforms as P
import mindspore.dataset.vision.c_transforms as C
import mindspore.dataset as de
class TripletFaceDataset:
    """Dataset yielding random (anchor, positive, negative) face-image
    triplets for triplet-loss training.

    The CSV is assumed to carry an image id in column 0 plus 'class' and
    'name' columns (identity label and directory name) — verify against
    the dataset-preparation script.
    """

    def __init__(self, root_dir, csv_name, num_triplets):
        # root_dir holds one sub-directory of .png images per identity,
        # named by the CSV 'name' column (per the joins in __getitem__).
        self.root_dir = root_dir
        self.df = pd.read_csv(csv_name)
        self.num_triplets = num_triplets
        # All triplets are pre-sampled once, up front.
        self.training_triplets = self.generate_triplets(self.df, self.num_triplets)
        print("===init TripletFaceDataset===", flush=True)

    @staticmethod
    def generate_triplets(df, num_triplets):
        """Randomly sample ``num_triplets`` (anchor, positive, negative)
        image-id triples from the dataframe."""
        def make_dictionary_for_face_class(df):
            '''
            - face_classes = {'class0': [class0_id0, ...], 'class1': [class1_id0, ...], ...}
            '''
            face_classes = dict()
            for idx, label in enumerate(df['class']):
                if label not in face_classes:
                    face_classes[label] = []
                face_classes[label].append(df.iloc[idx, 0])
            return face_classes
        triplets = []
        classes = df['class'].unique()
        face_classes = make_dictionary_for_face_class(df)
        for _ in range(num_triplets):
            pos_class = np.random.choice(classes)
            neg_class = np.random.choice(classes)
            # Positive class needs at least two images (anchor + positive).
            while len(face_classes[pos_class]) < 2:
                pos_class = np.random.choice(classes)
            # Negative class must differ from the positive class.
            while pos_class == neg_class:
                neg_class = np.random.choice(classes)
            pos_name = df.loc[df['class'] == pos_class, 'name'].values[0]
            neg_name = df.loc[df['class'] == neg_class, 'name'].values[0]
            if len(face_classes[pos_class]) == 2:
                # Exactly two images: pick both without replacement.
                ianc, ipos = np.random.choice(2, size=2, replace=False)
            else:
                ianc = np.random.randint(0, len(face_classes[pos_class]))
                ipos = np.random.randint(0, len(face_classes[pos_class]))
                # Re-draw until anchor and positive are distinct images.
                while ianc == ipos:
                    ipos = np.random.randint(0, len(face_classes[pos_class]))
            ineg = np.random.randint(0, len(face_classes[neg_class]))
            triplets.append(
                [face_classes[pos_class][ianc], face_classes[pos_class][ipos], face_classes[neg_class][ineg],
                 pos_class, neg_class, pos_name, neg_name])
        return triplets

    def __getitem__(self, idx):
        """Load the idx-th pre-sampled triplet as RGB PIL images plus the
        positive/negative class labels as int32 arrays of shape (1,)."""
        anc_id, pos_id, neg_id, pos_class, neg_class, pos_name, neg_name = self.training_triplets[idx]
        # Image paths: <root_dir>/<identity name>/<image id>.png
        anc_img = os.path.join(self.root_dir, str(pos_name), str(anc_id) + '.png')
        pos_img = os.path.join(self.root_dir, str(pos_name), str(pos_id) + '.png')
        neg_img = os.path.join(self.root_dir, str(neg_name), str(neg_id) + '.png')
        anc_img = Image.open(anc_img).convert("RGB")
        pos_img = Image.open(pos_img).convert("RGB")
        neg_img = Image.open(neg_img).convert("RGB")
        pos_class = np.array([pos_class]).astype(np.int32)
        neg_class = np.array([neg_class]).astype(np.int32)
        return (anc_img, pos_img, neg_img, pos_class, neg_class)

    def __len__(self):
        """Number of pre-sampled triplets."""
        return len(self.training_triplets)
def get_dataloader(train_root_dir, valid_root_dir,
                   train_csv_name, valid_csv_name,
                   num_train_triplets, num_valid_triplets,
                   batch_size, num_workers, group_size,
                   rank, shuffle, mode='train'):
    """Build the MindSpore dataloader for one of three modes.

    :param train_root_dir/valid_root_dir: image root directories
    :param train_csv_name/valid_csv_name: triplet-definition CSVs
    :param num_train_triplets/num_valid_triplets: triplets to pre-sample
    :param batch_size: batch size after the transform pipeline
    :param num_workers: parallel workers for the generator dataset
    :param group_size, rank, shuffle: distributed-sampler parameters
        (used only in 'train' mode)
    :param mode: one of 'train', 'train_valid', 'valid'
    :return: (dataloaders dict with only ``mode`` populated, dataset size)
    :raises ValueError: for an unknown ``mode``
    """
    data_transforms = {
        'train': [
            C.RandomResize(size=(224, 224)),
            C.RandomHorizontalFlip(),
            P.ToTensor(),
            P.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])],
        'train_valid': [
            C.RandomResize(size=(224, 224)),
            C.RandomHorizontalFlip(),
            P.ToTensor(),
            P.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])],
        'valid': [
            C.RandomResize(size=(224, 224)),
            P.ToTensor(),
            P.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]}
    dataset_column_names = ["anc_img", "pos_img", "neg_img", "pos_class", "neg_class"]
    dataloaders = {"train": None, "valid": None, "train_valid": None}
    if mode not in dataloaders:
        # Fix: previously referenced undefined name `data_loaders`,
        # turning the intended ValueError into a NameError.
        raise ValueError("mode should be in {}".format(list(dataloaders.keys())))

    def _apply_transforms(ds):
        # Apply the per-mode transform pipeline to each image column,
        # then batch (shared tail of all three branches).
        for col in ("anc_img", "pos_img", "neg_img"):
            ds = ds.map(input_columns=[col], operations=data_transforms[mode])
        return ds.batch(batch_size, num_parallel_workers=32, drop_remainder=True)

    if mode == "train":
        face_dataset = TripletFaceDataset(root_dir=train_root_dir,
                                          csv_name=train_csv_name,
                                          num_triplets=num_train_triplets)
        sampler = de.DistributedSampler(group_size, rank, shuffle=shuffle)
        # NOTE(review): num_samples is hard-coded to 10000 regardless of
        # num_train_triplets — confirm this cap is intentional.
        ds = de.GeneratorDataset(face_dataset,
                                 dataset_column_names,
                                 num_samples=10000,
                                 num_parallel_workers=num_workers,
                                 python_multiprocessing=False)
        ds.add_sampler(sampler)
    elif mode == "train_valid":
        face_dataset = TripletFaceDataset(root_dir=train_root_dir,
                                          csv_name=train_csv_name,
                                          num_triplets=num_train_triplets)
        ds = de.GeneratorDataset(face_dataset,
                                 dataset_column_names,
                                 num_samples=10000,
                                 num_parallel_workers=num_workers,
                                 python_multiprocessing=False)
    else:  # "valid"
        face_dataset = TripletFaceDataset(root_dir=valid_root_dir,
                                          csv_name=valid_csv_name,
                                          num_triplets=num_valid_triplets)
        ds = de.GeneratorDataset(face_dataset, column_names=dataset_column_names,
                                 sampler=None, num_parallel_workers=num_workers)
    dataloaders[mode] = _apply_transforms(ds)
    data_size1 = len(face_dataset)
    return dataloaders, data_size1
|
#specialized version of class SegmentTree2DFast
#(+,+) update-query
class SegmentTree2DSum:
    """2-D segment tree specialized for range-add updates and
    range-sum queries.

    Specialization of SegmentTree2DFast with
    (uo, ui, qo, qi, uor) = (+, 0, +, 0, *): updates combine with +
    (identity 0), queries combine with + (identity 0), and an update
    value is replicated over a segment by multiplying with its size.

    Fixes vs. the original specialization: the leftover generic names
    ``ui``/``qi``/``M`` (undefined at runtime) are replaced by their
    concrete values 0 / 0 / ``self.M``, and the unused ``self.F`` lambda
    (which referenced nonexistent ``self.uo``/``self.uor``) is removed.
    """

    class Node:
        # A  = 1-D tree over the y axis holding pushed-down row sums
        # AZ = 1-D tree over the y axis holding pending (lazy) per-row adds
        def __init__(self, A, AZ):
            self.A = A
            self.AZ = AZ

    class SegmentTree1DSum:
        """1-D lazy segment tree: range add, range sum."""

        class Node:
            # V = sum of the segment (excluding the pending add Z)
            # Z = pending add, applied lazily to every element of the segment
            def __init__(self, V, Z):
                self.V = V
                self.Z = Z

        def __init__(self, A):
            self.N = len(A)
            self.nodes = [None] * (4 * self.N)
            self.build(0, 0, self.N - 1, A)

        def build(self, nid, nx0, nx1, A):
            if nx0 == nx1:
                # Lazy tag starts at the update identity, 0.
                self.nodes[nid] = self.Node(A[nx0], 0)
            else:
                m = (nx0 + nx1) // 2
                nl = 2 * nid + 1
                nr = 2 * nid + 2
                self.build(nl, nx0, m, A)
                self.build(nr, m + 1, nx1, A)
                self.nodes[nid] = self.Node(self.nodes[nl].V + self.nodes[nr].V, 0)

        def U(self, x0, x1, v):
            """Add v to every element in [x0, x1]."""
            self.UN(0, 0, self.N - 1, x0, x1, v)

        def UN(self, nid, nx0, nx1, x0, x1, v):
            # (nid, nx0, nx1) is the node with id == nid covering [nx0, nx1]
            if x0 <= nx0 and nx1 <= x1:
                # Fully covered: defer the add as a lazy tag.
                self.nodes[nid].Z += v
            elif x0 <= nx1 and nx0 <= x1:
                m = (nx0 + nx1) // 2
                nl = 2 * nid + 1
                nr = 2 * nid + 2
                self.UN(nl, nx0, m, x0, x1, v)
                self.UN(nr, m + 1, nx1, x0, x1, v)
                # Recompute V from children, folding in their lazy tags.
                self.nodes[nid].V = (self.nodes[nl].V + self.nodes[nl].Z * (m + 1 - nx0)) \
                    + (self.nodes[nr].V + self.nodes[nr].Z * (nx1 - m))

        def Q(self, x0, x1):
            """Return the sum over [x0, x1]."""
            return self.QN(0, 0, self.N - 1, x0, x1)

        def QN(self, nid, nx0, nx1, x0, x1):
            if x0 <= nx0 and nx1 <= x1:
                return self.nodes[nid].V + self.nodes[nid].Z * (nx1 + 1 - nx0)
            elif x0 <= nx1 and nx0 <= x1:
                m = (nx0 + nx1) // 2
                nl = 2 * nid + 1
                nr = 2 * nid + 2
                # Children's result plus this node's lazy tag over the overlap.
                return (self.QN(nl, nx0, m, x0, x1) + self.QN(nr, m + 1, nx1, x0, x1)) \
                    + self.nodes[nid].Z * (min(nx1, x1) - max(nx0, x0) + 1)
            else:
                # Query identity.
                return 0

    def __init__(self, A):
        """Build the tree over a non-empty N x M matrix A of numbers."""
        self.N = len(A)
        self.M = len(A[0])
        self.nodes = [None] * (4 * self.N)
        self.build(0, 0, self.N - 1, A)

    def build(self, nid, nx0, nx1, A):
        """Recursively build outer (x) nodes; returns the column sums of
        the rows covered by this node."""
        if nx0 == nx1:
            a1D = A[nx0].copy()
        else:
            m = (nx0 + nx1) // 2
            nl = 2 * nid + 1
            nr = 2 * nid + 2
            al = self.build(nl, nx0, m, A)
            ar = self.build(nr, m + 1, nx1, A)
            a1D = [al[j] + ar[j] for j in range(self.M)]
        self.nodes[nid] = self.Node(self.SegmentTree1DSum(a1D),
                                    self.SegmentTree1DSum([0] * self.M))
        #print("[{},{}]={}".format(nx0, nx1, a1D))
        return a1D

    def U(self, x0, x1, y0, y1, v):
        """Add v to every cell in the rectangle [x0,x1] x [y0,y1]."""
        self.UN(0, 0, self.N - 1, x0, x1, y0, y1, v)

    def UN(self, nid, nx0, nx1, x0, x1, y0, y1, v):
        if x0 <= nx0 and nx1 <= x1:
            # All rows of this node covered: lazy per-row add in AZ.
            self.nodes[nid].AZ.U(y0, y1, v)
        elif x0 <= nx1 and nx0 <= x1:
            m = (nx0 + nx1) // 2
            nl = 2 * nid + 1
            nr = 2 * nid + 2
            self.UN(nl, nx0, m, x0, x1, y0, y1, v)
            self.UN(nr, m + 1, nx1, x0, x1, y0, y1, v)
            # Fold the number of covered rows into this node's sums.
            self.nodes[nid].A.U(y0, y1, v * (min(nx1, x1) - max(nx0, x0) + 1))

    def Q(self, x0, x1, y0, y1):
        """Return the sum over the rectangle [x0,x1] x [y0,y1]."""
        return self.QN(0, 0, self.N - 1, x0, x1, y0, y1)

    def QN(self, nid, nx0, nx1, x0, x1, y0, y1):
        if x0 <= nx0 and nx1 <= x1:
            return self.nodes[nid].A.Q(y0, y1) + self.nodes[nid].AZ.Q(y0, y1) * (nx1 - nx0 + 1)
        elif x0 <= nx1 and nx0 <= x1:
            m = (nx0 + nx1) // 2
            nl = 2 * nid + 1
            nr = 2 * nid + 2
            return (self.QN(nl, nx0, m, x0, x1, y0, y1) + self.QN(nr, m + 1, nx1, x0, x1, y0, y1)) \
                + self.nodes[nid].AZ.Q(y0, y1) * (min(nx1, x1) - max(nx0, x0) + 1)
        else:
            # Query identity.
            return 0
|
<filename>nox/src/nox/netapps/monitoring/monitoring.py
'''monitoring core'''
# Written for Ripcord by
# Author: <NAME> (<EMAIL>)
# Ported to NOX to use LAVI/messenger by
# Author: <NAME> (<EMAIL>)
import time
import logging
from collections import defaultdict
from collections import deque
from twisted.python import log
from nox.coreapps.pyrt.pycomponent import Table_stats_in_event, \
Aggregate_stats_in_event
from nox.lib.core import Component, Flow_mod_event, Datapath_join_event, \
Datapath_leave_event, Port_stats_in_event, Table_stats_in_event, \
Aggregate_stats_in_event, CONTINUE, STOP, pyevent, Flow_stats_in_event, \
Queue_stats_in_event
import nox.lib.openflow as openflow
import nox.lib.pyopenflow as of
from nox.lib.packet.packet_utils import mac_to_str
from nox.lib.netinet.netinet import datapathid, create_ipaddr, c_htonl, c_ntohl
from nox.lib.directory import Directory, LocationInfo
from nox.lib.packet.packet_utils import longlong_to_octstr
from switchqueryreplyevent import \
SwitchQueryReplyEvent, SwitchQuery as MonitorSwitchQuery
from linkutilreplyevent import LinkUtilizationReplyEvent
from nox.coreapps.messenger.pyjsonmsgevent import JSONMsg_event
import simplejson as json
# Default values for the periodicity of polling for each class of
# statistic
# Use a poll frequency of 20ms per switch (this frequency works)
#DEFAULT_POLL_TABLE_STATS_PERIOD = 0.02
#DEFAULT_POLL_PORT_STATS_PERIOD = 0.03
#DEFAULT_POLL_AGGREGATE_STATS_PERIOD = 0.04
# For testing, poll less aggressively
DEFAULT_POLL_TABLE_STATS_PERIOD = 20 # seconds
DEFAULT_POLL_PORT_STATS_PERIOD = 20 # seconds
DEFAULT_POLL_AGGREGATE_STATS_PERIOD = 20 # seconds
DEFAULT_POLL_UTIL_PERIOD = 1 # seconds
# Arbitrary limits on how much stats history we keep per switch
DEFAULT_COLLECTION_EPOCH_DURATION = 10 # seconds
DEFAULT_MAX_STATS_SNAPSHOTS_PER_SWITCH = 10
# Static log handle
lg = logging.getLogger('monitoring')
## \ingroup noxcomponents
# Collects and maintains switch and port stats for the network.
#
# Monitors switch and port stats by sending out port_stats requests
# periodically to all connected switches.
#
# The primary method of accessing the ports stats is through the
# webserver (see switchstatsws.py) however, components can also
# register port listeners which are called each time stats are
# received for a particular port.
#
class Monitoring(Component):
def __init__(self, ctxt):
    """Initialize monitoring state; timers and defaults are set up later
    in configure().

    @param ctxt - NOX component context, passed through to Component
    """
    Component.__init__(self, ctxt)
    self.ctxt_ = ctxt
    lg.debug( 'Simple monitoring started!' )
    # We'll keep track of the logical time we've been
    # collecting data so that we can group snapshots from different
    # switches in the network across time i.e. we want to look
    # at changes in monitoring data within a single
    # collection epoch as well as across collection epochs
    self.collection_epoch = 0
    # Keep track of the latest collection epoch included in a
    # stats reply so we know what
    self.max_stats_reply_epoch = -1
    # Keep track of the set of the switches we are monitoring.
    # As switches join and leave the network we can enable or disable
    # the timers that poll them for their stats
    self.switches = set([])
    # Track the switches that we haven't heard from in a while
    self.silent_switches = set([])
    # Store the snapshots of the switch stats
    # [dpid][<snapshot1>,<snapshot2>,...,<snapshotN>]
    self.snapshots = {}
    # Store the capabilities of each port for each switch
    # [dpid][<port1>,<port2>,...,<portN>
    self.port_cap = {}
    # Pending queries - things we've been asked for but have not yet
    # satisfied
    self.pending_switch_queries = set([])
    # Mapping of gui query ID's to streams. This way Monitoring knows where
    # to send a specific reply from a switch stats query
    self.pending_gui_queries = {}
    # Subscribers for monitoring messages
    #(eg. self.subscribers["linkutils"] = [guistream]
    self.subscribers = {}
    # NOTE: the triple-quoted block below is an inert string literal
    # (dead code); the poll-period defaults are actually assigned in
    # configure().
    """
    # Set defaults
    self.table_stats_poll_period = DEFAULT_POLL_TABLE_STATS_PERIOD
    self.aggregate_stats_poll_period = DEFAULT_POLL_AGGREGATE_STATS_PERIOD
    self.port_stats_poll_period = DEFAULT_POLL_PORT_STATS_PERIOD
    self.collection_epoch_duration = DEFAULT_COLLECTION_EPOCH_DURATION
    self.max_snapshots_per_switch = DEFAULT_MAX_STATS_SNAPSHOTS_PER_SWITCH
    """
def aggregate_timer(self, dpid):
    """Legacy polling loop: request aggregate stats from a switch and
    re-arm.

    @param dpid - datapath/switch to contact
    """
    flow = of.ofp_match()
    # NOTE(review): other aggregate requests in this file use wildcard
    # mask 0xffffffff; confirm whether 0xffff here is intentional.
    flow.wildcards = 0xffff
    self.ctxt.send_aggregate_stats_request(dpid, flow, 0xff)
    # Fix: the original referenced the undefined name
    # MONITOR_TABLE_PERIOD (NameError); use the module-level default
    # aggregate-stats period instead.
    self.post_callback(DEFAULT_POLL_AGGREGATE_STATS_PERIOD,
                       lambda: self.aggregate_timer(dpid))
def table_timer(self, dpid):
    """Legacy polling loop: request table stats from a switch and re-arm.

    @param dpid - datapath/switch to contact
    """
    self.ctxt.send_table_stats_request(dpid)
    # Fix: MONITOR_TABLE_PERIOD was undefined (NameError); use the
    # module-level default table-stats period.
    self.post_callback(DEFAULT_POLL_TABLE_STATS_PERIOD,
                       lambda: self.table_timer(dpid))
def port_timer(self, dpid):
    """Legacy polling loop: request port stats from a switch and re-arm.

    @param dpid - datapath/switch to contact
    """
    # Fix: OFPP_NONE was referenced unqualified (NameError); the rest of
    # this file uses openflow.OFPP_NONE.
    self.ctxt.send_port_stats_request(dpid, openflow.OFPP_NONE)
    # Fix: MONITOR_PORT_PERIOD was undefined (NameError); use the
    # module-level default port-stats period.
    self.post_callback(DEFAULT_POLL_PORT_STATS_PERIOD,
                       lambda: self.port_timer(dpid))
# For each new datapath that joins, create a timer loop that monitors
# the statistics for that switch
def datapath_join_callback(self, dpid, stats):
    """Legacy join hook: start the staggered per-switch polling timers.

    @param dpid - datapath/switch that joined
    @param stats - join-time switch stats (unused here)
    """
    # Fix: the MONITOR_*_PERIOD names were undefined (NameError); use the
    # module-level defaults. The +1/+2 offsets stagger the three timers.
    self.post_callback(DEFAULT_POLL_TABLE_STATS_PERIOD,
                       lambda: self.table_timer(dpid))
    self.post_callback(DEFAULT_POLL_PORT_STATS_PERIOD + 1,
                       lambda: self.port_timer(dpid))
    self.post_callback(DEFAULT_POLL_AGGREGATE_STATS_PERIOD + 2,
                       lambda: self.aggregate_timer(dpid))
def configure(self, configuration):
    """Register monitoring events and handlers, set polling defaults, and
    start the periodic timers.

    @param configuration - component configuration (unused here)
    """
    #self.register_event(JSONMsg_event.static_get_name())
    JSONMsg_event.register_event_converter(self.ctxt)
    self.register_python_event( LinkUtilizationReplyEvent.NAME )
    self.register_python_event(SwitchQueryReplyEvent.NAME)
    self.register_handler( SwitchQueryReplyEvent.NAME, \
                           self.handle_switch_query_reply_event )
    # Set everything to the default values initially
    self.table_stats_poll_period = DEFAULT_POLL_TABLE_STATS_PERIOD
    self.aggregate_stats_poll_period = DEFAULT_POLL_AGGREGATE_STATS_PERIOD
    self.port_stats_poll_period = DEFAULT_POLL_PORT_STATS_PERIOD
    self.collection_epoch_duration = DEFAULT_COLLECTION_EPOCH_DURATION
    self.max_snapshots_per_switch = DEFAULT_MAX_STATS_SNAPSHOTS_PER_SWITCH
    # Start our logical clock
    self.fire_epoch_timer()
    # Start some internal debugging
    self.fire_stats_debug_timer()
    self.fire_utilization_broadcasts()
    lg.debug( "Finished configuring monitoring" )
def install(self):
    """Install the monitoring component.

    Registers all the event handlers and sets up the switch polling
    timers. (The docstring previously sat after the first
    register_handler call, where Python does not treat it as a
    docstring; moving it here is a no-op at runtime.)
    """
    self.register_handler(JSONMsg_event.static_get_name(),
                          lambda event: self.handle_jsonmsg_event(event))
    self.register_handler(Datapath_join_event.static_get_name(),
                          lambda event: self.handle_datapath_join(event))
    self.register_handler(Datapath_leave_event.static_get_name(),
                          lambda event: self.handle_datapath_leave(event))
    # Stats reporting events
    self.register_handler(Table_stats_in_event.static_get_name(),
                          lambda event: self.handle_table_stats_in(event))
    self.register_handler(Port_stats_in_event.static_get_name(),
                          lambda event: self.handle_port_stats_in(event))
    self.register_handler(Aggregate_stats_in_event.static_get_name(),
                          lambda event: self.handle_aggregate_stats_in(event))
    self.register_handler(Flow_stats_in_event.static_get_name(),
                          lambda event: self.handle_flow_stats_in(event))
    self.register_handler(Queue_stats_in_event.static_get_name(),
                          lambda event: self.handle_queue_stats_in(event))
    self.register_handler(LinkUtilizationReplyEvent.NAME,
                          self.handle_link_util_reply_event)
def handle_jsonmsg_event(self, e):
    """Dispatch incoming messenger JSON messages of type "monitoring".

    Supports "subscribe" plus the switch-query commands portstats,
    tablestats, aggstats, latestsnapshot, flowstats and queuestats.
    @param e - JSONMsg event; e.jsonstring carries the raw JSON payload
    @return CONTINUE so other handlers also see the event
    """
    msg = json.loads(e.jsonstring)
    if msg["type"] != "monitoring" :
        return CONTINUE
    if not "command" in msg:
        lg.debug( "Received message with no command field" )
        return CONTINUE
    if msg["command"] == "subscribe":
        # Add stream to interested entities for this msg_type
        if not msg["msg_type"] in self.subscribers:
            self.subscribers[msg["msg_type"]] = []
        self.subscribers[msg["msg_type"]].append(e)
        return CONTINUE
    # Store
    # NOTE(review): every command past this point assumes the message
    # carries "xid" and "dpid" (dpid as a hex string); a KeyError is
    # raised otherwise — verify against the GUI sender.
    self.pending_gui_queries[msg["xid"]] = e
    lg.debug( "got JSON switch query request")
    dpid = int(str(msg["dpid"]), 16)
    if msg["command"] == "portstats" :
        self.pending_switch_queries.add( msg["xid"] )
        self.send_port_stats_request( dpid, msg["xid"] )
    elif msg["command"] == "tablestats":
        self.pending_switch_queries.add( msg["xid"] )
        self.send_table_stats_request( dpid, msg["xid"] )
    elif msg["command"] == "aggstats":
        self.pending_switch_queries.add( msg["xid"] )
        flow = of.ofp_match()
        flow.wildcards = 0xffffffff
        self.send_aggregate_stats_request( dpid, flow, 0xff, msg["xid"] )
    elif msg["command"] == "latestsnapshot":
        # Look at the latest snapshot we have (if any) for this switch
        # and post a custom event
        if dpid in self.switches:
            self.pending_switch_queries.add( msg["xid"] )
            latest_snapshot = self.get_latest_switch_stats(dpid)
            if latest_snapshot != None:
                reply = SwitchQueryReplyEvent( msg["xid"], dpid, \
                        SwitchQueryReplyEvent.QUERY_LATEST_SNAPSHOT,\
                        latest_snapshot )
                self.post( pyevent( SwitchQueryReplyEvent.NAME, reply ) )
    elif msg["command"] == "flowstats":
        self.pending_switch_queries.add( msg["xid"] )
        flow = of.ofp_match()
        flow.wildcards = 0xffffffff
        self.send_flow_stats_request(dpid, flow, 0xff, msg["xid"])
    elif msg["command"] == "queuestats":
        self.pending_switch_queries.add( msg["xid"] )
        self.send_queue_stats_request(dpid, msg["xid"])
    return CONTINUE
def handle_switch_query_reply_event(self, event):
    """Forward a switch stats reply to the GUI stream that requested it.

    Matches event.pyevent.xid against pending queries, tags the outgoing
    JSON message with a msg_type derived from the query type, and replies
    on the stream saved in pending_gui_queries.
    @param event - wrapped SwitchQueryReplyEvent
    @return CONTINUE so other handlers also see the event
    """
    lg.debug( "handling switch_query_reply_event" )
    if event.pyevent.xid in self.pending_switch_queries:
        # Remove the xid from our todo list
        self.pending_switch_queries.remove( event.pyevent.xid )
        # Look at the query type and craft the right kind of
        # message
        msg = {}
        msg["type"] = "monitoring"
        msg["xid"] = event.pyevent.xid
        msg["dpid"] = event.pyevent.dpid
        # NOTE(review): this serializes the wrapper `event` (not
        # event.pyevent), relying on encode_switch_query as the fallback
        # encoder — confirm it emits the intended payload.
        msg["data"] = json.dumps(event, sort_keys=True, \
                                 default=self.encode_switch_query)
        # Figure out what kind of query reply came back
        if event.pyevent.query_type == \
                SwitchQueryReplyEvent.QUERY_PORT_STATS:
            lg.debug( "got port stats reply" )
            msg["msg_type"] = "portstats"
        elif event.pyevent.query_type == \
                SwitchQueryReplyEvent.QUERY_TABLE_STATS:
            lg.debug( "got table stats reply" )
            msg["msg_type"] = "tablestats"
        elif event.pyevent.query_type == \
                SwitchQueryReplyEvent.QUERY_AGG_STATS:
            lg.debug( "got agg stats reply" )
            msg["msg_type"] = "aggstats"
        elif event.pyevent.query_type == \
                SwitchQueryReplyEvent.QUERY_LATEST_SNAPSHOT:
            lg.debug( "got latest snapshot reply" )
            msg["msg_type"] = "latestsnapshot"
        elif event.pyevent.query_type == \
                SwitchQueryReplyEvent.QUERY_FLOW_STATS:
            lg.debug( "got flow stats reply" )
            msg["msg_type"] = "flowstats"
        elif event.pyevent.query_type == \
                SwitchQueryReplyEvent.QUERY_QUEUE_STATS:
            lg.debug( "got queue stats reply" )
            msg["msg_type"] = "queuestats"
        stream = self.pending_gui_queries.pop( event.pyevent.xid )
        stream.reply(json.dumps(msg))
    return CONTINUE
# Construct and send our own stats request messages so we can make use
# of the xid field (store our logical clock/collection epoch here) to
# detect whether stats replies from switches are delayed, lost or
# re-ordered
def send_table_stats_request(self, dpid, xid=-1):
    """Send a table stats request to a switch (dpid).

    @param dpid - datapath/switch to contact
    @param xid - transaction id for the request header; -1 (default)
           embeds the current collection epoch so replies can be matched
           to the epoch that requested them
    """
    # Build the request
    request = of.ofp_stats_request()
    if xid == -1:
        request.header.xid = c_htonl(long(self.collection_epoch))
    else:
        request.header.xid = c_htonl(xid)
    request.header.type = openflow.OFPT_STATS_REQUEST
    request.type = openflow.OFPST_TABLE
    request.flags = 0
    # Table stats requests carry no body; length is the header alone.
    request.header.length = len(request.pack())
    self.send_openflow_command(dpid, request.pack())
def send_port_stats_request(self, dpid, xid=-1):
    """Send a port stats request to a switch (dpid).

    @param dpid - datapath/switch to contact
    @param xid - transaction id for the request header; -1 (default)
           embeds the current collection epoch
    """
    # Build port stats request message
    request = of.ofp_stats_request()
    if xid == -1:
        request.header.xid = c_htonl(long(self.collection_epoch))
    else:
        request.header.xid = c_htonl(xid)
    request.header.type = openflow.OFPT_STATS_REQUEST
    request.type = openflow.OFPST_PORT
    request.flags = 0
    # Need a body for openflow v1.x.x but not for 0.9.x
    # Construct body as a port_stats_request - need something packable
    body = of.ofp_port_stats_request()
    # Get stats on all ports using OFPP_NONE
    body.port_no = openflow.OFPP_NONE
    request.header.length = len(request.pack()) + len(body.pack())
    self.send_openflow_command(dpid, request.pack() + body.pack())
def send_aggregate_stats_request(self, dpid, match, table_id, xid=-1):
    """Send an aggregate stats request to a switch (dpid).

    @param dpid - datapath/switch to contact
    @param match - ofp_match structure selecting the flows to aggregate
    @param table_id - table to query
    @param xid - transaction id for the request header; -1 (default)
           embeds the current collection epoch
    """
    # Create the stats request header
    request = of.ofp_stats_request()
    if xid == -1:
        request.header.xid = c_htonl(long(self.collection_epoch))
    else:
        request.header.xid = c_htonl(xid)
    request.header.type = openflow.OFPT_STATS_REQUEST
    request.type = openflow.OFPST_AGGREGATE
    request.flags = 0
    # Create the stats request body
    body = of.ofp_aggregate_stats_request()
    body.match = match
    body.table_id = table_id
    # OFPP_NONE: do not filter by output port.
    body.out_port = openflow.OFPP_NONE
    # Set the header length
    request.header.length = len(request.pack()) + len(body.pack())
    self.send_openflow_command(dpid, request.pack() + body.pack())
def send_flow_stats_request(self, dpid, match, table_id, xid=-1):
    """Send a flow stats request to a switch (dpid).

    @param dpid - datapath/switch to contact
    @param match - ofp_match structure selecting the flows to report
    @param table_id - table to query
    @param xid - transaction id for the request header; -1 (default)
           embeds the current collection epoch
    """
    # Create the stats request header
    request = of.ofp_stats_request()
    if xid == -1:
        request.header.xid = c_htonl(long(self.collection_epoch))
    else:
        request.header.xid = c_htonl(xid)
    lg.debug( "sending flow stats request xid: %d" % \
              (c_htonl(request.header.xid)) )
    request.header.type = openflow.OFPT_STATS_REQUEST
    request.type = openflow.OFPST_FLOW
    request.flags = 0
    # Create the stats request body
    body = of.ofp_flow_stats_request()
    body.match = match
    body.table_id = table_id
    # OFPP_NONE: do not filter by output port.
    body.out_port = openflow.OFPP_NONE
    request.header.length = len(request.pack()) + len(body.pack())
    self.send_openflow_command(dpid, request.pack() + body.pack())
def send_queue_stats_request(self, dpid, xid=-1):
    """Send a queue stats request to a switch (dpid).

    @param dpid - datapath/switch to contact
    @param xid - transaction id for the request header; -1 (default)
           embeds the current collection epoch
    """
    # Fix: the debug call previously preceded the docstring, so Python
    # did not recognize the docstring as such; order swapped (no
    # behavioral change).
    lg.debug("sending queue stats request")
    # Create the stats request header
    request = of.ofp_stats_request()
    if xid == -1:
        request.header.xid = c_htonl(long(self.collection_epoch))
    else:
        request.header.xid = c_htonl(xid)
    request.header.type = openflow.OFPT_STATS_REQUEST
    request.type = openflow.OFPST_QUEUE
    request.flags = 0
    # Create the stats request body: all queues on all ports.
    body = of.ofp_queue_stats_request()
    body.port_no = openflow.OFPP_ALL
    body.queue_id = openflow.OFPQ_ALL
    request.header.length = len(request.pack()) + len(body.pack())
    self.send_openflow_command(dpid, request.pack() + body.pack())
# Command API
def count_silent_switches(self):
    """Return how many monitored switches are currently "silent"
    (have not responded to stats requests)."""
    silent = self.silent_switches
    return len(silent)
def get_all_silent_switches(self):
    """Expose the set of switches that have not responded to stats
    requests (returned by reference, not copied)."""
    silent = self.silent_switches
    return silent
def get_all_switch_stats(self, dpid):
    """API call to get all the recent stats snapshots for a switch.

    @param dpid - datapath/switch whose snapshots to return
    @return the snapshot container for dpid, or {} for unknown switches
    """
    return self.snapshots[dpid] if dpid in self.switches else {}
def get_max_stats_reply_epoch(self):
    """Return the newest collection epoch for which at least one switch
    stats reply has arrived (-1 before any reply)."""
    latest = self.max_stats_reply_epoch
    return latest
def get_latest_port_bps(self, time_consistent=True):
    """Build PortUtilization entries from each switch's newest snapshot.

    For every monitored switch, takes the latest complete snapshot and
    converts each port's counters into estimated transmit/receive bits
    per second plus the port's max-speed capacity.

    @param time_consistent - when True, only include snapshots from the
           most recent collection epoch so entries are comparable
    @return list of PortUtilization objects (possibly empty)
    """
    port_utilizations = []
    # Look at the latest reply epoch
    # For each switch get any snapshot that is ready with
    # collected for the latest reply epoch
    # Go through that snapshot and pull out the port
    # info
    # Create portutilization instance:
    # [dpid,port,bps_transmitted,bps_received]
    for dpid in self.switches:
        # Get the latest snapshot for each switch
        latest_snapshot = self.get_latest_switch_stats(dpid)
        # If there's a recent snapshot see if it's ready (complete)
        # AND for the most recent collection epoch
        if latest_snapshot != None and latest_snapshot.ready():
            #lg.debug( "found latest snapshot for dpid 0x%x" % (dpid) )
            # If we want the snapshots to all be from the same
            # most recent collection epoch then ignore the ones that aren't
            if time_consistent and (latest_snapshot.collection_epoch != \
                    self.max_stats_reply_epoch):
                continue
            #if latest_snapshot.ready() and \
            #latest_snapshot.collection_epoch\
            #== self.max_stats_reply_epoch:
            # Now go thru the snapshot's port info and
            # create port utilization instances and
            # add them to the list
            for port in latest_snapshot.port_info:
                portinfo = latest_snapshot.port_info[port]
                port_util = PortUtilization()
                port_util.dpid = dpid
                port_util.port = portinfo.port_number
                port_util.bps_transmitted = \
                    portinfo.estimate_bits_sent_per_sec()
                port_util.bps_received = \
                    portinfo.estimate_bits_received_per_sec()
                # NOTE(review): raises KeyError if port capabilities for
                # this dpid/port were never recorded — confirm join-time
                # population of port_cap.
                port_util.capacity = (self.port_cap[port_util.dpid][port_util.port].to_dict())['max_speed']
                port_utilizations.append(port_util)
        else:
            pass
    return port_utilizations
def get_latest_switch_stats(self, dpid):
    """Return the newest stats snapshot for a switch.

    @param dpid - datapath/switch whose snapshot to return
    @return the most recent snapshot, or None when the switch is unknown
            or has no snapshots yet
    """
    if dpid not in self.switches:
        return None
    history = self.snapshots[dpid]
    # Newest snapshot sits at the front of the per-switch container.
    return history[0] if history else None
def get_all_port_capabilities(self, dpid):
    """Return all recorded port capabilities for a switch.

    @param dpid - datapath/switch whose port capabilities to return
    @return the per-port capability container, or None if unknown
    """
    return self.port_cap.get(dpid)
def get_port_capabilities(self, dpid, port_num):
    """Return the capabilities of one port on one switch.

    @param dpid - datapath/switch to look up
    @param port_num - specific port to look up
    @return the port's capabilities, or None when the switch is unknown
    """
    if dpid not in self.port_cap:
        return None
    return self.port_cap[dpid][port_num]
# Timers
# Stats debugging timer
def fire_stats_debug_timer(self):
    """Periodic self-check: exercise the port-bps computation, then
    re-arm at twice the collection epoch duration."""
    self.get_latest_port_bps()
    delay = self.collection_epoch_duration * 2
    self.post_callback(delay, self.fire_stats_debug_timer)
def fire_utilization_broadcasts(self):
    """Broadcast the current link utilization figures, then re-arm."""
    utilization = self.get_latest_port_bps()
    # xid == -1 marks the reply as unsolicited (not answering a query).
    reply = LinkUtilizationReplyEvent(-1, utilization)
    self.post(pyevent(LinkUtilizationReplyEvent.NAME, reply))
    self.post_callback(DEFAULT_POLL_UTIL_PERIOD,
                       self.fire_utilization_broadcasts)
# Logical clock timer
def fire_epoch_timer(self):
    """Handler updates the logical clock used by Monitoring.

    Increments collection_epoch every collection_epoch_duration seconds
    and re-arms itself.
    """
    # The triple-quoted block below is an inert string literal:
    # disabled silent-switch detection kept for reference.
    '''
    lg.debug( "---silent switches start at epoch: %d---" \
        % (self.collection_epoch) )
    for dpid in self.silent_switches:
        lg.debug( dpid )
        if self.topo.all_connected():
            self.topo.setNodeFaultStatus(dpid, True)
        # Publish an event for each silent switch
        silentSwitch = SilentSwitchEvent( -1, dpid )
        self.post( pyevent( SilentSwitchEvent.NAME, silentSwitch ) )
    lg.debug( "---silent switches end at epoch: %d---" \
        % (self.collection_epoch))
    # Add all switches to the silent list at the start of every
    # epoch. We'll remove them as they reply to stats requests
    for dpid in self.switches:
        if dpid not in self.silent_switches:
            #self.topo.setNodeFaultStatus(dpid, False)
            self.silent_switches.add(dpid)
    '''
    # Update the epoch
    self.collection_epoch += 1
    lg.debug( "updated clock: %d" % (self.collection_epoch) )
    self.post_callback( self.collection_epoch_duration, \
                        self.fire_epoch_timer )
# Table stats timer
def fire_table_stats_timer(self, dpid):
    """Poll a switch for its table stats and re-arm the timer.

    @param dpid - datapath/switch to contact
    """
    if dpid not in self.switches:
        # Switch left the network; let this timer chain die out.
        return
    self.send_table_stats_request(dpid)
    self.post_callback(self.table_stats_poll_period,
                       lambda: self.fire_table_stats_timer(dpid))
# Port stats timer
def fire_port_stats_timer(self, dpid):
    """Poll a switch for its port stats and re-arm the timer.

    @param dpid - datapath/switch to contact
    """
    if dpid not in self.switches:
        # Switch left the network; let this timer chain die out.
        return
    self.send_port_stats_request(dpid)
    self.post_callback(self.port_stats_poll_period,
                       lambda: self.fire_port_stats_timer(dpid))
# Aggregate stats timer
def fire_aggregate_stats_timer(self, dpid):
    """Poll a switch for its aggregate stats and re-arm the timer.

    @param dpid - datapath/switch to contact
    """
    if dpid not in self.switches:
        # Switch left the network; let this timer chain die out.
        return
    # Fully wildcarded match: aggregate over every flow.
    flow = of.ofp_match()
    flow.wildcards = 0xffffffff
    self.send_aggregate_stats_request(dpid, flow, 0xff)
    self.post_callback(self.aggregate_stats_poll_period,
                       lambda: self.fire_aggregate_stats_timer(dpid))
    def fire_flow_stats_timer(self, dpid):
        """
        Handler polls a switch for its per-flow stats.
        @param dpid - datapath/switch to contact
        """
        # Only poll while the switch is still being monitored
        if dpid in self.switches:
            # Grab data for all flows: fully wildcarded match, all tables (0xff)
            flow = of.ofp_match()
            flow.wildcards = 0xffffffff
            self.send_flow_stats_request(dpid, flow, 0xff)
            # NOTE(review): poll period is hard-coded to 10 s here, unlike the
            # other timers which use configurable *_poll_period attributes.
            self.post_callback(10, lambda : self.fire_flow_stats_timer(dpid))
def fire_queue_stats_timer(self, dpid):
"""
Handler polls a switch for its queue stats.
@param dpid - datapath/switch to contact
"""
if dpid in self.switches:
self.send_queue_stats_request(dpid)
self.post_callback(10, lambda : self.fire_queue_stats_timer(dpid))
    # Event handlers. FYI if you need/want to find out what fields exist
    # in a specific event type look at src/nox/lib/util.py at the utility
    # functions that are used to manipulate them
    def handle_datapath_join(self, event):
        """Handler responds to switch join events.

        Registers the switch for monitoring: records per-port capabilities,
        creates an empty snapshot queue, and arms the per-switch polling
        timers (table, port, aggregate). The switch is marked silent until
        its first stats reply arrives.
        @param event datapath/switch join event to handle
        """
        # grab the dpid from the event
        dpid = event.datapath_id
        epoch = self.collection_epoch
        '''
        ports = event.ports
        for item in ports:
            # Figure out what speeds are supported
            port_enabled = (item['config'] & openflow.OFPPC_PORT_DOWN) == 0
            link_enabled = (item['state'] & openflow.OFPPS_LINK_DOWN) == 0
            # Look at features supported, advertised and curr(ent)
            supports_10MB_HD = (item['curr'] & openflow.OFPPF_10MB_HD) == \
                                openflow.OFPPF_10MB_HD
            supports_10MB_FD = (item['curr'] & openflow.OFPPF_10MB_FD) > 0
            supports_100MB_HD = (item['curr'] & openflow.OFPPF_100MB_HD) > 0
            supports_100MB_FD = (item['curr'] & openflow.OFPPF_100MB_FD) == \
                                openflow.OFPPF_100MB_FD
            supports_1GB_HD = (item['curr'] & openflow.OFPPF_1GB_HD) > 0
            supports_1GB_FD = (item['curr'] & openflow.OFPPF_1GB_FD) > 0
            supports_10GB_FD = (item['curr'] & openflow.OFPPF_10GB_FD) > 0
        '''
        # Set up some timers for polling this switch periodically
        # Whenever a new switch joins set up some timers for polling it
        # for its stats (using the monitor.py example as a rough reference)
        if not dpid in self.switches:
            lg.debug( "Handling switch join. Epoch: %d, dpid: 0x%x" % \
                        (epoch,dpid) )
            # Add this switch to the set of switches being monitored
            self.switches.add(dpid)
            # Create an entry to store its stats snapshots
            self.snapshots[dpid] = deque()
            # Create an entry to store its port capabilities
            self.port_cap[dpid] = dict()
            # Add ports: one PortCapability per port, decoded from the
            # OpenFlow 'config'/'state'/'curr' feature bitmaps
            ports = event.ports
            for item in ports:
                # create port capability
                new_port_cap = PortCapability()
                # set fields
                new_port_cap.port_name = item['name']
                new_port_cap.port_number = item['port_no']
                new_port_cap.port_enabled = ((item['config'] & \
                                            openflow.OFPPC_PORT_DOWN) == 0)
                new_port_cap.link_enabled = (item['state'] & \
                                            openflow.OFPPS_LINK_DOWN) == 0
                new_port_cap.supports_10Mb_hd = (item['curr'] & \
                                            openflow.OFPPF_10MB_HD) == \
                                            openflow.OFPPF_10MB_HD
                new_port_cap.supports_10Mb_fd = (item['curr'] & \
                                            openflow.OFPPF_10MB_FD) > 0
                new_port_cap.supports_100Mb_hd = (item['curr'] & \
                                            openflow.OFPPF_100MB_HD) > 0
                new_port_cap.supports_100Mb_fd = (item['curr'] & \
                                            openflow.OFPPF_100MB_FD) == \
                                            openflow.OFPPF_100MB_FD
                new_port_cap.supports_1Gb_hd = (item['curr'] & \
                                            openflow.OFPPF_1GB_HD) > 0
                new_port_cap.supports_1Gb_fd = (item['curr'] & \
                                            openflow.OFPPF_1GB_FD) > 0
                new_port_cap.supports_10Gb_fd = (item['curr'] & \
                                            openflow.OFPPF_10GB_FD) > 0
                # Have the port capability instance compute the
                # max port speed
                new_port_cap.compute_max_port_speed_bps()
                # store the port capability instance to the port map/dict
                (self.port_cap[dpid])[new_port_cap.port_number]=new_port_cap
            # Set up timers
            self.post_callback(self.table_stats_poll_period, \
                                lambda : self.fire_table_stats_timer(dpid))
            self.post_callback(self.port_stats_poll_period, \
                                lambda : self.fire_port_stats_timer(dpid))
            self.post_callback(self.aggregate_stats_poll_period, \
                                lambda : self.fire_aggregate_stats_timer(dpid))
        # Mark switch as silent until we get a stats reply from it
        if dpid not in self.silent_switches:
            self.silent_switches.add(dpid)
        return CONTINUE
def handle_datapath_leave(self, event):
"""Handler responds to switch leave events.
@param event - datapath leave event to handle
"""
dpid = event.datapath_id
lg.debug( "Handling switch leave. Epoch: %d, dpid: 0x%x" % \
(self.collection_epoch, dpid) )
# drop all the stats for this switch
if dpid in self.switches:
self.switches.remove(dpid)
# Throw away its stats snapshots
del self.snapshots[dpid]
# Remove switch from the slient_switch list if it's currently on it
if dpid in self.silent_switches:
self.silent_switches.remove(dpid)
return CONTINUE
    # Handlers for switch stats events
    def handle_aggregate_stats_in(self, event):
        """Handler responds to receiving aggregate switch stats.

        Answers any pending active query for this xid, clears the switch's
        "silent" flag, then stores (or updates) the per-switch snapshot for
        the collection epoch carried in the reply xid.
        @param event - aggregate stats in event to handle
        """
        # Get the snapshot list
        dpid = event.datapath_id
        # Use the xid as the current collection epoch
        current_collection_epoch = event.xid #self.collection_epoch
        if event.xid in self.pending_switch_queries:
            lg.debug( "responding to switch query for aggregate stats" )
            # Publish custom event
            reply = SwitchQueryReplyEvent( event.xid, event.datapath_id, \
                                    SwitchQueryReplyEvent.QUERY_AGG_STATS,\
                                    event )
            self.post( pyevent( SwitchQueryReplyEvent.NAME, reply ) )
        '''
        # Remove the xid from our todo list
        self.pending_switch_queries.remove( event.xid )
        '''
        # Check whether this stats reply pushes forward out notion of
        # "latest"
        '''
        if current_collection_epoch > self.max_stats_reply_epoch:
            self.max_stats_reply_epoch = current_collection_epoch
        '''
        # Remove switch from silent_switch list if it's on it
        if dpid in self.silent_switches:
            self.silent_switches.remove(dpid)
        # Get the deque holding our snapshots
        try:
            switch_stats_q = self.snapshots[dpid]
            # Are we adding a new snapshot?
            if len(switch_stats_q) == 0:
                # Create new snapshot and save it
                new_snapshot = Snapshot( self )
                # Set the collection epoch and the datapath id
                new_snapshot.collection_epoch = current_collection_epoch
                new_snapshot.timestamp = time.time()
                new_snapshot.dpid = dpid
                new_snapshot.number_of_flows = event.flow_count
                new_snapshot.bytes_in_flows = event.byte_count
                new_snapshot.packets_in_flows = event.packet_count
                # Always add the most recent snapshot to the front of the queue
                switch_stats_q.appendleft(new_snapshot)
            else:
                pass
                # Get the latest snapshot
                latest_snapshot = switch_stats_q[0]
                # If it's for this collection epoch, just update it/overwrite it
                if latest_snapshot.collection_epoch == current_collection_epoch:
                    latest_snapshot.timestamp = time.time()
                    latest_snapshot.number_of_flows = event.flow_count
                    latest_snapshot.bytes_in_flows = event.byte_count
                    latest_snapshot.packets_in_flows = event.packet_count
                else:
                    # Only add a new snapshot if it's later in time
                    # than the "latest" snapshot
                    if current_collection_epoch > latest_snapshot.collection_epoch:
                        new_snapshot = Snapshot( self )
                        new_snapshot.collection_epoch = current_collection_epoch
                        new_snapshot.timestamp = time.time()
                        new_snapshot.dpid = dpid
                        new_snapshot.number_of_flows = event.flow_count
                        new_snapshot.bytes_in_flows = event.byte_count
                        new_snapshot.packets_in_flows = event.packet_count
                        # Calculate any deltas from the latest snapshot
                        new_snapshot.epoch_delta = current_collection_epoch - \
                                        latest_snapshot.collection_epoch
                        # Always add the most recent snapshot to the front
                        # of the queue
                        switch_stats_q.appendleft(new_snapshot)
                        # Limit the number of old snapshots we keep around
                        if len(switch_stats_q) > self.max_snapshots_per_switch:
                            switch_stats_q.pop()
        except Exception:
            # NOTE(review): broad swallow — a missing self.snapshots[dpid]
            # entry or an attribute error vanishes silently here; consider
            # logging before ignoring.
            pass
        finally:
            pass
        return CONTINUE
def handle_table_stats_in(self, event):
"""Handle receipt of table stats from a switch.
@param event - table stats event to handle
"""
dpid = event.datapath_id
if event.xid in self.pending_switch_queries:
lg.debug( "responding to switch query for table stats" )
# Publish custom event
reply = SwitchQueryReplyEvent( event.xid, event.datapath_id, \
SwitchQueryReplyEvent.QUERY_TABLE_STATS,\
event )
self.post( pyevent( SwitchQueryReplyEvent.NAME, reply ) )
# Remove switch from silent_switch list if it's on it
if dpid in self.silent_switches:
self.silent_switches.remove(dpid)
tables = event.tables
return CONTINUE
    def handle_port_stats_in(self, event):
        """Handle receipt of port stats from a switch.

        Answers any pending active query, advances the max-reply-epoch
        watermark, clears the switch's "silent" flag, then stores/updates
        the per-epoch snapshot with the per-port counters (computing deltas
        against the previous snapshot when one is available).
        @param event - port stats event to handle
        """
        dpid = event.datapath_id
        if event.xid in self.pending_switch_queries:
            lg.debug( "responding to switch query for port stats" )
            # Publish custom event
            reply = SwitchQueryReplyEvent( event.xid, event.datapath_id, \
                                    SwitchQueryReplyEvent.QUERY_PORT_STATS,\
                                    event )
            self.post( pyevent( SwitchQueryReplyEvent.NAME, reply ) )
        # Use the reply xid as the current collection epoch
        current_collection_epoch = event.xid #self.collection_epoch
        # Check whether this stats reply pushes forward out notion of
        # "latest"
        if current_collection_epoch > self.max_stats_reply_epoch:
            self.max_stats_reply_epoch = current_collection_epoch
        # Remove switch from silent_switch list if it's on it
        if dpid in self.silent_switches:
            self.silent_switches.remove(dpid)
        '''
        self.topo.setNodeFaultStatus(dpid, False)
        '''
        ports = event.ports
        try:
            switch_stats_q = self.snapshots[dpid]
            # Are we adding a new snapshot?
            if len(switch_stats_q) == 0:
                # Create new snapshot and save it
                new_snapshot = Snapshot( self )
                # Set the collection epoch and the datapath id
                new_snapshot.collection_epoch = current_collection_epoch
                new_snapshot.timestamp = time.time()
                new_snapshot.dpid = dpid
                new_snapshot.store_port_info(ports, self.port_cap[dpid])
                # Always add the most recent snapshot to the front of the queue
                switch_stats_q.appendleft(new_snapshot)
            else:
                pass
                # Get the latest snapshot
                latest_snapshot = switch_stats_q[0]
                # If the latest snapshot is for this collection epoch, just
                # update it
                if latest_snapshot.collection_epoch == current_collection_epoch:
                    latest_snapshot.timestamp = time.time()
                    latest_snapshot.store_port_info(ports, self.port_cap[dpid])
                    # update deltas if we can
                    if len(switch_stats_q) > 1:
                        previous_snapshot = switch_stats_q[1]
                        latest_snapshot.compute_delta_from(previous_snapshot)
                else:
                    # Only add a new snapshot if it's more recent
                    # than the collection epoch of the "latest" snapshot
                    if current_collection_epoch > latest_snapshot.collection_epoch:
                        new_snapshot = Snapshot( self )
                        new_snapshot.collection_epoch = current_collection_epoch
                        new_snapshot.timestamp = time.time()
                        '''
                        new_snapshot.ports_active = ports_active
                        '''
                        new_snapshot.dpid = dpid
                        # store port info
                        new_snapshot.store_port_info(ports, self.port_cap[dpid])
                        # Compute deltas from the previous snapshot
                        new_snapshot.compute_delta_from(latest_snapshot)
                        # Always add the most recent snapshot to the
                        # front of the queue
                        switch_stats_q.appendleft(new_snapshot)
                        # Limit the number of old snapshots we keep around
                        if len(switch_stats_q) > self.max_snapshots_per_switch:
                            switch_stats_q.pop()
        except Exception:
            # NOTE(review): broad swallow — a missing dpid/port_cap entry or
            # an error while computing port deltas disappears silently here;
            # consider logging before ignoring.
            pass
        finally:
            pass
        return CONTINUE
def handle_flow_stats_in(self, event):
if event.xid in self.pending_switch_queries:
lg.debug( "responding to switch query for flow stats" )
# Publish custom event
reply = SwitchQueryReplyEvent( event.xid, event.datapath_id, \
SwitchQueryReplyEvent.QUERY_FLOW_STATS, event )
self.post( pyevent( SwitchQueryReplyEvent.NAME, reply ) )
return CONTINUE
def handle_queue_stats_in(self,event):
lg.debug( "handle queue stats in: %s" % (event.__dict__) )
if event.xid in self.pending_switch_queries:
lg.debug( "responding to switch query for queue stats" )
# Publish custom event
reply = SwitchQueryReplyEvent( event.xid, event.datapath_id, \
SwitchQueryReplyEvent.QUERY_QUEUE_STATS, event )
self.post( pyevent( SwitchQueryReplyEvent.NAME, reply ) )
return CONTINUE
# Static functions for encoding custom events as json
def encode_switch_query( self, obj ):
if isinstance( obj.pyevent, SwitchQueryReplyEvent ):
if obj.pyevent.query_type == \
SwitchQueryReplyEvent.QUERY_PORT_STATS:
return [obj.pyevent.reply.ports]
elif obj.pyevent.query_type == \
SwitchQueryReplyEvent.QUERY_TABLE_STATS:
return [obj.pyevent.reply.tables]
elif obj.pyevent.query_type == \
SwitchQueryReplyEvent.QUERY_AGG_STATS:
# Create a dict
dict = {}
dict['packet_count']=obj.pyevent.reply.packet_count
dict['byte_count']=obj.pyevent.reply.byte_count
dict['flow_count']=obj.pyevent.reply.flow_count
return [dict]
elif obj.pyevent.query_type == \
SwitchQueryReplyEvent.QUERY_LATEST_SNAPSHOT:
return obj.pyevent.reply.to_dict()
elif obj.pyevent.query_type == \
SwitchQueryReplyEvent.QUERY_FLOW_STATS:
return [obj.pyevent.reply.flows]
elif obj.pyevent.query_type == \
SwitchQueryReplyEvent.QUERY_QUEUE_STATS:
return [obj.pyevent.reply.queues]
else:
lg.debug( "not encoding switch query reply event" )
raise TypeError( repr(obj) + " is not JSON serializable" )
    def handle_link_util_reply_event(self, event):
        """Convert a link-utilization reply into a JSON "linkutils"
        monitoring message and deliver it either to the active poller
        (matched by xid) or to all subscribed "linkutils" streams.
        @param event - event whose pyevent carries port_utils and xid
        """
        # Nothing to report when no per-port utilization entries came back
        if len(event.pyevent.port_utils) > 0:
            portUtilsMsg = {}
            portUtilsMsg['type'] = "monitoring"
            portUtilsMsg['msg_type'] = "linkutils"
            utils = []
            for util in event.pyevent.port_utils:
                u = {}
                # NOTE(review): this slice strips the leading "0x" and the
                # final character — presumably the trailing 'L' of a Python 2
                # long; confirm it is correct for dpids that fit a plain int.
                u['dpid'] = hex(util.dpid)[2:len(hex(util.dpid))-1]
                u['port'] = str(util.port)
                '''***THIS CALCULATION OF UTILIZATION IS DISPUTABLE***'''
                # Utilization = mean of tx and rx rates over capacity
                # (0 when capacity is unknown or zero)
                add = util.bps_transmitted+util.bps_received
                avgrate = add/2
                if util.capacity:
                    ut = float(avgrate) / float(util.capacity)
                else:
                    ut = float(0)
                u['utilization'] = ut
                utils.append(u)
            portUtilsMsg['utils'] = utils
            if event.pyevent.xid != -1:
                lg.debug( "Replying to active poll" )
                # One-shot active query: route to the waiting protocol
                if event.pyevent.xid in self.pending_queries:
                    proto = self.pending_queries.pop( event.pyevent.xid )
                    if proto.connected:
                        proto.send(portUtilsMsg)
            else:
                # send to subscribed listeners
                if "linkutils" in self.subscribers:
                    for stream in self.subscribers["linkutils"]:
                        stream.reply(json.dumps(portUtilsMsg))
        return CONTINUE
    def getInterface(self):
        """Return the stringified Monitoring class as this component's
        interface identifier (NOTE(review): presumably consumed by the NOX
        component framework for lookup — confirm)."""
        return str(Monitoring)
def getFactory():
    """Return a factory whose instance() method creates Monitoring objects."""
    class _MonitoringFactory:
        """Factory producing Monitoring component instances."""
        def instance(self, ctxt):
            """Create a Monitoring bound to the given container context."""
            return Monitoring(ctxt)
    return _MonitoringFactory()
class PortCapability:
    """Record of a single switch port's identity and advertised
    capabilities, populated from an OpenFlow port description when a
    datapath joins. Also caches the derived maximum line rate."""
    def __init__(self):
        """Initialize with unknown identity and no advertised speeds."""
        self.port_name = ""
        self.port_number = -1
        self.port_enabled = False
        self.link_enabled = False
        # Speed/duplex feature flags decoded from the port feature bitmap
        self.supports_10Mb_hd = False
        self.supports_10Mb_fd = False
        self.supports_100Mb_hd = False
        self.supports_100Mb_fd = False
        self.supports_1Gb_hd = False
        self.supports_1Gb_fd = False
        self.supports_10Gb_fd = False
        self.max_speed = 0        # cached max speed in bits per second
        # NOTE(review): never written by this class; to_dict() derives
        # duplex from the supports_* flags instead
        self.full_duplex = False
    def compute_max_port_speed_bps(self):
        """Compute, cache (in max_speed) and return the max port speed in
        bps, from the fastest advertised speed flag."""
        # (== True comparisons replaced by direct truth tests)
        if self.supports_10Gb_fd:
            self.max_speed = 10000 * 1e6
        elif self.supports_1Gb_hd or self.supports_1Gb_fd:
            self.max_speed = 1000 * 1e6
        elif self.supports_100Mb_hd or self.supports_100Mb_fd:
            self.max_speed = 100 * 1e6
        elif self.supports_10Mb_hd or self.supports_10Mb_fd:
            self.max_speed = 10 * 1e6
        else:
            self.max_speed = 0
        return self.max_speed
    def to_dict(self):
        """Return a JSON-serializable summary of this port's capabilities.
        (local renamed so the builtin `dict` is no longer shadowed)"""
        summary = {}
        summary['port_name'] = self.port_name
        summary['port_number'] = self.port_number
        summary['port_enabled'] = self.port_enabled
        summary['max_speed'] = self.compute_max_port_speed_bps()
        summary['full_duplex'] = self.supports_10Gb_fd or self.supports_1Gb_fd\
                                 or self.supports_100Mb_fd or self.supports_10Mb_fd
        return summary
class PortUtilization:
    """Container for a single port's transmit/receive rates."""
    def __init__(self):
        """Initialize with sentinel identifiers and zero rates."""
        self.dpid = -1               # datapath id of the owning switch
        self.port = -1               # port number on that switch
        self.bps_transmitted = 0.0   # transmit rate, bits per second
        self.bps_received = 0.0      # receive rate, bits per second
class PortInfo:
    """Class keeps track of port capabilities and recent usage.

    Pairs the static PortCapability record with raw counters and deltas
    taken from one port-stats reply; owned by a Snapshot.
    """
    def __init__(self, port_capabilities, monitoring_module):
        """Init
        @param port_capabilities - port capacity data (PortCapability)
        @param monitoring_module - owning Monitoring component; used to
               publish PortErrorEvents
        """
        self.owner_snapshot = None  # Snapshot we belong to
        self.port_cap = port_capabilities
        self.port_number = -1
        self.monitoring = monitoring_module
        # Per-port counters (-1 means "not yet filled in")
        self.total_rx_bytes = -1
        self.total_tx_bytes = -1
        self.total_rx_packets = -1
        self.total_tx_packets = -1
        self.total_rx_packets_dropped = -1
        self.total_tx_packets_dropped = -1
        self.total_rx_errors = -1
        self.total_tx_errors = -1
        # changes in port stats data since the last collection epoch
        self.delta_rx_bytes = -1
        self.delta_tx_bytes = -1
        self.delta_rx_packets = -1
        self.delta_tx_packets = -1
        self.delta_rx_packets_dropped = -1
        self.delta_tx_packets_dropped = -1
        self.delta_rx_errors = -1
        self.delta_tx_errors = -1
    def to_dict(self):
        """Return a JSON-serializable dict of counters, deltas and the
        nested capability record. (local renamed: builtin `dict` was
        shadowed in the original)"""
        info = {}
        info['port_number'] = self.port_number
        # Save the nested capabilities structure
        info['port_cap'] = self.port_cap.to_dict()
        # Counters
        info['total_rx_bytes'] = self.total_rx_bytes
        info['total_tx_bytes'] = self.total_tx_bytes
        info['total_rx_packets'] = self.total_rx_packets
        info['total_tx_packets'] = self.total_tx_packets
        info['total_rx_packets_dropped'] = self.total_rx_packets_dropped
        info['total_tx_packets_dropped'] = self.total_tx_packets_dropped
        info['total_rx_errors'] = self.total_rx_errors
        info['total_tx_errors'] = self.total_tx_errors
        # Deltas
        info['delta_rx_bytes'] = self.delta_rx_bytes
        info['delta_tx_bytes'] = self.delta_tx_bytes
        info['delta_rx_packets'] = self.delta_rx_packets
        info['delta_tx_packets'] = self.delta_tx_packets
        info['delta_rx_packets_dropped'] = self.delta_rx_packets_dropped
        info['delta_tx_packets_dropped'] = self.delta_tx_packets_dropped
        info['delta_rx_errors'] = self.delta_rx_errors
        info['delta_tx_errors'] = self.delta_tx_errors
        return info
    def compute_delta_from(self, rhs, send_alarm = True):
        """Compute the counter deltas between this port snapshot and an
        older one (rhs); optionally publish a PortErrorEvent when drops or
        errors increased.
        @param rhs - port info object to compute delta from
        @param send_alarm - publish a PortErrorEvent on new drops/errors
        """
        # Clamp at 0 so a counter reset on the switch cannot go negative
        self.delta_rx_bytes = max(0, self.total_rx_bytes - rhs.total_rx_bytes)
        self.delta_tx_bytes = max(0, self.total_tx_bytes - rhs.total_tx_bytes)
        self.delta_rx_packets = max(
            0, self.total_rx_packets - rhs.total_rx_packets)
        self.delta_tx_packets = max(
            0, self.total_tx_packets - rhs.total_tx_packets)
        self.delta_rx_packets_dropped = max(
            0, self.total_rx_packets_dropped - rhs.total_rx_packets_dropped)
        self.delta_tx_packets_dropped = max(
            0, self.total_tx_packets_dropped - rhs.total_tx_packets_dropped)
        self.delta_rx_errors = max(
            0, self.total_rx_errors - rhs.total_rx_errors)
        self.delta_tx_errors = max(
            0, self.total_tx_errors - rhs.total_tx_errors)
        port_has_problems = (self.delta_rx_packets_dropped > 0 or
                             self.delta_tx_packets_dropped > 0 or
                             self.delta_rx_errors > 0 or
                             self.delta_tx_errors > 0)
        if port_has_problems and send_alarm:
            # Post a custom port error event.
            # BUGFIX: the original called self.post(...), but PortInfo has
            # no post() method (AttributeError at alarm time); publish
            # through the owning Monitoring component, whose post() the
            # event handlers already use.
            portError = PortErrorEvent(-1, self.owner_snapshot.dpid,
                                       self.port_number)
            portError.rx_dropped = self.delta_rx_packets_dropped
            portError.tx_dropped = self.delta_tx_packets_dropped
            portError.rx_errors = self.delta_rx_errors
            portError.tx_errors = self.delta_tx_errors
            self.monitoring.post(pyevent(PortErrorEvent.NAME, portError))
    def compute_max_port_speed_bps(self):
        """Compute the max port speed in bps.

        Delegates to the capability record, which applies the same speed
        tiers as the (previously duplicated) inline logic and caches the
        result in port_cap.max_speed.
        """
        return self.port_cap.compute_max_port_speed_bps()
    def estimate_packets_received_per_sec(self):
        """Estimate the packets received per sec as
        delta_rx_packets/(time since last collection in seconds)."""
        if self.delta_rx_packets == -1:
            return 0
        # NOTE(review): divides by owner_snapshot.time_since_delta, which is
        # 0 until deltas are computed; callers swallow the resulting error.
        return self.delta_rx_packets / self.owner_snapshot.time_since_delta
    def estimate_packets_sent_per_sec(self):
        """Estimate the packets sent per sec as
        delta_tx_packets/(time since last collection in seconds)."""
        if self.delta_tx_packets == -1:
            return 0
        return self.delta_tx_packets / self.owner_snapshot.time_since_delta
    def estimate_bits_received_per_sec(self):
        """Estimate the bits received per sec as
        delta_rx_bytes*8/(time since last collection in seconds)."""
        if self.delta_rx_bytes == -1:
            return 0
        return (self.delta_rx_bytes * 8) / self.owner_snapshot.time_since_delta
    def estimate_bits_sent_per_sec(self):
        """Estimate the bits sent per sec as
        delta_tx_bytes*8/(time since last collection in seconds)."""
        if self.delta_tx_bytes == -1:
            return 0
        return (self.delta_tx_bytes * 8) / self.owner_snapshot.time_since_delta
    def estimate_port_rx_utilization(self):
        """Estimate the port rx utilization as
        [(bits received/s)/max port speed in bps]*100."""
        port_speed_bps = self.port_cap.compute_max_port_speed_bps()
        if port_speed_bps > 0:
            return (self.estimate_bits_received_per_sec() / port_speed_bps) * 100
        return 0
    def estimate_port_tx_utilization(self):
        """Estimate the port tx utilization as
        [(bits sent/s)/max port speed in bps]*100."""
        port_speed_bps = self.port_cap.compute_max_port_speed_bps()
        if port_speed_bps > 0:
            return (self.estimate_bits_sent_per_sec() / port_speed_bps) * 100
        return 0
    def estimate_avg_port_utilization(self):
        """Estimate the average of rx and tx utilization."""
        return (self.estimate_port_rx_utilization() +
                self.estimate_port_tx_utilization()) / 2.0
class Snapshot:
    """Simple container for storing statistics snapshots for a switch.

    One Snapshot accumulates the aggregate-stats and port-stats replies a
    switch sends during a single collection epoch, plus deltas computed
    against the previous epoch's snapshot.
    """
    def __init__(self, monitor_inst):
        """Initialize all counters to -1 so unfilled fields are
        recognizable; an update gives each counter a value >= 0.
        @param monitor_inst - owning Monitoring component
        """
        self.monitor = monitor_inst
        self.dpid = -1              # what switch
        self.collection_epoch = -1  # when collected (logical clock value)
        self.time_since_delta = 0   # wall-clock seconds since the snapshot
                                    # deltas were last computed against
        self.timestamp = -1         # system time stamp
        # spacing (in epochs) between this snapshot and the previous one;
        # should usually be 1, so larger values indicate missed replies
        self.epoch_delta = -1
        # From aggregate stats - these are point in time counts
        # i.e. number of flows active "now"
        self.number_of_flows = -1
        self.bytes_in_flows = -1
        self.packets_in_flows = -1
        # Per-port PortInfo records keyed by port number
        self.port_info = dict()
        # Aggregate counters over ALL the ports for a specific switch
        self.total_rx_bytes = -1
        self.total_tx_bytes = -1
        self.total_rx_packets = -1
        self.total_tx_packets = -1
        self.total_rx_packets_dropped = -1
        self.total_tx_packets_dropped = -1
        self.total_rx_errors = -1
        self.total_tx_errors = -1
        # changes in aggregate switch-level data since the last epoch
        self.delta_rx_bytes = -1
        self.delta_tx_bytes = -1
        self.delta_rx_packets = -1
        self.delta_tx_packets = -1
        self.delta_rx_packets_dropped = -1
        self.delta_tx_packets_dropped = -1
        self.delta_rx_errors = -1
        self.delta_tx_errors = -1
    def to_dict(self):
        """Return a JSON-serializable dict of this snapshot, with per-port
        info nested under 'ports'. (local renamed: builtin `dict` was
        shadowed in the original)"""
        snap = {}
        snap['dpid'] = self.dpid
        snap['collection_epoch'] = self.collection_epoch
        snap['timestamp'] = self.timestamp
        snap['time_since_delta'] = self.time_since_delta
        snap['epoch_delta'] = self.epoch_delta
        snap['number_of_flows'] = self.number_of_flows
        snap['bytes_in_flows'] = self.bytes_in_flows
        snap['packets_in_flows'] = self.packets_in_flows
        # Port info
        ports = {}
        for port_num in self.port_info:
            ports[port_num] = self.port_info[port_num].to_dict()
        snap['ports'] = ports
        # Counters
        snap['total_rx_bytes'] = self.total_rx_bytes
        snap['total_tx_bytes'] = self.total_tx_bytes
        snap['total_rx_packets'] = self.total_rx_packets
        snap['total_tx_packets'] = self.total_tx_packets
        snap['total_rx_packets_dropped'] = self.total_rx_packets_dropped
        snap['total_tx_packets_dropped'] = self.total_tx_packets_dropped
        snap['total_rx_errors'] = self.total_rx_errors
        snap['total_tx_errors'] = self.total_tx_errors
        # Deltas
        snap['delta_rx_bytes'] = self.delta_rx_bytes
        snap['delta_tx_bytes'] = self.delta_tx_bytes
        snap['delta_rx_packets'] = self.delta_rx_packets
        snap['delta_tx_packets'] = self.delta_tx_packets
        snap['delta_rx_packets_dropped'] = self.delta_rx_packets_dropped
        snap['delta_tx_packets_dropped'] = self.delta_tx_packets_dropped
        snap['delta_rx_errors'] = self.delta_rx_errors
        snap['delta_tx_errors'] = self.delta_tx_errors
        return snap
    def compute_delta_from(self, rhs):
        """Compute the counter and epoch deltas between this
        snapshot and an older one (rhs), then per-port deltas.
        @param rhs - snapshot to compute delta from
        """
        if self.collection_epoch != rhs.collection_epoch:
            self.epoch_delta = self.collection_epoch - rhs.collection_epoch
            self.time_since_delta = self.timestamp - rhs.timestamp
        # Clamp at 0 so a counter reset on the switch cannot go negative
        self.delta_rx_bytes = max(0, self.total_rx_bytes - rhs.total_rx_bytes)
        self.delta_tx_bytes = max(0, self.total_tx_bytes - rhs.total_tx_bytes)
        self.delta_rx_packets = max(
            0, self.total_rx_packets - rhs.total_rx_packets)
        self.delta_tx_packets = max(
            0, self.total_tx_packets - rhs.total_tx_packets)
        self.delta_rx_packets_dropped = max(
            0, self.total_rx_packets_dropped - rhs.total_rx_packets_dropped)
        self.delta_tx_packets_dropped = max(
            0, self.total_tx_packets_dropped - rhs.total_tx_packets_dropped)
        self.delta_rx_errors = max(
            0, self.total_rx_errors - rhs.total_rx_errors)
        self.delta_tx_errors = max(
            0, self.total_tx_errors - rhs.total_tx_errors)
        # Compute port deltas.
        # BUGFIX: a port may exist in this snapshot but not in rhs (e.g. it
        # appeared between polls); the original indexed rhs.port_info
        # unconditionally, raising a KeyError that callers silently
        # swallowed (losing the whole update). Skip such ports instead.
        for key in self.port_info:
            if key in rhs.port_info:
                self.port_info[key].compute_delta_from(rhs.port_info[key])
    def store_port_info(self, ports, port_cap):
        """Save per-port counters and recompute switch-wide totals.
        @param ports - collection of port info structures (stats reply rows)
        @param port_cap - dict of PortCapability keyed by port number
        """
        self.total_rx_bytes = 0
        self.total_tx_bytes = 0
        self.total_rx_packets = 0
        self.total_tx_packets = 0
        self.total_rx_packets_dropped = 0
        self.total_tx_packets_dropped = 0
        self.total_rx_errors = 0
        self.total_tx_errors = 0
        for item in ports:
            # Accumulate the switch-wide counter totals
            self.total_rx_bytes += item['rx_bytes']
            self.total_tx_bytes += item['tx_bytes']
            self.total_rx_packets += item['rx_packets']
            self.total_tx_packets += item['tx_packets']
            self.total_rx_packets_dropped += item['rx_dropped']
            self.total_tx_packets_dropped += item['tx_dropped']
            self.total_rx_errors += item['rx_errors']
            self.total_tx_errors += item['tx_errors']
            # Store each item in the ports collection in the port dict.
            # NOTE(review): assumes port_cap has an entry for every port in
            # the reply (populated at datapath join) — a port added later
            # would raise KeyError here, swallowed by the calling handler.
            new_port_info = PortInfo(port_cap[item['port_no']], self.monitor)
            new_port_info.owner_snapshot = self
            new_port_info.port_number = item['port_no']
            new_port_info.total_rx_bytes = item['rx_bytes']
            new_port_info.total_tx_bytes = item['tx_bytes']
            new_port_info.total_rx_packets = item['rx_packets']
            new_port_info.total_tx_packets = item['tx_packets']
            new_port_info.total_rx_packets_dropped = item['rx_dropped']
            new_port_info.total_tx_packets_dropped = item['tx_dropped']
            new_port_info.total_rx_errors = item['rx_errors']
            new_port_info.total_tx_errors = item['tx_errors']
            self.port_info[new_port_info.port_number] = new_port_info
    def get_total_rx_bytes(self):
        """Return the total number of bytes received at this switch
        across all its ports."""
        return self.total_rx_bytes
    def get_total_tx_bytes(self):
        """Return the total number of bytes transmitted by this switch
        across all its ports."""
        return self.total_tx_bytes
    def ready(self):
        """Indicate whether this snapshot has been filled in with data
        from table, aggregate and port stats replies. A snapshot is not
        ready until all its delta counters have been computed (epoch 1 is
        ready by definition — there is nothing to diff against)."""
        if self.collection_epoch == 1:
            return True
        # Any remaining -1 delta means some reply has not arrived yet
        return (self.delta_rx_bytes != -1 and
                self.delta_tx_bytes != -1 and
                self.delta_rx_packets_dropped != -1 and
                self.delta_tx_packets_dropped != -1 and
                self.delta_rx_errors != -1 and
                self.delta_tx_errors != -1)
|
# -*- coding: utf-8 -*-
import abc
import logging
import datetime
import functools
import httplib as http
import time
import urlparse
import uuid
from flask import request
from oauthlib.oauth2.rfc6749.errors import MissingTokenError
from requests.exceptions import HTTPError as RequestsHTTPError
from modularodm import fields, Q
from modularodm.storage.base import KeyExistsException
from modularodm.validators import MaxLengthValidator, URLValidator
from requests_oauthlib import OAuth1Session
from requests_oauthlib import OAuth2Session
from framework.auth import cas
from framework.exceptions import HTTPError, PermissionsError
from framework.mongo import ObjectId, StoredObject
from framework.mongo.utils import unique_on
from framework.mongo.validators import string_required
from framework.sessions import session
from website import settings
from website.oauth.utils import PROVIDER_LOOKUP
from website.security import random_string
from website.util import web_url_for, api_v2_url
logger = logging.getLogger(__name__)
OAUTH1 = 1
OAUTH2 = 2
generate_client_secret = functools.partial(random_string, length=40)
@unique_on(['provider', 'provider_id'])
class ExternalAccount(StoredObject):
    """An account on an external service.
    Note that this object is not and should not be aware of what other objects
    are associated with it. This is by design, and this object should be kept as
    thin as possible, containing only those fields that must be stored in the
    database.
    The ``provider`` field is a de facto foreign key to an ``ExternalProvider``
    object, as providers are not stored in the database.
    """
    _id = fields.StringField(default=lambda: str(ObjectId()), primary=True)
    # The OAuth credentials. One or both of these fields should be populated.
    # For OAuth1, this is usually the "oauth_token"
    # For OAuth2, this is usually the "access_token"
    oauth_key = fields.StringField()
    # For OAuth1, this is usually the "oauth_token_secret"
    # For OAuth2, this is not used
    oauth_secret = fields.StringField()
    # Used for OAuth2 only
    refresh_token = fields.StringField()
    # NOTE(review): presumably when oauth_key expires (OAuth2) — confirm
    # against the provider refresh logic
    expires_at = fields.DateTimeField()
    # OAuth2 scopes granted to this token
    scopes = fields.StringField(list=True, default=lambda: list())
    # The `name` of the service
    # This lets us query for only accounts on a particular provider
    provider = fields.StringField(required=True)
    # The proper 'name' of the service
    # Needed for account serialization
    provider_name = fields.StringField(required=True)
    # The unique, persistent ID on the remote service.
    provider_id = fields.StringField()
    # The user's name on the external service
    display_name = fields.StringField()
    # A link to the user's profile on the external service
    profile_url = fields.StringField()
    def __repr__(self):
        # e.g. <ExternalAccount: github/12345>
        return '<ExternalAccount: {}/{}>'.format(self.provider,
                                                 self.provider_id)
class ExternalProviderMeta(abc.ABCMeta):
    """Keeps track of subclasses of the ``ExternalProvider`` object.

    Each concrete subclass is registered in PROVIDER_LOOKUP keyed by its
    ``short_name`` as a side effect of class creation.
    """
    def __init__(cls, name, bases, dct):
        super(ExternalProviderMeta, cls).__init__(name, bases, dct)
        # Only register concrete providers: on the abstract base itself,
        # short_name is still an abstractproperty instance.
        if not isinstance(cls.short_name, abc.abstractproperty):
            PROVIDER_LOOKUP[cls.short_name] = cls
class ExternalProvider(object):
    """A connection to an external service (ex: GitHub).

    This object contains no credentials, and is not saved in the database.
    It provides an unauthenticated session with the provider, unless ``account``
    has been set - in which case, it provides a connection authenticated as the
    ``ExternalAccount`` instance.

    Conceptually, this can be thought of as an extension of ``ExternalAccount``.
    It's a separate object because this must be subclassed for each provider,
    and ``ExternalAccount`` instances are stored within a single collection.
    """

    # Registers concrete subclasses in PROVIDER_LOOKUP by ``short_name``.
    __metaclass__ = ExternalProviderMeta

    # Default to OAuth v2.0.
    _oauth_version = OAUTH2

    # Providers that have expiring tokens must override these
    auto_refresh_url = None
    refresh_time = 0  # When to refresh the oauth_key (seconds)
    expiry_time = 0  # If/When the refresh token expires (seconds). 0 indicates a non-expiring refresh token

    def __init__(self, account=None):
        super(ExternalProvider, self).__init__()

        # provide an unauthenticated session by default
        self.account = account

    def __repr__(self):
        return '<{name}: {status}>'.format(
            name=self.__class__.__name__,
            status=self.account.provider_id if self.account else 'anonymous'
        )

    @abc.abstractproperty
    def auth_url_base(self):
        """The base URL to begin the OAuth dance"""
        pass

    @property
    def auth_url(self):
        """The URL to begin the OAuth dance.

        This property method has side effects - it at least adds temporary
        information to the session so that callbacks can be associated with
        the correct user. For OAuth1, it calls the provider to obtain
        temporary credentials to start the flow.
        """

        # create a dict on the session object if it's not already there
        if session.data.get('oauth_states') is None:
            session.data['oauth_states'] = {}

        if self._oauth_version == OAUTH2:
            # build the URL
            oauth = OAuth2Session(
                self.client_id,
                redirect_uri=web_url_for('oauth_callback',
                                         service_name=self.short_name,
                                         _absolute=True),
                scope=self.default_scopes,
            )

            url, state = oauth.authorization_url(self.auth_url_base)

            # save state token to the session for confirmation in the callback
            session.data['oauth_states'][self.short_name] = {'state': state}

        elif self._oauth_version == OAUTH1:
            # get a request token
            oauth = OAuth1Session(
                client_key=self.client_id,
                client_secret=self.client_secret,
            )

            # request temporary credentials from the provider
            response = oauth.fetch_request_token(self.request_token_url)

            # store them in the session for use in the callback
            session.data['oauth_states'][self.short_name] = {
                'token': response.get('oauth_token'),
                'secret': response.get('oauth_token_secret'),
            }

            url = oauth.authorization_url(self.auth_url_base)

        return url

    @abc.abstractproperty
    def callback_url(self):
        """The provider URL to exchange the code for a token"""
        pass

    @abc.abstractproperty
    def client_id(self):
        """OAuth Client ID. a/k/a: Application ID"""
        pass

    @abc.abstractproperty
    def client_secret(self):
        """OAuth Client Secret. a/k/a: Application Secret, Application Key"""
        pass

    default_scopes = list()

    @abc.abstractproperty
    def name(self):
        """Human-readable name of the service. e.g.: ORCiD, GitHub"""
        pass

    @abc.abstractproperty
    def short_name(self):
        """Name of the service to be used internally. e.g.: orcid, github"""
        pass

    def auth_callback(self, user, **kwargs):
        """Exchange temporary credentials for permanent credentials

        This is called in the view that handles the user once they are returned
        to the OSF after authenticating on the external service.

        :param user: the user to attach the resulting ``ExternalAccount`` to
        :return bool: True on success; False when the provider reported an error
        :raises PermissionsError: when the callback does not match an OAuth
            flow started in this session
        :raises HTTPError: 503 when the OAuth2 token exchange fails
        """
        if 'error' in request.args:
            return False

        # make sure the user has temporary credentials for this provider
        try:
            cached_credentials = session.data['oauth_states'][self.short_name]
        except KeyError:
            raise PermissionsError('OAuth flow not recognized.')

        if self._oauth_version == OAUTH1:
            request_token = request.args.get('oauth_token')

            # make sure this is the same user that started the flow
            if cached_credentials.get('token') != request_token:
                raise PermissionsError('Request token does not match')

            response = OAuth1Session(
                client_key=self.client_id,
                client_secret=self.client_secret,
                resource_owner_key=cached_credentials.get('token'),
                resource_owner_secret=cached_credentials.get('secret'),
                verifier=request.args.get('oauth_verifier'),
            ).fetch_access_token(self.callback_url)

        elif self._oauth_version == OAUTH2:
            state = request.args.get('state')

            # make sure this is the same user that started the flow
            if cached_credentials.get('state') != state:
                raise PermissionsError('Request token does not match')

            try:
                response = OAuth2Session(
                    self.client_id,
                    redirect_uri=web_url_for(
                        'oauth_callback',
                        service_name=self.short_name,
                        _absolute=True
                    ),
                ).fetch_token(
                    self.callback_url,
                    client_secret=self.client_secret,
                    code=request.args.get('code'),
                )
            except (MissingTokenError, RequestsHTTPError):
                raise HTTPError(http.SERVICE_UNAVAILABLE)

        # pre-set as many values as possible for the ``ExternalAccount``
        info = self._default_handle_callback(response)
        # call the hook for subclasses to parse values from the response
        info.update(self.handle_callback(response))

        return self._set_external_account(user, info)

    def _set_external_account(self, user, info):
        """Create (or fetch) the ``ExternalAccount`` described by ``info``,
        update its credentials, and attach it to ``user``.
        """
        try:
            # create a new ``ExternalAccount`` ...
            self.account = ExternalAccount(
                provider=self.short_name,
                provider_id=info['provider_id'],
                provider_name=self.name,
            )
            self.account.save()
        except KeyExistsException:
            # ... or get the old one
            self.account = ExternalAccount.find_one(
                Q('provider', 'eq', self.short_name) &
                Q('provider_id', 'eq', info['provider_id'])
            )
            assert self.account is not None

        # ensure that provider_name is correct
        self.account.provider_name = self.name
        # required
        self.account.oauth_key = info['key']

        # only for OAuth1
        self.account.oauth_secret = info.get('secret')

        # only for OAuth2
        self.account.expires_at = info.get('expires_at')
        self.account.refresh_token = info.get('refresh_token')

        # additional information
        self.account.display_name = info.get('display_name')
        self.account.profile_url = info.get('profile_url')

        self.account.save()

        # add it to the user's list of ``ExternalAccounts``
        if self.account not in user.external_accounts:
            user.external_accounts.append(self.account)
            user.save()

        return True

    def _default_handle_callback(self, data):
        """Parse as much out of the key exchange's response as possible.

        This should not be over-ridden in subclasses.
        """
        if self._oauth_version == OAUTH1:
            key = data.get('oauth_token')
            secret = data.get('oauth_token_secret')

            values = {}

            if key:
                values['key'] = key
            if secret:
                values['secret'] = secret

            return values

        elif self._oauth_version == OAUTH2:
            key = data.get('access_token')
            refresh_token = data.get('refresh_token')
            expires_at = data.get('expires_at')
            scopes = data.get('scope')

            values = {}

            if key:
                values['key'] = key
            if scopes:
                values['scope'] = scopes
            if refresh_token:
                values['refresh_token'] = refresh_token
            if expires_at:
                values['expires_at'] = datetime.datetime.fromtimestamp(
                    float(expires_at)
                )

            return values

    @abc.abstractmethod
    def handle_callback(self, response):
        """Hook for allowing subclasses to parse information from the callback.

        Subclasses should implement this method to provide `provider_id`
        and `profile_url`.

        Values provided by ``self._default_handle_callback`` can be over-ridden
        here as well, in the unexpected case that they are parsed incorrectly
        by default.

        :param response: The JSON returned by the provider during the exchange
        :return dict:
        """
        pass

    def refresh_oauth_key(self, force=False, extra=None, resp_auth_token_key='access_token',
                          resp_refresh_token_key='refresh_token', resp_expiry_fn=None):
        """Handles the refreshing of an oauth_key for account associated with this provider.
        Not all addons need to use this, as some do not have oauth_keys that expire.

        Subclasses must define the following for this functionality:
        `auto_refresh_url` - URL to use when refreshing tokens. Must use HTTPS
        `refresh_time` - Time (in seconds) that the oauth_key should be refreshed after.
        Typically half the duration of validity. Cannot be 0.

        Providers may have different keywords in their response bodies, kwargs
        `resp_*_key` allow subclasses to override these if necessary.

        kwarg `resp_expiry_fn` allows subclasses to specify a function that will return the
        datetime-formatted oauth_key expiry key, given a successful refresh response from
        `auto_refresh_url`. A default using 'expires_in' as a key is provided.

        :return bool: True when a refresh was performed, otherwise False
        """
        # Ensure this is an authenticated Provider that uses token refreshing
        if not (self.account and self.auto_refresh_url):
            return False

        # Ensure this Provider is for a valid addon
        if not (self.client_id and self.client_secret):
            return False

        # Ensure a refresh is needed
        if not (force or self._needs_refresh()):
            return False

        if self.has_expired_credentials and not force:
            return False

        # BUG FIX: ``extra`` previously defaulted to a shared mutable ``{}``
        # that was mutated below (and mutated caller-supplied dicts). Copy it
        # instead of updating it in place.
        extra = dict(extra) if extra else {}

        resp_expiry_fn = resp_expiry_fn or (lambda x: datetime.datetime.utcfromtimestamp(time.time() + float(x['expires_in'])))

        client = OAuth2Session(
            self.client_id,
            token={
                'access_token': self.account.oauth_key,
                'refresh_token': self.account.refresh_token,
                'token_type': 'Bearer',
                'expires_in': '-30',
            }
        )

        extra.update({
            'client_id': self.client_id,
            'client_secret': self.client_secret
        })

        token = client.refresh_token(
            self.auto_refresh_url,
            **extra
        )
        self.account.oauth_key = token[resp_auth_token_key]
        self.account.refresh_token = token[resp_refresh_token_key]
        self.account.expires_at = resp_expiry_fn(token)
        self.account.save()
        return True

    def _needs_refresh(self):
        """Determines whether or not an associated ExternalAccount needs
        a oauth_key.

        return bool: True if needs_refresh
        """
        if self.refresh_time and self.account.expires_at:
            return (self.account.expires_at - datetime.datetime.utcnow()).total_seconds() < self.refresh_time
        return False

    @property
    def has_expired_credentials(self):
        """Determines whether or not an associated ExternalAccount has
        expired credentials that can no longer be renewed

        return bool: True if cannot be refreshed
        """
        if self.expiry_time and self.account.expires_at:
            return (datetime.datetime.utcnow() - self.account.expires_at).total_seconds() > self.expiry_time
        return False
class ApiOAuth2Scope(StoredObject):
    """
    Store information about recognized OAuth2 scopes. Only scopes registered under this database model can
    be requested by third parties.
    """
    _id = fields.StringField(primary=True,
                             default=lambda: str(ObjectId()))
    # Scope name as requested by clients, e.g. "osf.full_read"
    name = fields.StringField(unique=True, required=True, index=True)
    # Human-readable description shown to users when the scope is requested
    description = fields.StringField(required=True)
    is_active = fields.BooleanField(default=True, index=True)  # TODO: Add mechanism to deactivate a scope?
class ApiOAuth2Application(StoredObject):
    """Registration and key for user-created OAuth API applications

    This collection is also used by CAS to create the master list of available applications.
    Any changes made to field names in this model must be echoed in the CAS implementation.
    """
    _id = fields.StringField(
        primary=True,
        default=lambda: str(ObjectId())
    )

    # Client ID and secret. Use separate ID field so ID format doesn't have to be restricted to database internals.
    client_id = fields.StringField(default=lambda: uuid.uuid4().hex,  # Not *guaranteed* unique, but very unlikely
                                   unique=True,
                                   index=True)
    client_secret = fields.StringField(default=generate_client_secret)

    is_active = fields.BooleanField(default=True,  # Set to False if application is deactivated
                                    index=True)

    owner = fields.ForeignField('User',
                                index=True,
                                required=True)

    # User-specified application descriptors
    name = fields.StringField(index=True, required=True, validate=[string_required, MaxLengthValidator(200)])
    description = fields.StringField(required=False, validate=MaxLengthValidator(1000))

    date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow,
                                        editable=False)

    home_url = fields.StringField(required=True,
                                  validate=URLValidator())
    callback_url = fields.StringField(required=True,
                                      validate=URLValidator())

    def deactivate(self, save=False):
        """
        Deactivate an ApiOAuth2Application

        Does not delete the database record, but revokes all tokens and sets a flag that hides this instance from API

        :param save: persist the change immediately when True
        :return: True on success (raises on CAS failure)
        """
        client = cas.get_client()
        # Will raise a CasHttpError if deletion fails, which will also stop setting of active=False.
        resp = client.revoke_application_tokens(self.client_id, self.client_secret)  # noqa
        self.is_active = False
        if save:
            self.save()
        return True

    def reset_secret(self, save=False):
        """
        Reset the secret of an ApiOAuth2Application
        Revokes all tokens

        :param save: persist the new secret immediately when True
        :return: True on success (raises on CAS failure)
        """
        client = cas.get_client()
        client.revoke_application_tokens(self.client_id, self.client_secret)
        self.client_secret = generate_client_secret()
        if save:
            self.save()
        return True

    @property
    def url(self):
        # Relative settings-page URL for this application.
        return '/settings/applications/{}/'.format(self.client_id)

    @property
    def absolute_url(self):
        # Absolute settings-page URL, rooted at the configured domain.
        return urlparse.urljoin(settings.DOMAIN, self.url)

    # Properties used by Django and DRF "Links: self" field
    @property
    def absolute_api_v2_url(self):
        path = '/applications/{}/'.format(self.client_id)
        return api_v2_url(path)

    # used by django and DRF
    def get_absolute_url(self):
        return self.absolute_api_v2_url
class ApiOAuth2PersonalToken(StoredObject):
    """Information for user-created personal access tokens

    This collection is also used by CAS to create the master list of available tokens.
    Any changes made to field names in this model must be echoed in the CAS implementation.
    """
    _id = fields.StringField(primary=True,
                             default=lambda: str(ObjectId()))

    # Name of the field being `token_id` is a CAS requirement.
    # This is the actual value of the token that's used to authenticate
    token_id = fields.StringField(default=functools.partial(random_string, length=70),
                                  unique=True)

    owner = fields.ForeignField('User',
                                index=True,
                                required=True)

    # User-chosen label for the token
    name = fields.StringField(required=True, index=True)

    # This field is a space delimited list of scopes, e.g. "osf.full_read osf.full_write"
    scopes = fields.StringField(required=True)

    # False once the token has been deactivated via ``deactivate``
    is_active = fields.BooleanField(default=True, index=True)

    def deactivate(self, save=False):
        """
        Deactivate an ApiOAuth2PersonalToken

        Does not delete the database record, but hides this instance from API

        :param save: persist the change immediately when True
        :return: True on success
        """
        client = cas.get_client()
        # Will raise a CasHttpError if deletion fails for any reason other than the token
        # not yet being created. This will also stop setting of active=False.
        try:
            resp = client.revoke_tokens({'token': self.token_id})  # noqa
        except cas.CasHTTPError as e:
            if e.code == 400:
                pass  # Token hasn't been used yet, so not created in cas
            else:
                raise e
        self.is_active = False
        if save:
            self.save()
        return True

    @property
    def url(self):
        # Relative settings-page URL for this token.
        return '/settings/tokens/{}/'.format(self._id)

    @property
    def absolute_url(self):
        # Absolute settings-page URL, rooted at the configured domain.
        return urlparse.urljoin(settings.DOMAIN, self.url)

    # Properties used by Django and DRF "Links: self" field
    @property
    def absolute_api_v2_url(self):
        path = '/tokens/{}/'.format(self._id)
        return api_v2_url(path)

    # used by django and DRF
    def get_absolute_url(self):
        return self.absolute_api_v2_url
# Repository: Meng-Xiang-Rui/qusource
import numpy as np
from scipy.linalg import expm
import matplotlib.pyplot as plt
import scipy.stats as stats
from scipy.stats import bernoulli
import random
# 4x10 projector used by ``swap``: selects columns 1, 3, 4 and 8 of the
# 10-dimensional two-particle Hamiltonian as the computational two-qubit
# subspace.
# NOTE(review): assumed basis mapping |00>,|01>,|10>,|11> -> columns
# 1, 3, 4, 8 — confirm against the basis ordering of the Hamiltonian in
# ``swap``.
trans_mat = np.zeros((4, 10))
trans_mat[0, 1] = trans_mat[1, 3] = trans_mat[2, 4] = trans_mat[3, 8] = 1
def int2bin(n, count=24):
    """Return the low ``count`` bits of ``n`` as a binary string, MSB first."""
    bits = []
    for shift in range(count - 1, -1, -1):
        bits.append(str((n >> shift) & 1))
    return "".join(bits)
def loc(x):
    """Convert a string of 0/1 characters to its decimal value.

    :param x: string of 0/1
    :return: decimal integer
    """
    return int(x, base=2)
def dagger(x):
    """Hermitian conjugate (conjugate transpose) of ``x``."""
    return np.conjugate(x).T
def state_init(N, site):
    """
    Build an ``N``-qubit computational-basis state.

    :param N: length of state
    :param site: bit string of the site
    :type site: str
    :return: state vector with a single 1 at the basis index given by ``site``
    """
    basis_index = int(site, 2)
    init_state = np.zeros(2 ** N)
    init_state[basis_index] = 1
    return init_state
def W_state(N, log = False):
    """N-qubit W state: equal amplitude on every single-excitation basis state.

    When ``log`` is True the amplitudes are also written to 'w_psi<N>.txt',
    one 'real imag' pair per line.
    """
    amplitude = 1 / (N ** 0.5)
    state = np.zeros(2 ** N, dtype='complex')
    for qubit in range(N):
        state[2 ** qubit] = amplitude
    if log:
        with open('w_psi' + str(N) + '.txt', mode='w') as f:
            for amp in state:
                f.write(str(amp.real) + ' ' + str(amp.imag) + '\n')
    return state
def state_save(state, path=None):
    """Write a complex state vector to ``path``, one 'real imag' pair per line.

    :param state: state vector of length 2**N
    :param path: output file; defaults to '<N> qubits_state.txt'
    """
    N = int(np.log2(len(state)))
    path = path if path else str(N) + ' qubits_state' + '.txt'
    # ``with`` guarantees the file is closed even if a write raises
    # (the original left the handle open on error).
    with open(path, mode='w') as f:
        for i in range(2 ** N):
            f.write(str(state[i].real) + ' ' + str(state[i].imag) + '\n')
    print(path)
def amp_save(state, path=None):
    """Write the amplitudes |state_i| to ``path`` as 'abs 0.0' per line.

    :param state: state vector of length 2**N
    :param path: output file; defaults to '<N> qubits_state_amp.txt'
    """
    N = int(np.log2(len(state)))
    path = path if path else str(N) + ' qubits_state_amp' + '.txt'
    # ``with`` guarantees the file is closed even if a write raises
    # (the original left the handle open on error).
    with open(path, mode='w') as f:
        for i in range(2 ** N):
            f.write(str(np.abs(state[i])) + ' ' + str(0.0000) + '\n')
    print(path)
def sparse_check(x):
    """Count nonzero entries of ``x``; return ``(count, density)``."""
    flat = x.flatten(order='C')
    nonzero = int(np.count_nonzero(flat))
    return nonzero, nonzero / len(flat)
def unitary_check(x):
    """Check whether ``x`` is unitary (Frobenius distance of x†x from I < 1e-10).

    :return: True if unitary; otherwise prints the error and returns False
        (previously it fell through and returned None implicitly).
    """
    threshold = 1e-10
    # x†x should be the identity for a unitary matrix.
    distance = np.linalg.norm(np.dot(x.conj().T, x) - np.eye(len(x)))
    if distance < threshold:
        return True
    print('not unitary, error = {}'.format(distance))
    return False
def set_bit_val(byte, index, N, val):
    """Set or clear a single bit of ``byte``.

    The affected bit sits at position ``N - index`` counting from the least
    significant bit (i.e. bit ``index`` of an ``N``-bit word read MSB-first).

    :param byte: original integer value
    :param index: which bit to change (MSB-first position)
    :param N: word width used for the MSB-first indexing
    :param val: target bit value, 0 or 1 (truthy/falsy)
    :return: the modified value
    """
    mask = 1 << (N - index)
    return (byte | mask) if val else (byte & ~mask)
def site(data, N, i):
    """Bit ``i`` of ``data`` under MSB-first indexing of an ``N``-bit word."""
    return (data >> (N - i)) & 1
def fastmul(m,n, gate, state):
    """Apply a two-qubit ``gate`` to qubits ``m`` and ``n`` of ``state``.

    Avoids building the full 2**N x 2**N operator: for each global basis
    index it gathers the four partner amplitudes that differ only in bits
    (m, n) and combines them with the matching gate column.

    :param m: first target qubit (MSB-first indexing, as in ``site``)
    :param n: second target qubit
    :param gate: 4x4 matrix acting on the (m, n) subspace
    :param state: state vector of length 2**N
    :return: the transformed state vector
    """
    N = int(np.log2(len(state)))
    # Two-bit value (bit m, bit n) of every global basis index.
    index = [2*site(i,N,m)+site(i,N,n) for i in range(2**N)]
    gate = gate.T
    # Per-index gate column relevant for that basis state.
    tmat = gate[:, index]
    # Row k of v: all basis indices with bits (m, n) forced to the value k.
    v = np.arange(2**N).reshape(1,2**N).repeat(4,0)
    for i in range(4):
        p = site(i, 2, 1)
        q = site(i, 2, 2)
        v[i, :] = set_bit_val(v[i, :], m, N, p)
        v[i, :] = set_bit_val(v[i, :], n, N, q)
    # Gather the partner amplitudes and contract against the gate columns.
    v = state[v]
    tmat *= v
    res = tmat.sum(0)
    return res
def swap(U, J, t, Delta=0):
    """Two-qubit gate from time evolution of a 10-level two-particle Hamiltonian.

    Builds H with on-site interaction ``U``, hopping ``J`` and detuning
    ``Delta``, evolves for time ``t`` (units of 2*pi), and projects onto the
    two-qubit subspace with the module-level ``trans_mat``.

    NOTE(review): the final division by 1j applies a global factor of -i to
    the projected gate — confirm this matches the intended phase convention.
    """
    H = np.array([[U, -np.sqrt(2)*J, 0, 0, 0, 0, 0, 0, 0,0],
                  [-np.sqrt(2)*J, Delta, -np.sqrt(2)*J, 0, 0, 0, 0, 0, 0,0],
                  [0, -np.sqrt(2)*J, U+2*Delta, 0, 0, 0, 0, 0, 0,0],
                  [0, 0, 0, Delta, 0, -J, -J, 0, 0, 0],
                  [0, 0, 0, 0, Delta, -J, -J, 0, 0, 0],
                  [0, 0, 0, -J, -J, U, 0, 0, 0, 0],
                  [0, 0, 0, -J, -J, 0, U+2*Delta, 0, 0, 0],
                  [0, 0, 0, 0, 0, 0, 0, U, -np.sqrt(2)*J, 0],
                  [0, 0, 0, 0, 0, 0, 0, -np.sqrt(2)*J, Delta, -np.sqrt(2)*J],
                  [0, 0, 0, 0, 0, 0, 0, 0, -np.sqrt(2)*J, U+2*Delta]])
    # Unitary evolution exp(-i 2*pi t H).
    Evolution = expm(H * 2*np.pi*t*-1j)
    # Project the 10-dim evolution onto the 4-dim computational subspace.
    swap = np.dot(trans_mat, Evolution)
    swap = np.dot(swap, trans_mat.T)
    swap /= 1j
    return swap
def sto(t, Delta):
    """
    Diagonal phase evolution on the two-qubit 01/10 subspace.

    only for 01/10 base
    :param t: evolution time (units of 2*pi)
    :param Delta: detuning; the |10> level acquires phase exp(-4j*pi*t*Delta)
    :return: 4x4 diagonal unitary
    """
    phase = np.diag([0.0, 0.0, 2 * Delta, 0.0])
    return expm(phase * 2 * np.pi * t * -1j)
def noise(x):
    """Draw one sample from N(x, (x/25.76)^2) — about 3.9% relative noise."""
    sigma = x / 25.76
    np.random.seed()  # reseed from OS entropy on every call
    return np.random.normal(x, sigma, 1)[0]
def swap_noise(U, J, t, Delta = 0):
    """``swap`` gate with Gaussian noise applied independently to U, J, t and Delta."""
    return swap(noise(U), noise(J), noise(t), noise(Delta))
def sto_noise(t, Delta):
    """``sto`` phase gate with Gaussian noise applied to t and Delta."""
    return sto(noise(t), noise(Delta))
def NumberOf1(n):
    """Population count of the low 32 bits of ``n``.

    Masking with 0xffffffff makes negative inputs behave like 32-bit
    two's-complement values.
    """
    return bin(n & 0xffffffff).count('1')
def phase_shift(n):
    """Accumulated random phase: sum of ``n`` noisy 2*pi rotations (uses ``noise``)."""
    phase = 0
    for _ in range(n):
        phase += noise(2*np.pi)
    return phase
def dephase(n):
    """Random dephasing unitary on ``n`` qubits.

    Diagonal matrix whose phase on each computational basis state is a random
    phase walk whose length equals the state's Hamming weight (number of 1s).
    """
    numbers = []
    for i in range(2 ** n):
        numbers.append(NumberOf1(i))
    numbers = np.array(numbers)
    # exp(-i * phase_shift(weight)) for every basis state, on the diagonal.
    dephase_mat = np.diag([np.exp(phase_shift(i)*-1j) for i in numbers])
    return dephase_mat
def density_mat(x):
    """Density matrix of a pure state (1-D input) or uniform ensemble (2-D input).

    Each state is normalized before forming the outer product; the 2-D case
    averages the per-state matrices.  The matrix is built as state† · state
    (rows conjugated), matching the original convention.

    BUG FIX: normalization previously used in-place division on views of
    ``x`` and therefore silently modified the caller's array; the input is
    now left untouched.

    :param x: state vector, or array of state vectors (one per row)
    :return: the (dim, dim) density matrix
    """
    if len(x.shape) == 1:
        dim = len(x)
        state = x.reshape(1, dim) / np.linalg.norm(x)
        return np.dot(state.conj().T, state)

    n = len(x)
    dim = len(x[0])
    den_mat = None
    for i in range(n):
        state = x[i].reshape(1, dim) / np.linalg.norm(x[i])
        contrib = np.dot(state.conj().T, state)
        den_mat = contrib if den_mat is None else den_mat + contrib
    return den_mat / n
def fidelity_vec(x, y):
    """Fidelity |<x|y>|^2 of two (not necessarily normalized) pure states."""
    overlap = np.dot(x.conj(), y)
    norm_product = np.linalg.norm(x) ** 2 * np.linalg.norm(y) ** 2
    return ((overlap * overlap.conjugate()) / norm_product).real
def fidelity_essemble(x,y):
    """Mean fidelity between reference state ``x`` and each state in ensemble ``y``."""
    total = 0
    for member in y:
        total += fidelity_vec(x, member)
    return total / len(y)
def purity(x):
    """Purity Tr(rho^2) of a density matrix ``x`` (real part)."""
    return np.trace(x @ x).real
def distribution(x):
    """Born-rule probabilities |x_i|^2 of a (not necessarily normalized) state.

    BUG FIX: the input was previously normalized in place (``x /= norm``),
    mutating the caller's array; a normalized copy is used instead.

    :param x: state vector
    :return: probability vector of the same length
    """
    normalized = x / np.linalg.norm(x, 2)
    return np.abs(normalized) ** 2
def dis2state(x):
    """Amplitude vector sqrt(p_i) corresponding to a probability vector."""
    return np.sqrt(np.asarray(x))
def sample(x, n=1):
    """Draw ``n`` Z-basis measurement outcomes from state ``x``.

    :param x: state vector (normalized inside ``distribution``)
    :param n: number of samples
    :return: a single outcome index when ``n == 1``; otherwise the tuple
        ``(outcomes, empirical_distribution)``.
        NOTE(review): the two return shapes differ — callers that iterate
        the result for n > 1 get a 2-tuple, not the outcome array; confirm
        intended usage (see ``sample_bases`` / ``Z_sample``).
    """
    N = int(np.log2(len(x)))
    res = stats.rv_discrete(values=(range(len(distribution(x))), distribution(x))).rvs(size=n)
    if n == 1:
        return res[0]
    else:
        # Report the sampling quality as a KL divergence before returning.
        dis = sample_distribution(res, N)
        kl = KL(distribution(x), dis)
        print(kl)
        return res, dis
def sample_distribution(sample, N):
    """Empirical distribution over the 2**N outcomes of a list of samples."""
    counts = np.zeros(2 ** N)
    for outcome in sample:
        counts[outcome] += 1
    return counts / len(sample)
def essemble_distribution(x):
    """Diagonal of a density matrix as a real probability vector."""
    diagonal = np.array([row[i] for i, row in enumerate(x)])
    return diagonal.real
def KL(p, q):
    """KL divergence sum_i p_i * log(p_i / q_i), skipping bins where either is 0."""
    divergence = 0
    for i, p_i in enumerate(p):
        if p_i and q[i]:
            divergence += p_i * np.log(p_i / q[i])
    return divergence
def KL_new(P,Q):
    """Smoothed KL divergence: add epsilon = 0.01/len(P) to both before summing."""
    epsilon = 0.01 / len(P)
    P_smooth = P + epsilon
    Q_smooth = Q + epsilon
    return np.sum(P_smooth * np.log(P_smooth / Q_smooth))
def sample_plot(dis, N, M, KL=None):
    """Bar-chart a sampled distribution over all ``N``-qubit bit strings.

    :param dis: probability vector of length 2**N
    :param N: number of qubits (x labels are binary strings)
    :param M: number of measurements (shown in the title)
    :param KL: optional KL divergence to display in the title
    """
    x = [int2bin(i,N) for i in range(len(dis))]
    plt.bar(x, dis)
    plt.ylim(0,1)
    # NOTE: ``x`` is reused here as the numeric bar position from enumerate.
    for x, y in enumerate(dis):
        plt.text(x, y+0.02, '%s' %y, ha='center')
    if not KL:
        plt.title('{} qubits with {} measurements'.format(N, M))
    else:
        plt.title('{} qubits with {} measurements\n KL = {}'.format(N, M, KL))
    plt.ylabel('Probility')
    plt.show()
def trans_base(bases, x):
    """Rotate state ``x`` from the Z basis into the product basis named by ``bases``.

    :param bases: string of basis letters ('X'/'Y'/'Z', case-insensitive),
        one per qubit
    :param x: state vector of length 2**len(bases)
    :return: the rotated state vector
    """
    z2z = np.eye(2)
    z2x = np.array([[1, 1], [1, -1]]) * (1 / np.sqrt(2))
    z2y = np.array([[1, -1j], [1, 1j]]) * (1 / np.sqrt(2))
    decode = {'Z': z2z, 'X': z2x, 'Y': z2y, 'z': z2z, 'x': z2x, 'y': z2y}
    rotation = decode[bases[0]]
    for letter in bases[1:]:
        rotation = np.kron(rotation, decode[letter])
    return np.dot(rotation, x)
def sample_bases(bases, state, M):
    """Measure ``state`` ``M`` times in each basis string of ``bases`` and log to files.

    Writes three files named '<N> qubits_measurement*.txt': the outcomes
    (f1), the basis letters for each outcome row (f2), and the set of bases
    used (f3).

    NOTE(review): ``sample(..., M)`` returns an (outcomes, distribution)
    tuple when M > 1, so iterating ``measure`` below would yield arrays, not
    individual outcomes — confirm the intended call convention for M.
    """
    N = int(np.log2(len(state)))
    f1 = open(str(N)+' qubits_measurement.txt', mode='w')
    f2 = open(str(N)+' qubits_measurement_bases.txt', mode='w')
    f3 = open(str(N)+' qubits_measurement_bases_set.txt', mode='w')
    for i in bases:
        # Rotate into the requested basis, then sample in Z.
        measure = sample(trans_base(i, state), M)
        for j in measure:
            tmp = int2bin(j, N)
            for k in tmp:
                f1.write(k+' ')
            f1.write('\n')
            for k in i:
                f2.write(k+' ')
            f2.write('\n')
        for j in i:
            f3.write(j+' ')
        f3.write('\n')
    f1.close()
    f2.close()
    f3.close()
def Z_sample(state, M, error=0):
    """Sample ``state`` ``M`` times in the Z basis, optionally inject bit-flip
    errors, and write outcomes to '<N> qubits_measurement_z.txt'.

    Prints the KL divergence between the empirical and exact distributions,
    before and (when ``error`` is set) after error injection.

    NOTE(review): ``sample(state, M)`` returns an (outcomes, distribution)
    tuple for M > 1; the code below treats ``measure`` as a flat outcome
    array — confirm the intended call convention.
    """
    N = int(np.log2(len(state)))
    f1 = open(str(N)+' qubits_measurement_z.txt', mode='w')
    measure = sample(state, M)
    dis = sample_distribution(measure, N)
    kl = KL(dis, distribution(state))
    print(kl)
    if error:
        # Corrupt the samples with single-bit flips and report the new KL.
        measure = sample_error(measure, N, error)
        dis = sample_distribution(measure, N)
        kl = KL(dis, distribution(state))
        print(kl)
    for j in measure:
        tmp = int2bin(j, N)
        for k in tmp:
            f1.write(k+' ')
        f1.write('\n')
    f1.close()
def sample_error(samples, n, error):
    """Inject single-qubit bit-flip errors into measurement outcomes.

    With probability ``n * error`` each sample is XOR-ed with a randomly
    chosen one-hot mask, flipping exactly one of its ``n`` bits.  Prints the
    number and fraction of corrupted samples.

    NOTE(review): ``bernoulli.rvs`` requires ``n * error <= 1`` — confirm
    the intended per-qubit error model for larger registers.

    :return: a new array of (possibly corrupted) outcomes
    """
    size = len(samples)
    flip = bernoulli.rvs(n * error, size=size)
    # One-hot masks: flipping bit i corresponds to XOR with 2**i.
    ker = [2 ** i for i in range(n)]
    count = 0
    for i in range(size):
        if flip[i]:
            count += 1
            flip[i] = random.choice(ker)
    print(count, count / size)
    sample_new = np.array([samples[i] ^ flip[i] for i in range(size)])
    return sample_new
def sample_save(samples, N, path=None):
    """Write measurement outcomes to ``path``, one space-separated bit string per line.

    :param samples: iterable of integer outcomes
    :param N: number of qubits (width of each bit string)
    :param path: output file; defaults to '<N> qubits_measurement_z.txt'
    """
    path = path if path else str(N) + ' qubits_measurement_z.txt'
    # ``with`` guarantees the file is closed even if a write raises
    # (the original left the handle open on error).
    with open(path, mode='w') as f1:
        for outcome in samples:
            for bit in int2bin(outcome, N):
                f1.write(bit + ' ')
            f1.write('\n')
    print(path)
"""!
@brief Graph representation (uses format GRPR).
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
"""
import matplotlib.pyplot as plt
from matplotlib import colors
from enum import IntEnum
class type_graph_descr(IntEnum):
    """!
    @brief Enumeration of graph description.

    @details Matrix representation is list of lists where number of rows equals number of columns and each element
             of square matrix determines whether there is connection between two vertices. For example:
             [ [0, 1, 1], [1, 0, 1], [1, 1, 0] ].
             Vector representation is list of lists where index of row corresponds to index of vertex and elements
             of row consists of indexes of connected vertices. For example:
             [ [1, 2], [0, 2], [0, 1] ].
    """

    ## Unknown graph representation.
    GRAPH_UNKNOWN = 0

    ## Matrix graph representation.
    GRAPH_MATRIX_DESCR = 1

    ## Vector graph representation.
    GRAPH_VECTOR_DESCR = 2
class graph:
    """!
    @brief Graph representation (matrix or adjacency-vector form).
    """

    def __init__(self, data, type_graph = None, space_descr = None, comments = None):
        """!
        @brief Constructor of graph.

        @param[in] data (list): Representation of graph. Considered as matrix if 'type_graph' is not specified.
        @param[in] type_graph (type_graph_descr): Type of graph representation in 'data'.
        @param[in] space_descr (list): Coordinates of each vertex that are used for graph drawing (can be omitted).
        @param[in] comments (string): Comments related to graph.
        """
        self.__data = data
        self.__space_descr = space_descr
        self.__comments = comments

        if (type_graph is not None):
            self.__type_graph = type_graph
        else:
            # Autodetect: assume matrix form; fall back to vector (adjacency
            # list) form when a row has the wrong length or contains an
            # element other than 0/1.
            self.__type_graph = type_graph_descr.GRAPH_MATRIX_DESCR
            for row in self.__data:
                if (len(row) != len(self.__data)):
                    self.__type_graph = type_graph_descr.GRAPH_VECTOR_DESCR
                    break

                for element in row:
                    # BUG FIX: was `(element != 0) or (element != 1)`, which
                    # is true for every element and misclassified all
                    # matrices as vector representation.
                    if (element != 0) and (element != 1):
                        self.__type_graph = type_graph_descr.GRAPH_VECTOR_DESCR

    def __len__(self):
        """!
        @return (uint) Size of graph defined by number of vertices.
        """
        return len(self.__data)

    @property
    def data(self):
        """!
        @return (list) Graph representation.
        """
        return self.__data

    @property
    def space_description(self):
        """!
        @return (list) Space description, or None when none was provided.
        """
        if (self.__space_descr == [] or self.__space_descr is None):
            return None
        return self.__space_descr

    @property
    def comments(self):
        """!
        @return (string) Comments.
        """
        return self.__comments

    @property
    def type_graph_descr(self):
        """!
        @return (type_graph_descr) Type of graph representation.
        """
        return self.__type_graph
def read_graph(filename):
    """!
    @brief Read graph from file in GRPR format.

    @details Line prefixes: 'c'/'p' comments, 'r' vertex coordinates,
             'm' matrix row, 'v' adjacency-vector row, 'e' edge pair
             (1-based vertex indexes). Only one of 'm'/'v'/'e' may be used
             per file.

    NOTE(review): the file handle leaks if parsing raises before
    ``file.close()`` — consider a ``with`` block.

    @param[in] filename (string): Path to file with graph in GRPR format.

    @return (graph) Graph that is read from file.
    """
    file = open(filename, 'r');
    comments = "";
    space_descr = [];
    data = [];
    data_type = None;
    map_data_repr = dict();   # Used as a temporary buffer only when input graph is represented by edges.

    for line in file:
        if (line[0] == 'c' or line[0] == 'p'):
            comments += line[1:];

        elif (line[0] == 'r'):
            node_coordinates = [float(val) for val in line[1:].split()];
            if (len(node_coordinates) != 2):
                raise NameError('Invalid format of space description for node (only 2-dimension space is supported)');

            space_descr.append( [float(val) for val in line[1:].split()] );

        elif (line[0] == 'm'):
            if ( (data_type is not None) and (data_type != 'm') ):
                raise NameError('Invalid format of graph representation (only one type should be used)');

            data_type = 'm';
            data.append( [float(val) for val in line[1:].split()] );

        elif (line[0] == 'v'):
            if ( (data_type is not None) and (data_type != 'v') ):
                raise NameError('Invalid format of graph representation (only one type should be used)');

            data_type = 'v';
            data.append( [float(val) for val in line[1:].split()] );

        elif (line[0] == 'e'):
            if ( (data_type is not None) and (data_type != 'e') ):
                raise NameError('Invalid format of graph representation (only one type should be used)');

            data_type = 'e';
            # Record the edge in both directions (undirected graph).
            vertices = [int(val) for val in line[1:].split()];
            if (vertices[0] not in map_data_repr):
                map_data_repr[ vertices[0] ] = [ vertices[1] ];
            else:
                map_data_repr[ vertices[0] ].append(vertices[1])

            if (vertices[1] not in map_data_repr):
                map_data_repr[ vertices[1] ] = [ vertices[0] ];
            else:
                map_data_repr[ vertices[1] ].append(vertices[0]);

        elif (len(line.strip()) == 0): continue;

        else:
            print(line);
            raise NameError('Invalid format of file with graph description');

    # In case of edge representation result should be copied.
    # Edge input uses 1-based vertex indexes; convert to a 0-based matrix.
    if (data_type == 'e'):
        for index in range(len(map_data_repr)):
            data.append([0] * len(map_data_repr));

            for index_neighbour in map_data_repr[index + 1]:
                data[index][index_neighbour - 1] = 1;

    file.close();

    # Set graph description
    graph_descr = None;
    if (data_type == 'm'): graph_descr = type_graph_descr.GRAPH_MATRIX_DESCR;
    elif (data_type == 'v'): graph_descr = type_graph_descr.GRAPH_VECTOR_DESCR;
    elif (data_type == 'e'): graph_descr = type_graph_descr.GRAPH_MATRIX_DESCR;
    else:
        raise NameError('Invalid format of file with graph description');

    if (space_descr != []):
        if (len(data) != len(space_descr)):
            raise NameError("Invalid format of file with graph - number of nodes is different in space representation and graph description");

    return graph(data, graph_descr, space_descr, comments);
def draw_graph(graph_instance, map_coloring = None):
    """!
    @brief Draw graph.

    @param[in] graph_instance (graph): Graph that should be drawn.
    @param[in] map_coloring (list): List of color indexes for each vertex. Size of this list should be equal to size of graph (number of vertices).
               If it's not specified (None) than graph without coloring will be dwarn.

    @warning Graph can be represented if there is space representation for it.
    """
    if (graph_instance.space_description is None):
        raise NameError("The graph haven't got representation in space");

    if (map_coloring is not None):
        if (len(graph_instance) != len(map_coloring)):
            raise NameError("Size of graph should be equal to size coloring map");

    fig = plt.figure();
    axes = fig.add_subplot(111);

    # Fixed palette; limits the number of distinct colors that can be drawn.
    available_colors = ['#00a2e8', '#22b14c', '#ed1c24',
                        '#fff200', '#000000', '#a349a4',
                        '#ffaec9', '#7f7f7f', '#b97a57',
                        '#c8bfe7', '#880015', '#ff7f27',
                        '#3f48cc', '#c3c3c3', '#ffc90e',
                        '#efe4b0', '#b5e61d', '#99d9ea',
                        '#7092b4', '#ffffff'];

    if (map_coloring is not None):
        if (len(map_coloring) > len(available_colors)):
            raise NameError('Impossible to represent colored graph due to number of specified colors.');

    # Track the bounding box of all vertices for the final axis limits.
    x_maximum = -float('inf');
    x_minimum = float('inf');
    y_maximum = -float('inf');
    y_minimum = float('inf');

    for i in range(0, len(graph_instance.space_description), 1):
        if (graph_instance.type_graph_descr == type_graph_descr.GRAPH_MATRIX_DESCR):
            for j in range(i, len(graph_instance.space_description), 1):    # draw connection between two points only one time
                if (graph_instance.data[i][j] == 1):
                    axes.plot([graph_instance.space_description[i][0], graph_instance.space_description[j][0]], [graph_instance.space_description[i][1], graph_instance.space_description[j][1]], 'k-', linewidth = 1.5);

        elif (graph_instance.type_graph_descr == type_graph_descr.GRAPH_VECTOR_DESCR):
            for j in graph_instance.data[i]:
                if (i > j):     # draw connection between two points only one time
                    axes.plot([graph_instance.space_description[i][0], graph_instance.space_description[j][0]], [graph_instance.space_description[i][1], graph_instance.space_description[j][1]], 'k-', linewidth = 1.5);

        color_node = 'b';
        if (map_coloring is not None):
            color_node = colors.hex2color(available_colors[map_coloring[i]]);

        axes.plot(graph_instance.space_description[i][0], graph_instance.space_description[i][1], color = color_node, marker = 'o', markersize = 20);

        if (x_maximum < graph_instance.space_description[i][0]): x_maximum = graph_instance.space_description[i][0];
        if (x_minimum > graph_instance.space_description[i][0]): x_minimum = graph_instance.space_description[i][0];
        if (y_maximum < graph_instance.space_description[i][1]): y_maximum = graph_instance.space_description[i][1];
        if (y_minimum > graph_instance.space_description[i][1]): y_minimum = graph_instance.space_description[i][1];

    plt.xlim(x_minimum - 0.5, x_maximum + 0.5);
    plt.ylim(y_minimum - 0.5, y_maximum + 0.5);

    plt.show()
    plt.close(fig)
# gh_stars: 1000+
"""
Unit tests for the Deis api app.
Run the tests with "./manage.py test api"
"""
from __future__ import unicode_literals
import json
import urllib
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
from rest_framework.authtoken.models import Token
class AuthTest(TestCase):
fixtures = ['test_auth.json']
"""Tests user registration, authentication and authorization"""
    def setUp(self):
        """Load the three fixture users ('test_auth.json') and their API tokens."""
        self.admin = User.objects.get(username='autotest')
        self.admin_token = Token.objects.get(user=self.admin).key
        self.user1 = User.objects.get(username='autotest2')
        self.user1_token = Token.objects.get(user=self.user1).key
        self.user2 = User.objects.get(username='autotest3')
        self.user2_token = Token.objects.get(user=self.user2).key
def test_auth(self):
"""
Test that a user can register using the API, login and logout
"""
# test registration workflow
username, password = '<PASSWORD>', 'password'
first_name, last_name = 'Otto', 'Test'
email = '<EMAIL>'
submit = {
'username': username,
'password': password,
'first_name': first_name,
'last_name': last_name,
'email': email,
# try to abuse superuser/staff level perms (not the first signup!)
'is_superuser': True,
'is_staff': True,
}
url = '/v1/auth/register'
response = self.client.post(url, json.dumps(submit), content_type='application/json')
self.assertEqual(response.status_code, 201)
for key in response.data:
self.assertIn(key, ['id', 'last_login', 'is_superuser', 'username', 'first_name',
'last_name', 'email', 'is_active', 'is_superuser', 'is_staff',
'date_joined', 'groups', 'user_permissions'])
expected = {
'username': username,
'email': email,
'first_name': first_name,
'last_name': last_name,
'is_active': True,
'is_superuser': False,
'is_staff': False
}
self.assertDictContainsSubset(expected, response.data)
# test login
url = '/v1/auth/login/'
payload = urllib.urlencode({'username': username, 'password': password})
response = self.client.post(url, data=payload,
content_type='application/x-www-form-urlencoded')
self.assertEqual(response.status_code, 200)
@override_settings(REGISTRATION_MODE="disabled")
def test_auth_registration_disabled(self):
"""test that a new user cannot register when registration is disabled."""
url = '/v1/auth/register'
submit = {
'username': 'testuser',
'password': 'password',
'first_name': 'test',
'last_name': 'user',
'email': '<EMAIL>',
'is_superuser': False,
'is_staff': False,
}
response = self.client.post(url, json.dumps(submit), content_type='application/json')
self.assertEqual(response.status_code, 403)
@override_settings(REGISTRATION_MODE="admin_only")
def test_auth_registration_admin_only_fails_if_not_admin(self):
"""test that a non superuser cannot register when registration is admin only."""
url = '/v1/auth/register'
submit = {
'username': 'testuser',
'password': 'password',
'first_name': 'test',
'last_name': 'user',
'email': '<EMAIL>',
'is_superuser': False,
'is_staff': False,
}
response = self.client.post(url, json.dumps(submit), content_type='application/json')
self.assertEqual(response.status_code, 403)
@override_settings(REGISTRATION_MODE="admin_only")
def test_auth_registration_admin_only_works(self):
"""test that a superuser can register when registration is admin only."""
url = '/v1/auth/register'
username, password = '<PASSWORD>', 'password'
first_name, last_name = 'Otto', 'Test'
email = '<EMAIL>'
submit = {
'username': username,
'password': password,
'first_name': first_name,
'last_name': last_name,
'email': email,
# try to abuse superuser/staff level perms (not the first signup!)
'is_superuser': True,
'is_staff': True,
}
response = self.client.post(url, json.dumps(submit), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.admin_token))
self.assertEqual(response.status_code, 201)
for key in response.data:
self.assertIn(key, ['id', 'last_login', 'is_superuser', 'username', 'first_name',
'last_name', 'email', 'is_active', 'is_superuser', 'is_staff',
'date_joined', 'groups', 'user_permissions'])
expected = {
'username': username,
'email': email,
'first_name': first_name,
'last_name': last_name,
'is_active': True,
'is_superuser': False,
'is_staff': False
}
self.assertDictContainsSubset(expected, response.data)
# test login
url = '/v1/auth/login/'
payload = urllib.urlencode({'username': username, 'password': password})
response = self.client.post(url, data=payload,
content_type='application/x-www-form-urlencoded')
self.assertEqual(response.status_code, 200)
@override_settings(REGISTRATION_MODE="not_a_mode")
def test_auth_registration_fails_with_nonexistant_mode(self):
"""test that a registration should fail with a nonexistant mode"""
url = '/v1/auth/register'
submit = {
'username': 'testuser',
'password': 'password',
'first_name': 'test',
'last_name': 'user',
'email': '<EMAIL>',
'is_superuser': False,
'is_staff': False,
}
try:
self.client.post(url, json.dumps(submit), content_type='application/json')
except Exception, e:
self.assertEqual(str(e), 'not_a_mode is not a valid registation mode')
def test_cancel(self):
"""Test that a registered user can cancel her account."""
# test registration workflow
username, password = '<PASSWORD>', 'password'
submit = {
'username': username,
'password': password,
'first_name': 'Otto',
'last_name': 'Test',
'email': '<EMAIL>',
# try to abuse superuser/staff level perms
'is_superuser': True,
'is_staff': True,
}
other_username, other_password = '<PASSWORD>', 'password'
other_submit = {
'username': other_username,
'password': <PASSWORD>,
'first_name': 'Test',
'last_name': 'Tester',
'email': '<EMAIL>',
'is_superuser': False,
'is_staff': False,
}
url = '/v1/auth/register'
response = self.client.post(url, json.dumps(submit), content_type='application/json')
self.assertEqual(response.status_code, 201)
# cancel the account
url = '/v1/auth/cancel'
user = User.objects.get(username=username)
token = Token.objects.get(user=user).key
response = self.client.delete(url,
HTTP_AUTHORIZATION='token {}'.format(token))
self.assertEqual(response.status_code, 204)
url = '/v1/auth/register'
response = self.client.post(url, json.dumps(other_submit), content_type='application/json')
self.assertEqual(response.status_code, 201)
# normal user can't delete another user
url = '/v1/auth/cancel'
other_user = User.objects.get(username=other_username)
other_token = Token.objects.get(user=other_user).key
response = self.client.delete(url, json.dumps({'username': self.admin.username}),
content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(other_token))
self.assertEqual(response.status_code, 403)
# admin can delete another user
response = self.client.delete(url, json.dumps({'username': other_username}),
content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.admin_token))
self.assertEqual(response.status_code, 204)
# user can not be deleted if it has an app attached to it
response = self.client.post(
'/v1/apps',
HTTP_AUTHORIZATION='token {}'.format(self.admin_token)
)
self.assertEqual(response.status_code, 201)
app_id = response.data['id'] # noqa
self.assertIn('id', response.data)
response = self.client.delete(url, json.dumps({'username': str(self.admin)}),
content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.admin_token))
self.assertEqual(response.status_code, 409)
def test_passwd(self):
"""Test that a registered user can change the password."""
# test registration workflow
username, password = '<PASSWORD>', 'password'
first_name, last_name = 'Otto', 'Test'
email = '<EMAIL>'
submit = {
'username': username,
'password': password,
'first_name': first_name,
'last_name': last_name,
'email': email,
}
url = '/v1/auth/register'
response = self.client.post(url, json.dumps(submit), content_type='application/json')
self.assertEqual(response.status_code, 201)
# change password
url = '/v1/auth/passwd'
user = User.objects.get(username=username)
token = Token.objects.get(user=user).key
submit = {
'password': '<PASSWORD>',
'new_password': password,
}
response = self.client.post(url, json.dumps(submit), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(token))
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data, {'detail': 'Current password does not match'})
self.assertEqual(response.get('content-type'), 'application/json')
submit = {
'password': password,
'new_password': '<PASSWORD>',
}
response = self.client.post(url, json.dumps(submit), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(token))
self.assertEqual(response.status_code, 200)
# test login with old password
url = '/v1/auth/login/'
payload = urllib.urlencode({'username': username, 'password': password})
response = self.client.post(url, data=payload,
content_type='application/x-www-form-urlencoded')
self.assertEqual(response.status_code, 400)
# test login with new password
payload = urllib.urlencode({'username': username, 'password': '<PASSWORD>'})
response = self.client.post(url, data=payload,
content_type='application/x-www-form-urlencoded')
self.assertEqual(response.status_code, 200)
def test_change_user_passwd(self):
"""
Test that an administrator can change a user's password, while a regular user cannot.
"""
# change password
url = '/v1/auth/passwd'
old_password = <PASSWORD>.user1.password
new_password = 'password'
submit = {
'username': self.user1.username,
'new_password': new_password,
}
response = self.client.post(url, json.dumps(submit), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.admin_token))
self.assertEqual(response.status_code, 200)
# test login with old password
url = '/v1/auth/login/'
payload = urllib.urlencode({'username': self.user1.username, 'password': <PASSWORD>})
response = self.client.post(url, data=payload,
content_type='application/x-www-form-urlencoded')
self.assertEqual(response.status_code, 400)
# test login with new password
payload = urllib.urlencode({'username': self.user1.username, 'password': <PASSWORD>})
response = self.client.post(url, data=payload,
content_type='application/x-www-form-urlencoded')
self.assertEqual(response.status_code, 200)
# Non-admins can't change another user's password
submit['password'], submit['new_password'] = submit['<PASSWORD>password'], <PASSWORD>
url = '/v1/auth/passwd'
response = self.client.post(url, json.dumps(submit), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.user2_token))
self.assertEqual(response.status_code, 403)
# change back password with a regular user
response = self.client.post(url, json.dumps(submit), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.user1_token))
self.assertEqual(response.status_code, 200)
# test login with new password
url = '/v1/auth/login/'
payload = urllib.urlencode({'username': self.user1.username, 'password': <PASSWORD>})
response = self.client.post(url, data=payload,
content_type='application/x-www-form-urlencoded')
self.assertEqual(response.status_code, 200)
def test_regenerate(self):
""" Test that token regeneration works"""
url = '/v1/auth/tokens/'
response = self.client.post(url, '{}', content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.admin_token))
self.assertEqual(response.status_code, 200)
self.assertNotEqual(response.data['token'], self.admin_token)
self.admin_token = Token.objects.get(user=self.admin)
response = self.client.post(url, '{"username" : "autotest2"}',
content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.admin_token))
self.assertEqual(response.status_code, 200)
self.assertNotEqual(response.data['token'], self.user1_token)
response = self.client.post(url, '{"all" : "true"}',
content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.admin_token))
self.assertEqual(response.status_code, 200)
response = self.client.post(url, '{}', content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.admin_token))
self.assertEqual(response.status_code, 401)
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (c) 2019 HERE Europe B.V.
#
# SPDX-License-Identifier: MIT
# License-Filename: LICENSE
#
###############################################################################
import math
# hth GeoTools.ts
def tileXYToQuadKey(levelOfDetail, column, row):
    """Convert tile (column, row) at *levelOfDetail* into a quadkey string.

    Uses the standard Bing-style bit interleaving: for each level, most
    significant bit first, the digit is 2*rowBit + colBit ('0'..'3').

    Fix: the previous implementation added 1 for the row bit, 1 for the
    column bit and 1 unconditionally, so (rowBit=1, colBit=0) and
    (rowBit=0, colBit=1) both produced digit '2' — distinct tiles could
    yield identical quadkeys (the de-dup set in bboxToListQuadkey was
    masking those collisions).
    """
    quadKey = ""
    for i in range(levelOfDetail, 0, -1):
        digit = 0
        mask = 1 << (i - 1)
        if (row & mask) != 0:
            digit += 2  # row (Y) contributes the high bit
        if (column & mask) != 0:
            digit += 1  # column (X) contributes the low bit
        quadKey += str(digit)
    return quadKey
# hth GeoTools.ts
def coord_to_percent_bing_reversed(coord, level):
    """Project *coord* = [longitude, latitude] (degrees) with the Bing/Web
    Mercator formula and return [y_percent, x_percent], each clamped to
    [0, 1].  The y axis is reversed (bottom origin) relative to Bing's.

    *level* is unused; kept for interface parity with the other projectors.

    Fix: the old code handled |sin(lat)| == 1 by recursing with
    latitude + 1e-9, but near the poles that offset changes sin(lat) by
    ~1.5e-22 — below double precision — so sin still rounded to +/-1 and
    the recursion never terminated (RecursionError at lat = +/-90).
    The pole is now handled by clamping sinLatitude just inside (-1, 1).
    """
    longitude, latitude = coord
    sinLatitude = math.sin((latitude * math.pi) / 180)
    limit = 1.0 - 1e-12  # keep (1 - sinLatitude) and (1 + sinLatitude) > 0
    if sinLatitude > limit:
        sinLatitude = limit
    elif sinLatitude < -limit:
        sinLatitude = -limit
    x_percent = max(0, min(1,
        ((longitude + 180) / 360)
    ))
    y_percent = max(0, min(1,
        1 - (0.5 - math.log((1 + sinLatitude) / (1 - sinLatitude)) / (4 * math.pi))
    ))
    return [y_percent, x_percent]
# https://developer.here.com/documentation/map-tile/common/map_tile/topics/mercator-projection.html
def coord_to_percent_here_mercator(coord, level):
lon_, lat_ = coord
lon, lat = map(math.radians,coord)
tan = math.tan(math.pi/4 + lat/2)
if tan == 0:
return coord_to_percent([lon_,lat_+1e-9],level)
x = lon/math.pi
y = math.log(tan) / math.pi
xmin, xmax = -1, 1
fnY = lambda lat: math.log(math.tan(math.pi/4 + lat/2)) / math.pi
ymin, ymax = map(fnY,map(math.radians,[-(90-1e-9), 90-1e-9]))
col_percent = (x - xmin) / (xmax-xmin)
row_percent = max(0,min(1, (y - ymin) / (ymax-ymin))) # incorrect scale
return [row_percent, col_percent]
def coord_to_percent_here_simple(coord, level):
    """Map a (longitude, latitude) pair onto the unit square using the
    plain equirectangular layout of the HERE "here" tiling schema.

    Returns [row_percent, col_percent], each clamped to [0, 1].  *level*
    is accepted only for interface parity with the other projectors.
    """
    lon, lat = coord
    col_fraction = (lon + 180) / 360
    row_fraction = (lat + 90) / 180
    clamp = lambda value: max(0, min(1, value))
    return [clamp(row_fraction), clamp(col_fraction)]
def get_row_col_bounds(level, schema="here"):
    """Return (nrow, ncol): the tile-grid dimensions at *level*.

    schema "here"
    [x,y], start from bottom left, go anti-clockwise
    level 0: 0,0
    level 1: 0,0; 1,0
    level 2: 0,0; 0,1; 1,0; 1,1; 2,0; 2,1; 3,0; 3,1

    The "here" schema has half as many rows as columns (rows span 180 deg
    of latitude, columns 360 deg of longitude); any other schema uses the
    square 2**level x 2**level grid.  Level 0 is a single row either way.
    """
    ncol = 2 ** level
    if schema == "here":
        nrow = ncol // 2 if level else 1
    else:
        nrow = ncol if level else 1
    return nrow, ncol
def coord_to_percent(coord, level, schema="here"):
    """Dispatch to the projector matching *schema* and return the
    (row_percent, col_percent) pair for *coord* = [longitude, latitude]."""
    projector = (coord_to_percent_here_simple if schema == "here"
                 else coord_to_percent_bing_reversed)
    row_percent, col_percent = projector(coord, level)
    return row_percent, col_percent
def coord_to_row_col(coord, level, schema="here"):
    """Convert a [longitude, latitude] coordinate to integer (row, col)
    tile indices at *level*, clamped into the grid for *schema*."""
    row_pct, col_pct = coord_to_percent(coord, level, schema)
    nrow, ncol = get_row_col_bounds(level, schema)
    clamp_index = lambda pct, n: max(0, min(n - 1, math.floor(pct * n)))
    return clamp_index(row_pct, nrow), clamp_index(col_pct, ncol)
# vector_tiles_reader, tile_helper.py
# Map-scale denominator upper bounds -> web-map zoom level: a scale
# strictly below a key maps to that key's zoom level.
_upper_bound_scale_to_zoom_level = {
    1000000000: 0,
    500000000: 1,
    200000000: 2,
    50000000: 3,
    25000000: 4,
    12500000: 5,
    6500000: 6,
    3000000: 7,
    1500000: 8,
    750000: 9,
    400000: 10,
    200000: 11,
    100000: 12,
    50000: 13,
    25000: 14,
    12500: 15,
    5000: 16,
    2500: 17,
    1500: 18,
    750: 19,
    500: 20,
    250: 21,
    100: 22,
    0: 23,
}
def get_zoom_for_current_map_scale(canvas):
    """Return the zoom level matching the canvas' current map scale.

    *canvas* only needs a scale() method returning a number.  A negative
    scale maps to the maximum zoom (23); a scale at or above the largest
    bound falls through to zoom 0, matching the original lookup.
    """
    scale = int(round(canvas.scale()))
    if scale < 0:
        return 23
    for bound in sorted(_upper_bound_scale_to_zoom_level):
        if scale < bound:
            return _upper_bound_scale_to_zoom_level[bound]
    return 0
# bbox
from .bbox_utils import spiral_index
def bboxToLevelRowCol(x_min, y_min, x_max, y_max, level, schema="here"):
    """Return (r1, r2, c1, c2): the inclusive, sorted row and column
    ranges of the tiles covering the bounding box at *level*."""
    row_a, col_a = coord_to_row_col([x_min, y_min], level, schema)
    row_b, col_b = coord_to_row_col([x_max, y_max], level, schema)
    r1, r2 = sorted((row_a, row_b))
    c1, c2 = sorted((col_a, col_b))
    return r1, r2, c1, c2
def spiral_iter(lstX, lstY):
    """Yield (x, y) element pairs from the two lists in the centre-outward
    order produced by spiral_index()."""
    for idx_x, idx_y in spiral_index(len(lstX), len(lstY)):
        yield lstX[idx_x], lstY[idx_y]
def bboxToListColRow(x_min, y_min, x_max, y_max, level, schema="here"):
    """Return "<level>_<col>_<row>" tile identifiers covering the bounding
    box at *level*, ordered spirally from the centre outward."""
    r1, r2, c1, c2 = bboxToLevelRowCol(x_min, y_min, x_max, y_max, level, schema)
    rows = list(range(r1, r2 + 1))
    cols = list(range(c1, c2 + 1))
    return ["{level}_{col}_{row}".format(level=level, row=row, col=col)
            for col, row in spiral_iter(cols, rows)]
def bboxToListQuadkey(x_min, y_min, x_max, y_max, level):
    """Return the quadkeys covering the bounding box at *level*, in spiral
    order from the centre, de-duplicated while preserving first-seen order."""
    r1, r2, c1, c2 = bboxToLevelRowCol(x_min, y_min, x_max, y_max, level)
    rows = list(range(r1, r2 + 1))
    cols = list(range(c1, c2 + 1))
    seen = set()
    ordered = []
    for col, row in spiral_iter(cols, rows):
        key = tileXYToQuadKey(level, col, row)
        if key in seen:
            continue
        seen.add(key)
        ordered.append(key)
    return ordered
def spiral_fast_iter(x_min, y_min, x_max, y_max):
    """Yield absolute (x, y) pairs covering the inclusive ranges
    [x_min, x_max] x [y_min, y_max] in spiral-index order."""
    width = x_max - x_min + 1
    height = y_max - y_min + 1
    for off_x, off_y in spiral_index(width, height):
        yield x_min + off_x, y_min + off_y
def bboxToListQuadkeyFast(x_min, y_min, x_max, y_max, level):
    # Variant of bboxToListQuadkey that spirals over index offsets directly
    # instead of materialising row/col lists (historically "not really
    # faster").  Unlike bboxToListQuadkey it does not de-duplicate.
    r1, r2, c1, c2 = bboxToLevelRowCol(x_min, y_min, x_max, y_max, level)
    return [tileXYToQuadKey(level, col, row)
            for row, col in spiral_fast_iter(r1, r2, c1, c2)]
|
<filename>python/zyzzyva.py
import time
import json
import hashlib
# Global registry mapping type-name string -> MessageType singleton.
msg_types = {}
class MessageType:
    """Interned message-type tag.

    Constructing an instance registers it in the module-level msg_types
    table so MsgType() can later resolve the name back to the singleton.
    """
    def __init__(self, t):
        self.t = t
        msg_types[t] = self
    def __str__(self):
        return self.t
    def str(self):
        # Convenience alias used when building JSON payloads.
        return self.t
    def __repr__(self):
        return 'MessageType(' + repr(self.t) + ')'
# The three Zyzzyva message kinds used below: the client's request, the
# primary's ordered broadcast, and the replicas' speculative response.
REQUEST = MessageType('REQUEST')
ORDER_REQ = MessageType('ORDER-REQ')
SPEC_RESPONSE = MessageType('SPEC-RESPONSE')
def MsgType(t):
    # Resolve a type-name string to its registered MessageType singleton.
    return msg_types[t]
class World:
    """Shared simulation state: every client and server by id, the current
    primary replica, and the fault-tolerance parameter f (max_failures)."""
    def __init__(self):
        self.clients, self.servers = {}, {}
        # Set by whichever Server is constructed with is_master=True.
        self.primary = None
        # "f": clients wait for 3f + 1 matching speculative responses.
        self.max_failures = 1
# Module-level singleton shared by every Client and Server defined below.
world = World()
class Client:
    """Zyzzyva client: sends REQUESTs to the primary and collects
    speculative SPEC-RESPONSEs until a quorum of 3f + 1 agrees.

    NOTE(review): Python 2 code (print statements, backtick repr).
    """
    def __init__(self, id):
        self.id = id
        self.current_operation = False
        world.clients[id] = self
        self.seq = 0
        # answers: response-fingerprint -> list of matching SPEC-RESPONSEs.
        self.answers = {}
        # answered: fingerprints already accepted (quorum reached).
        self.answered = []
        self.ans_count = 0
    def rx_message(self, stuff):
        #print `self.__class__.__name__`, `self.id`, stuff
        msg = json.loads(stuff)
        if MsgType(msg['msgtype']) is SPEC_RESPONSE:
            replica = msg['replica']
            # Fingerprint of the replica state this response commits to;
            # matching fingerprints mean matching (view, seq, history, reply).
            thing = '' + str(replica['view']) + '|' + str(replica['seq']) + '|' + replica['hn'] + '|' + replica['hr'] + '|' + replica['clientId']
            if thing in self.answered:
                return
            if thing not in self.answers:
                self.answers[thing] = []
            self.answers.get(thing, []).append(msg)
            self.ans_count += 1
            # Accept once 3f + 1 replicas agree (Zyzzyva fast path).
            if len(self.answers[thing]) >= (3 * world.max_failures + 1):
                if replica['clientId'] == self.id:
                    print "** Answer:: ", `msg['response']`
                else:
                    # Responses for other clients are broadcast to everyone.
                    print "** Push:: ", `msg['response']`
                self.answered.append(thing)
    def do_operation(self, operation):
        # Build a REQUEST and hand it straight to the current primary.
        msg = {'msgtype': REQUEST.str(),
               'operation': operation,
               'time': time.time(),
               'clientId': self.id}
        world.primary.rx_message(json.dumps(msg))
class Server:
    """Zyzzyva replica.  The primary orders client REQUESTs into ORDER-REQ
    broadcasts; every replica verifies the order and sends a speculative
    SPEC-RESPONSE to all clients.

    NOTE(review): Python 2 code (print statements, backtick repr).
    """
    def __init__(self, id, is_master):
        self.id = id
        self.view = 0
        self.seq = 0
        self.is_master = is_master
        # hn: rolling hash-chain over the ordered request history.
        self.hn = 'Initial'
        world.servers[id] = self
        if is_master:
            world.primary = self
    def rx_message(self, stuff):
        #print `self.__class__.__name__`, `self.id`, stuff
        msg = json.loads(stuff)
        if MsgType(msg['msgtype']) is REQUEST:
            # Backups forward client requests to the primary.
            if not self.is_master:
                world.primary.rx_message(stuff)
                return
            self.seq += 1
            # d: digest of the raw client message.
            d = hashlib.sha256()
            d.update(stuff)
            x = d.hexdigest()
            # Extend the history chain: hn' = H(hn | d).
            hnh = hashlib.sha256()
            hnh.update(self.hn)
            hnh.update(x)
            self.hn = hnh.hexdigest()
            newmsg = {
                'msgtype': ORDER_REQ.str(),
                'primary': {
                    'view': self.view,
                    'seq': self.seq,
                    'hn': self.hn,
                    'd': x,
                    'nd': {}
                },
                'clientMessage': stuff
            }
            new_stuff = json.dumps(newmsg)
            # Broadcast the ordered request to every other replica.
            for id, replica in world.servers.items():
                if id == self.id:
                    continue
                replica.rx_message(new_stuff)
        elif MsgType(msg['msgtype']) is ORDER_REQ:
            primary = msg['primary']
            # Validate the primary's ordering before speculating.
            if primary['view'] != self.view:
                print `self.__class__.__name__`, `self.id`, "ERROR:", "Incorrect view"
                return # Incorrect view
            if primary['seq'] != self.seq + 1:
                print `self.__class__.__name__`, `self.id`, "ERROR:", "Incorrect sequence"
                return # Incorrect sequence
            dh = hashlib.sha256()
            dh.update(msg['clientMessage'])
            d = dh.hexdigest()
            if primary['d'] != d:
                print `self.__class__.__name__`, `self.id`, "ERROR:", "Incorrect message hash"
                return # Message Hash
            # Recompute the history chain locally and compare.
            hnh = hashlib.sha256()
            hnh.update(self.hn)
            hnh.update(d)
            hn = hnh.hexdigest()
            if primary['hn'] != hn:
                print `self.__class__.__name__`, `self.id`, "ERROR:", "Incorrect history hash"
                return
            client = json.loads(msg['clientMessage'])
            # Toy application logic: only "Hello" succeeds with 'World'.
            if client['operation'] != "Hello":
                reply = 'Error' ## Note that errors are successful in the sense of Zyzzyva
            else:
                reply = 'World'
            # Commit the speculative execution locally.
            self.seq += 1
            self.hn = hn
            hrh = hashlib.sha256()
            hrh.update(reply)
            hr = hrh.hexdigest()
            replymsg = {
                'msgtype': SPEC_RESPONSE.str(),
                'replica': {
                    'view': self.view,
                    'seq': self.seq,
                    'hn': self.hn,
                    'hr': hr,
                    'clientId': client['clientId']
                },
                'replicaId': self.id,
                'response': reply,
                'primary': primary
            }
            replydata = json.dumps(replymsg)
            # NOTE(review): the loop variable `client` shadows the parsed
            # client message above; harmless here since it is only rebound
            # after its last use, but worth renaming.
            for id, client in world.clients.items():
                client.rx_message(replydata)
# Demo driver: two clients and seven servers (s0 is the primary).  With
# max_failures = 1 a client accepts after 3f + 1 = 4 matching responses.
c1 = Client('c1')
c2 = Client('c2')
servers = [Server('s%d' % x, x == 0) for x in range(7)]
c1.do_operation('Hello')
c2.do_operation('World!')
|
import characterquests
from characterutil import *
def sz_01_10_Wandering_Isle(count, datatree, openfile):
    """Write the section header for the 01-10 Wandering Isle starter zone
    (no individual quest lines are emitted for this zone)."""
    characterquests.charquestheader(count, "01-10: Wandering Isle", openfile)
def z_80_90_Jade_Forest(count,datatree,openfile):
characterquests.charquestheader(count,"80-90: Jade Forest",openfile)
characterquests.charquestprintfaction(count,datatree,openfile,29552,"Critical Condition","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29553,"The Missing Admiral","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29555,"The White Pawn","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29556,"Hozen Aren't Your Friends, Hozen Are Your Enemies","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29558,"The Path of War","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29559,"Freeing Our Brothers","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29560,"Ancient Power","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29562,"Jailbreak","alliance")
characterquests.charquestprint(count,datatree,openfile,29576,"An Air of Worry")
characterquests.charquestprint(count,datatree,openfile,29578,"Defiance")
characterquests.charquestprint(count,datatree,openfile,29579,"Rally the Survivors")
characterquests.charquestprint(count,datatree,openfile,29580,"Orchard-Supplied Hardware")
characterquests.charquestprint(count,datatree,openfile,29585,"Spitfire")
characterquests.charquestprint(count,datatree,openfile,29586,"The Splintered Path")
characterquests.charquestprint(count,datatree,openfile,29587,"Unbound")
characterquests.charquestprint(count,datatree,openfile,29617,"Tian Monastery")
characterquests.charquestprint(count,datatree,openfile,29618,"The High Elder")
characterquests.charquestprint(count,datatree,openfile,29619,"A Courteous Guest")
characterquests.charquestprint(count,datatree,openfile,29620,"The Great Banquet")
characterquests.charquestprint(count,datatree,openfile,29622,"Your Training Starts Now")
characterquests.charquestprint(count,datatree,openfile,29623,"Perfection")
characterquests.charquestprint(count,datatree,openfile,29624,"Attention")
characterquests.charquestprint(count,datatree,openfile,29626,"Groundskeeper Wu")
characterquests.charquestprint(count,datatree,openfile,29627,"A Proper Weapon")
characterquests.charquestprint(count,datatree,openfile,29628,"A Strong Back")
characterquests.charquestprint(count,datatree,openfile,29629,"A Steady Hand")
characterquests.charquestprint(count,datatree,openfile,29630,"And a Heavy Fist")
characterquests.charquestprint(count,datatree,openfile,29631,"Burning Bright")
characterquests.charquestprint(count,datatree,openfile,29632,"Becoming Battle-Ready")
characterquests.charquestprint(count,datatree,openfile,29633,"Zhi-Zhi, the Dextrous")
characterquests.charquestprint(count,datatree,openfile,29634,"Husshun, the Wizened")
characterquests.charquestprint(count,datatree,openfile,29635,"Xiao, the Eater")
characterquests.charquestprint(count,datatree,openfile,29636,"A Test of Endurance")
characterquests.charquestprint(count,datatree,openfile,29637,"The Rumpus")
characterquests.charquestprint(count,datatree,openfile,29639,"Flying Colors")
characterquests.charquestprint(count,datatree,openfile,29646,"Flying Colors")
characterquests.charquestprint(count,datatree,openfile,29647,"Flying Colors")
characterquests.charquestprint(count,datatree,openfile,29670,"<NAME>")
characterquests.charquestprintfaction(count,datatree,openfile,29694,"Regroup!","horde")
characterquests.charquestprint(count,datatree,openfile,29716,"The Double Hozen Dare")
characterquests.charquestprint(count,datatree,openfile,29717,"Down Kitty!")
characterquests.charquestprint(count,datatree,openfile,29723,"The Jade Witch")
characterquests.charquestprintfaction(count,datatree,openfile,29725,"SI:7 Report: Fire From the Sky","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29726,"SI:7 Report: Hostile Natives","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29727,"SI:7 Report: Take No Prisoners","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29730,"Scouting Report: Hostile Natives","horde")
characterquests.charquestprintfaction(count,datatree,openfile,29731,"Scouting Report: On the Right Track","horde")
characterquests.charquestprintfaction(count,datatree,openfile,29733,"SI:7 Report: Lost in the Woods","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29743,"Monstrosity","horde")
characterquests.charquestprint(count,datatree,openfile,29745,"The Sprites' Plight")
characterquests.charquestprint(count,datatree,openfile,29747,"Break the Cycle")
characterquests.charquestprint(count,datatree,openfile,29748,"Simulacrumble")
characterquests.charquestprint(count,datatree,openfile,29749,"An Urgent Plea")
characterquests.charquestprint(count,datatree,openfile,29750,"Vessels of the Spirit")
characterquests.charquestprint(count,datatree,openfile,29751,"Ritual Artifacts")
characterquests.charquestprint(count,datatree,openfile,29752,"The Wayward Dead")
characterquests.charquestprint(count,datatree,openfile,29753,"Back to Nature")
characterquests.charquestprint(count,datatree,openfile,29754,"To Bridge Earth and Sky")
characterquests.charquestprint(count,datatree,openfile,29755,"Pei-Back")
characterquests.charquestprint(count,datatree,openfile,29756,"A Humble Offering")
characterquests.charquestprintfaction(count,datatree,openfile,29759,"Kung Din","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29762,"Family Heirlooms","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29765,"Cryin' My Eyes Out","horde")
characterquests.charquestprintfaction(count,datatree,openfile,29804,"Seein' Red","horde")
characterquests.charquestprintfaction(count,datatree,openfile,29815,"Forensic Science","horde")
characterquests.charquestprintfaction(count,datatree,openfile,29821,"Missed Me By... That Much!","horde")
characterquests.charquestprintfaction(count,datatree,openfile,29822,"Lay of the Land","horde")
characterquests.charquestprintfaction(count,datatree,openfile,29823,"Scouting Report: The Friend of My Enemy","horde")
characterquests.charquestprintfaction(count,datatree,openfile,29824,"Scouting Report: Like Jinyu in a Barrel","horde")
characterquests.charquestprintfaction(count,datatree,openfile,29827,"Acid Rain","horde")
characterquests.charquestprint(count,datatree,openfile,29865,"The Silkwood Road")
characterquests.charquestprint(count,datatree,openfile,29866,"The Threads that Stick")
characterquests.charquestprintfaction(count,datatree,openfile,29879,"Swallowed Whole","horde")
characterquests.charquestprint(count,datatree,openfile,29881,"The Perfect Color")
characterquests.charquestprint(count,datatree,openfile,29882,"Quill of Stingers")
characterquests.charquestprintfaction(count,datatree,openfile,29883,"The Pearlfin Situation","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29885,"Road Rations","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29887,"The Elder's Instruments","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29888,"Seek Out the Lorewalker","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29889,"Borrowed Brew","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29890,"Finding Your Center","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29891,"Potency","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29892,"Body","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29893,"Hue","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29894,"Spirits of the Water","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29898,"Sacred Waters","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29899,"Rest in Peace","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29900,"An Ancient Legend","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29901,"Anduin's Decision","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29903,"A Perfect Match","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29904,"Bigger Fish to Fry","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29905,"Let Them Burn","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29906,"Carp Diem","alliance")
characterquests.charquestprint(count,datatree,openfile,29920,"Getting Permission")
characterquests.charquestprintfaction(count,datatree,openfile,29922,"In Search of Wisdom","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,29924,"Kill Kher Shan","horde")
characterquests.charquestprint(count,datatree,openfile,29925,"All We Can Spare")
characterquests.charquestprint(count,datatree,openfile,29926,"Calamity Jade")
characterquests.charquestprint(count,datatree,openfile,29927,"Mann's Man")
characterquests.charquestprint(count,datatree,openfile,29928,"I Have No Jade And I Must Scream")
characterquests.charquestprint(count,datatree,openfile,29929,"Trapped!")
characterquests.charquestprint(count,datatree,openfile,29930,"What's Mined Is Yours")
characterquests.charquestprint(count,datatree,openfile,29931,"The Serpent's Heart")
characterquests.charquestprint(count,datatree,openfile,29932,"The Temple of the Jade Serpent")
characterquests.charquestprintfaction(count,datatree,openfile,29933,"The Bees' Knees","horde")
characterquests.charquestprintfaction(count,datatree,openfile,29935,"Orders are Orders","horde")
characterquests.charquestprintfaction(count,datatree,openfile,29936,"Instant Messaging","horde")
characterquests.charquestprintfaction(count,datatree,openfile,29937,"Furious Fowl","horde")
characterquests.charquestprintfaction(count,datatree,openfile,29939,"Boom Bait","horde")
characterquests.charquestprintfaction(count,datatree,openfile,29941,"Beyond the Horizon","horde")
characterquests.charquestprintfaction(count,datatree,openfile,29942,"Silly Wikket, Slickies are for Hozen","horde")
characterquests.charquestprintfaction(count,datatree,openfile,29943,"Guerrillas in our Midst","horde")
characterquests.charquestprintfaction(count,datatree,openfile,29966,"Burning Down the House","horde")
characterquests.charquestprintfaction(count,datatree,openfile,29967,"Boom Goes the Doonamite!","horde")
characterquests.charquestprintfaction(count,datatree,openfile,29968,"Green-ish Energy","horde")
characterquests.charquestprintfaction(count,datatree,openfile,29971,"The Scouts Return","horde")
characterquests.charquestprint(count,datatree,openfile,29993,"Find the Boy")
characterquests.charquestprint(count,datatree,openfile,29995,"Shrine of the Dawn")
characterquests.charquestprint(count,datatree,openfile,29997,"The Scryer's Dilemma")
characterquests.charquestprint(count,datatree,openfile,29998,"The Librarian's Quandary")
characterquests.charquestprint(count,datatree,openfile,29999,"The Rider's Bind")
characterquests.charquestprint(count,datatree,openfile,30000,"The Jade Serpent")
characterquests.charquestprint(count,datatree,openfile,30001,"Moth-Ridden")
characterquests.charquestprint(count,datatree,openfile,30002,"Pages of History")
characterquests.charquestprint(count,datatree,openfile,30004,"Everything In Its Place")
characterquests.charquestprint(count,datatree,openfile,30005,"Lighting Up the Sky")
characterquests.charquestprint(count,datatree,openfile,30006,"The Darkness Around Us")
characterquests.charquestprint(count,datatree,openfile,30011,"A New Vision")
characterquests.charquestprintfaction(count,datatree,openfile,30015,"Dawn's Blossom","horde")
characterquests.charquestprint(count,datatree,openfile,30063,"Behind the Masks")
characterquests.charquestprint(count,datatree,openfile,30064,"Saving the Sutras")
characterquests.charquestprint(count,datatree,openfile,30065,"Arrows of Fortune")
characterquests.charquestprint(count,datatree,openfile,30066,"Hidden Power")
characterquests.charquestprint(count,datatree,openfile,30067,"The Shadow of Doubt")
characterquests.charquestprint(count,datatree,openfile,30068,"Flames of the Void")
characterquests.charquestprintfaction(count,datatree,openfile,30069,"No Plan Survives Contact with the Enemy","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,30070,"The Fall of Ga'trul","alliance")
characterquests.charquestprint(count,datatree,openfile,30134,"Wild Things")
characterquests.charquestprint(count,datatree,openfile,30135,"Beating the Odds")
characterquests.charquestprint(count,datatree,openfile,30136,"Empty Nests")
characterquests.charquestprint(count,datatree,openfile,30137,"Egg Collection")
characterquests.charquestprint(count,datatree,openfile,30138,"Choosing the One")
characterquests.charquestprint(count,datatree,openfile,30139,"The Rider's Journey")
characterquests.charquestprint(count,datatree,openfile,30140,"The Rider's Journey")
characterquests.charquestprint(count,datatree,openfile,30141,"The Rider's Journey")
characterquests.charquestprint(count,datatree,openfile,30142,"It's A...")
characterquests.charquestprint(count,datatree,openfile,30143,"They Grow Like Weeds")
characterquests.charquestprint(count,datatree,openfile,30144,"Flight Training: Ring Round-Up")
characterquests.charquestprint(count,datatree,openfile,30145,"Flight Training: Full Speed Ahead")
characterquests.charquestprint(count,datatree,openfile,30146,"Snack Time")
characterquests.charquestprint(count,datatree,openfile,30147,"Fragments of the Past")
characterquests.charquestprint(count,datatree,openfile,30148,"Just a Flesh Wound")
characterquests.charquestprint(count,datatree,openfile,30149,"A Feast for the Senses")
characterquests.charquestprint(count,datatree,openfile,30150,"Sweet as Honey")
characterquests.charquestprint(count,datatree,openfile,30151,"Catch!")
characterquests.charquestprint(count,datatree,openfile,30152,"The Sky Race")
characterquests.charquestprint(count,datatree,openfile,30154,"The Easiest Way To A Serpent's Heart")
characterquests.charquestprint(count,datatree,openfile,30155,"Restoring the Balance")
characterquests.charquestprint(count,datatree,openfile,30156,"Feeding Time")
characterquests.charquestprint(count,datatree,openfile,30157,"Emptier Nests")
characterquests.charquestprint(count,datatree,openfile,30158,"Disarming the Enemy")
characterquests.charquestprint(count,datatree,openfile,30159,"Preservation")
characterquests.charquestprint(count,datatree,openfile,30187,"Flight Training: In Due Course")
characterquests.charquestprint(count,datatree,openfile,30188,"Riding the Skies")
characterquests.charquestprintfaction(count,datatree,openfile,30466,"Sufficient Motivation","horde")
characterquests.charquestprintfaction(count,datatree,openfile,30484,"Gauging Our Progress","horde")
characterquests.charquestprintfaction(count,datatree,openfile,30485,"Last Piece of the Puzzle","horde")
characterquests.charquestprint(count,datatree,openfile,30495,"Love's Labor")
characterquests.charquestprintfaction(count,datatree,openfile,30498,"Get Back Here!","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,30499,"Get Back Here!","horde")
characterquests.charquestprint(count,datatree,openfile,30500,"Residual Fallout")
characterquests.charquestprint(count,datatree,openfile,30502,"Jaded Heart")
characterquests.charquestprintfaction(count,datatree,openfile,30504,"Emergency Response","horde")
characterquests.charquestprintfaction(count,datatree,openfile,30565,"An Unexpected Advantage","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,30568,"Helping the Cause","alliance")
characterquests.charquestprint(count,datatree,openfile,30648,"Moving On")
characterquests.charquestprintfaction(count,datatree,openfile,31112,"They're So Thorny!","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31121,"Stay a While, and Listen","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31130,"A Visit with Lorewalker Cho","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,31132,"A Mile in My Shoes","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31134,"If These Stones Could Speak","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31152,"Peering Into the Past","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31167,"Family Tree","horde")
characterquests.charquestprint(count,datatree,openfile,31194,"Slitherscale Suppression")
characterquests.charquestprint(count,datatree,openfile,31230,"Welcome to Dawn's Blossom")
characterquests.charquestprintfaction(count,datatree,openfile,31239,"What's in a Name Name?","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31241,"Wicked Wikkets","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31261,"Captain Jack's Dead","horde")
characterquests.charquestprint(count,datatree,openfile,31303,"The Seal is Broken")
characterquests.charquestprint(count,datatree,openfile,31307,"FLAG - Jade Infused Blade")
characterquests.charquestprintfaction(count,datatree,openfile,31319,"Emergency Response","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,31362,"Last Piece of the Puzzle","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,31373,"The Order of the Cloud Serpent","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,31375,"The Order of the Cloud Serpent","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31732,"Unleash Hell","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,31733,"Touching Ground","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,31734,"Welcome Wagons","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,31735,"The Right Tool For The Job","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,31736,"Envoy of the Alliance","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,31737,"The Cost of War","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,31738,"Pillaging Peons","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,31739,"Priorities!","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,31740,"Koukou's Rampage","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,31741,"Twinspire Keep","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,31742,"Fractured Forces","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,31743,"Smoke Before Fire","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,31744,"Unfair Trade","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,31745,"Onward and Inward","alliance")
characterquests.charquestprintfaction(count,datatree,openfile,31765,"Paint it Red!","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31766,"Touching Ground","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31767,"Finish Them!","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31768,"Fire Is Always the Answer","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31769,"The Final Blow!","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31770,"You're Either With Us Or...","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31771,"Face to Face With Consequence","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31772,"Priorities!","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31773,"Prowler Problems","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31774,"Seeking Zin'jun","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31775,"Assault on the Airstrip","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31776,"Strongarm Tactics","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31777,"Choppertunity","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31778,"Unreliable Allies","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31779,"The Darkness Within","horde")
characterquests.charquestprint(count,datatree,openfile,31784,"Onyx To Goodness")
characterquests.charquestprint(count,datatree,openfile,31810,"Riding the Skies")
characterquests.charquestprint(count,datatree,openfile,31811,"Riding the Skies")
characterquests.charquestprintfaction(count,datatree,openfile,31978,"Priorities!","horde")
characterquests.charquestprintfaction(count,datatree,openfile,31999,"Nazgrim's Command","horde")
characterquests.charquestprint(count,datatree,openfile,33250,"A Time-Lost Treasure")
characterquests.charquestprintfaction(count,datatree,openfile,49538,"Warchief's Command: Jade Forest!","horde")
characterquests.charquestprintfaction(count,datatree,openfile,49556,"Hero's Call: Jade Forest!","alliance")
def z_81_90_Valley_of_the_Four_Winds(count,datatree,openfile):
    """Report completion status for every 81-90 Valley of the Four Winds quest.

    Writes one line per quest (via the characterquests helpers) into
    *openfile*, using *datatree* as the character's completed-quest data and
    *count* as the running output counter. Faction-specific quests are routed
    through charquestprintfaction with an explicit "alliance"/"horde" tag.
    """
    characterquests.charquestheader(count,"81-90: Valley of the Four Winds",openfile)
    characterquests.charquestprint(count,datatree,openfile,29577,"Ashyo's Vision")
    characterquests.charquestprint(count,datatree,openfile,29581,"The Golden Dream")
    characterquests.charquestprint(count,datatree,openfile,29600,"Snap Judgment")
    characterquests.charquestprint(count,datatree,openfile,29757,"Bottletoads")
    characterquests.charquestprint(count,datatree,openfile,29758,"Guess Whose Back")
    characterquests.charquestprint(count,datatree,openfile,29871,"Clever Ashyo")
    characterquests.charquestprint(count,datatree,openfile,29872,"Lin Tenderpaw")
    characterquests.charquestprint(count,datatree,openfile,29877,"A Poor Grasp of the Basics")
    characterquests.charquestprint(count,datatree,openfile,29907,"Chen and Li Li")
    characterquests.charquestprint(count,datatree,openfile,29908,"A Seemingly Endless Nuisance")
    characterquests.charquestprint(count,datatree,openfile,29909,"Low Turnip Turnout")
    characterquests.charquestprint(count,datatree,openfile,29910,"Rampaging Rodents")
    characterquests.charquestprint(count,datatree,openfile,29911,"Practically Perfect Produce")
    characterquests.charquestprint(count,datatree,openfile,29912,"The Fabulous Miss Fanny")
    characterquests.charquestprint(count,datatree,openfile,29913,"The Meat They'll Eat")
    characterquests.charquestprint(count,datatree,openfile,29914,"Back to the Sty")
    characterquests.charquestprint(count,datatree,openfile,29915,"A Neighbor's Duty")
    characterquests.charquestprint(count,datatree,openfile,29916,"Piercing Talons and Slavering Jaws")
    characterquests.charquestprint(count,datatree,openfile,29917,"Lupello")
    characterquests.charquestprint(count,datatree,openfile,29918,"A Lesson in Bravery")
    characterquests.charquestprint(count,datatree,openfile,29919,"Great Minds Drink Alike")
    characterquests.charquestprint(count,datatree,openfile,29940,"Taking a Crop")
    characterquests.charquestprint(count,datatree,openfile,29944,"Leaders Among Breeders")
    characterquests.charquestprint(count,datatree,openfile,29945,"Yellow and Red Make Orange")
    characterquests.charquestprint(count,datatree,openfile,29946,"The Warren-Mother")
    characterquests.charquestprint(count,datatree,openfile,29947,"Crouching Carrot, Hidden Turnip")
    characterquests.charquestprint(count,datatree,openfile,29948,"Thieves to the Core")
    characterquests.charquestprint(count,datatree,openfile,29949,"Legacy")
    characterquests.charquestprint(count,datatree,openfile,29950,"Li Li's Day Off")
    characterquests.charquestprint(count,datatree,openfile,29951,"Muddy Water")
    characterquests.charquestprint(count,datatree,openfile,29952,"Broken Dreams")
    characterquests.charquestprint(count,datatree,openfile,29981,"Stemming the Swarm")
    characterquests.charquestprint(count,datatree,openfile,29982,"Evacuation Orders")
    characterquests.charquestprint(count,datatree,openfile,29983,"The Hidden Master")
    characterquests.charquestprint(count,datatree,openfile,29984,"Unyielding Fists: Trial of Bamboo")
    characterquests.charquestprint(count,datatree,openfile,29985,"They Will Be Mist")
    characterquests.charquestprint(count,datatree,openfile,29986,"Fog Wards")
    characterquests.charquestprint(count,datatree,openfile,29987,"Unyielding Fists: Trial of Wood")
    characterquests.charquestprint(count,datatree,openfile,29988,"A Taste For Eggs")
    characterquests.charquestprint(count,datatree,openfile,29989,"Unyielding Fists: Trial of Stone")
    characterquests.charquestprint(count,datatree,openfile,29990,"Training and Discipline")
    characterquests.charquestprint(count,datatree,openfile,29992,"Tenderpaw By Name, Tender Paw By Reputation")
    characterquests.charquestprint(count,datatree,openfile,30028,"Grain Recovery")
    characterquests.charquestprint(count,datatree,openfile,30029,"Wee Little Shenanigans")
    characterquests.charquestprint(count,datatree,openfile,30030,"Out of Sprite")
    characterquests.charquestprint(count,datatree,openfile,30031,"Taste Test")
    characterquests.charquestprint(count,datatree,openfile,30032,"The Quest for Better Barley")
    characterquests.charquestprint(count,datatree,openfile,30046,"Chen's Resolution")
    characterquests.charquestprint(count,datatree,openfile,30047,"The Chen Taste Test")
    characterquests.charquestprint(count,datatree,openfile,30048,"Li Li and the Grain")
    characterquests.charquestprint(count,datatree,openfile,30049,"Doesn't Hold Water")
    # Fixed: title was a corrupted "Gard<NAME>" placeholder; quest 30050 is
    # "Gardener Fran and the Watering Can" (middle of the 30049->30051 chain).
    characterquests.charquestprint(count,datatree,openfile,30050,"Gardener Fran and the Watering Can")
    characterquests.charquestprint(count,datatree,openfile,30051,"The Great Water Hunt")
    characterquests.charquestprint(count,datatree,openfile,30052,"Weed War")
    characterquests.charquestprint(count,datatree,openfile,30053,"Hop Hunting")
    characterquests.charquestprint(count,datatree,openfile,30054,"Enough is Ookin' Enough")
    characterquests.charquestprint(count,datatree,openfile,30055,"Stormstout's Hops")
    characterquests.charquestprint(count,datatree,openfile,30056,"The Farmer's Daughter")
    characterquests.charquestprint(count,datatree,openfile,30057,"Seeing Orange")
    characterquests.charquestprint(count,datatree,openfile,30058,"Mothallus!")
    characterquests.charquestprint(count,datatree,openfile,30059,"The Moth Rebellion")
    characterquests.charquestprint(count,datatree,openfile,30072,"Where Silk Comes From")
    characterquests.charquestprint(count,datatree,openfile,30073,"The Emperor")
    characterquests.charquestprint(count,datatree,openfile,30074,"Knocking on the Door")
    characterquests.charquestprint(count,datatree,openfile,30075,"Clear the Way")
    characterquests.charquestprint(count,datatree,openfile,30076,"The Fanciest Water")
    characterquests.charquestprint(count,datatree,openfile,30077,"Barrels, Man")
    characterquests.charquestprint(count,datatree,openfile,30078,"Cleaning House")
    characterquests.charquestprint(count,datatree,openfile,30086,"The Search for the Hidden Master")
    characterquests.charquestprint(count,datatree,openfile,30117,"Stoneplow Thirsts")
    characterquests.charquestprint(count,datatree,openfile,30172,"Barreling Along")
    characterquests.charquestprint(count,datatree,openfile,30181,"Mushan Mastery")
    characterquests.charquestprint(count,datatree,openfile,30182,"Fox Mastery")
    characterquests.charquestprint(count,datatree,openfile,30183,"Stalker Mastery")
    characterquests.charquestprint(count,datatree,openfile,30184,"Mushan Mastery: Darkhide")
    characterquests.charquestprint(count,datatree,openfile,30185,"Tortoise Mastery")
    characterquests.charquestprint(count,datatree,openfile,30186,"Parental Mastery")
    characterquests.charquestprintfaction(count,datatree,openfile,30241,"Warn Stoneplow","horde")
    characterquests.charquestprint(count,datatree,openfile,30252,"A Helping Hand")
    characterquests.charquestprint(count,datatree,openfile,30254,"Learn and Grow II: Tilling and Planting")
    characterquests.charquestprint(count,datatree,openfile,30255,"Learn and Grow III: Tending Crops")
    characterquests.charquestprint(count,datatree,openfile,30256,"Learn and Grow IV: Harvesting")
    characterquests.charquestprint(count,datatree,openfile,30257,"Learn and Grow V: Halfhill Market")
    characterquests.charquestprint(count,datatree,openfile,30258,"Mung-Mung's Vote I: A Hozen's Problem")
    characterquests.charquestprint(count,datatree,openfile,30259,"Mung-Mung's Vote II: Rotten to the Core")
    characterquests.charquestprint(count,datatree,openfile,30260,"Growing the Farm I: The Weeds")
    characterquests.charquestprint(count,datatree,openfile,30267,"Watery Woes")
    characterquests.charquestprint(count,datatree,openfile,30275,"A Crocolisk Tale")
    characterquests.charquestprint(count,datatree,openfile,30325,"Where It Counts")
    characterquests.charquestprint(count,datatree,openfile,30326,"The Kunzen Legend-Chief")
    characterquests.charquestprint(count,datatree,openfile,30334,"Stealing is Bad... Re-Stealing is OK")
    characterquests.charquestprintfaction(count,datatree,openfile,30360,"Warn Stoneplow","alliance")
    characterquests.charquestprint(count,datatree,openfile,30376,"Hope Springs Eternal")
    characterquests.charquestprint(count,datatree,openfile,30516,"Growing the Farm I: A Little Problem")
    characterquests.charquestprint(count,datatree,openfile,30517,"Farmer Fung's Vote I: Yak Attack")
    characterquests.charquestprint(count,datatree,openfile,30518,"Farmer Fung's Vote II: On the Loose")
    characterquests.charquestprint(count,datatree,openfile,30519,"Nana's Vote I: Nana's Secret Recipe")
    characterquests.charquestprint(count,datatree,openfile,30521,"Haohan's Vote I: Bungalow Break-In")
    characterquests.charquestprint(count,datatree,openfile,30522,"Haohan's Vote II: The Real Culprits")
    characterquests.charquestprint(count,datatree,openfile,30523,"Growing the Farm II: The Broken Wagon")
    characterquests.charquestprint(count,datatree,openfile,30524,"Growing the Farm II: Knock on Wood")
    characterquests.charquestprint(count,datatree,openfile,30525,"Haohan's Vote III: Pure Poison")
    characterquests.charquestprint(count,datatree,openfile,30526,"Lost and Lonely")
    characterquests.charquestprint(count,datatree,openfile,30527,"Haohan's Vote IV: Melons For Felons")
    characterquests.charquestprint(count,datatree,openfile,30528,"Haohan's Vote V: Chief Yip-Yip")
    characterquests.charquestprint(count,datatree,openfile,30529,"Growing the Farm III: The Mossy Boulder")
    characterquests.charquestprint(count,datatree,openfile,30534,"A Second Hand")
    characterquests.charquestprint(count,datatree,openfile,30535,"Learn and Grow I: Seeds")
    characterquests.charquestprint(count,datatree,openfile,30622,"The Swarm Begins")
    characterquests.charquestprint(count,datatree,openfile,30623,"The Mantidote")
    characterquests.charquestprintfaction(count,datatree,openfile,30624,"It Does You No Good In The Keg","alliance")
    characterquests.charquestprint(count,datatree,openfile,30625,"Students No More")
    characterquests.charquestprint(count,datatree,openfile,30626,"Retreat!")
    characterquests.charquestprint(count,datatree,openfile,30627,"The Savior of Stoneplow")
    characterquests.charquestprint(count,datatree,openfile,30628,"The Gratitude of Stoneplow")
    characterquests.charquestprintfaction(count,datatree,openfile,30653,"It Does You No Good In The Keg","horde")
    characterquests.charquestprint(count,datatree,openfile,31312,"The Old Map")
    characterquests.charquestprint(count,datatree,openfile,31313,"Just A Folk Story")
    characterquests.charquestprint(count,datatree,openfile,31314,"Old Man Thistle's Treasure")
    characterquests.charquestprint(count,datatree,openfile,31315,"The Heartland Legacy")
    characterquests.charquestprint(count,datatree,openfile,31320,"Buy A Fish A Drink?")
    characterquests.charquestprint(count,datatree,openfile,31321,"Buy A Fish A Round?")
    characterquests.charquestprint(count,datatree,openfile,31322,"Buy A Fish A Keg?")
    characterquests.charquestprint(count,datatree,openfile,31323,"Buy A Fish A Brewery?")
    characterquests.charquestprint(count,datatree,openfile,31325,"A Very Nice Necklace")
    characterquests.charquestprint(count,datatree,openfile,31326,"Tina's Tasteful Tiara")
    characterquests.charquestprint(count,datatree,openfile,31328,"An Exquisite Earring")
    characterquests.charquestprint(count,datatree,openfile,31329,"A Beautiful Brooch")
    characterquests.charquestprint(count,datatree,openfile,31338,"Lost Sheepie")
    characterquests.charquestprint(count,datatree,openfile,31339,"Lost Sheepie... Again")
    characterquests.charquestprint(count,datatree,openfile,31340,"Oh Sheepie...")
    characterquests.charquestprint(count,datatree,openfile,31341,"A Wolf In Sheep's Clothing")
    characterquests.charquestprintfaction(count,datatree,openfile,31372,"The Tillers","alliance")
    characterquests.charquestprintfaction(count,datatree,openfile,31374,"The Tillers","horde")
    characterquests.charquestprint(count,datatree,openfile,31529,"Mission: Culling The Vermin")
    characterquests.charquestprint(count,datatree,openfile,31530,"Mission: The Hozen Dozen")
    characterquests.charquestprint(count,datatree,openfile,31531,"Mission: Aerial Threat")
    characterquests.charquestprint(count,datatree,openfile,31532,"Mission: Predator of the Cliffs")
    characterquests.charquestprint(count,datatree,openfile,31534,"The Beginner's Brew")
    characterquests.charquestprint(count,datatree,openfile,31537,"Ella's Taste Test")
    characterquests.charquestprint(count,datatree,openfile,31538,"A Worthy Brew")
    characterquests.charquestprint(count,datatree,openfile,31671,"Why Not Scallions?")
    characterquests.charquestprint(count,datatree,openfile,31936,"The \\\"Jinyu Princess\\\" Irrigation System")
    characterquests.charquestprint(count,datatree,openfile,31937,"Thunder King Pest Repellers")
    characterquests.charquestprint(count,datatree,openfile,31938,"The \\\"Earth-Slasher\\\" Master Plow")
    characterquests.charquestprint(count,datatree,openfile,31945,"Learn and Grow VI: Gina's Vote")
    characterquests.charquestprint(count,datatree,openfile,31946,"Mung-Mung's Vote III: The Great Carrot Caper")
    characterquests.charquestprint(count,datatree,openfile,31947,"Farmer Fung's Vote III: Crazy For Cabbage")
    characterquests.charquestprint(count,datatree,openfile,31948,"Nana's Vote II: The Sacred Springs")
    characterquests.charquestprint(count,datatree,openfile,31949,"Nana's Vote III: Witchberry Julep")
    characterquests.charquestprint(count,datatree,openfile,32018,"His Name Was... Stormstout")
    characterquests.charquestprint(count,datatree,openfile,32019,"They Call Him... Stormstout")
    characterquests.charquestprint(count,datatree,openfile,32035,"Got Silk?")
    characterquests.charquestprint(count,datatree,openfile,32038,"Stag Mastery")
    characterquests.charquestprint(count,datatree,openfile,32045,"Children of the Water")
    characterquests.charquestprint(count,datatree,openfile,32189,"A Shabby New Face")
    characterquests.charquestprint(count,datatree,openfile,32198,"One Magical, Flying Kingdom's Trash...")
    characterquests.charquestprint(count,datatree,openfile,32682,"Inherit the Earth")
    characterquests.charquestprint(count,datatree,openfile,38935,"His Name Was... Stormstout")
    characterquests.charquestprintfaction(count,datatree,openfile,49539,"Warchief's Command: Valley of the Four Winds!","horde")
    characterquests.charquestprintfaction(count,datatree,openfile,49557,"Hero's Call: Valley of the Four Winds!","alliance")
def z_81_90_Krasarang_Wilds(count,datatree,openfile):
    """Report completion status for every 81-90 Krasarang Wilds quest.

    Emits one line per quest into *openfile* via the characterquests
    helpers, checking the character's completed-quest data in *datatree*;
    *count* is the running output counter. Faction-restricted quests carry
    an "alliance"/"horde" tag and go through charquestprintfaction.
    """
    characterquests.charquestheader(count,"81-90: Krasarang Wilds",openfile)
    # (quest_id, quest_title, faction); faction None == available to both.
    # NOTE(review): title of quest 30123 is a corrupted "<NAME>" placeholder
    # in the original source -- restore from a quest database when possible.
    zone_quests = (
        (29873, "Ken-Ken", None),
        (29874, "Kang Bramblestaff", "alliance"),
        (29875, "Kang Bramblestaff", "horde"),
        (30079, "What's Eating Zhu's Watch?", None),
        (30080, "Finding Yi-Mo", None),
        (30081, "Materia Medica", None),
        (30082, "Cheer Up, Yi-Mo", None),
        (30083, "Securing the Province", None),
        (30084, "Borderlands", None),
        (30088, "Why So Serious?", None),
        (30089, "Apply Directly to the Forehead", None),
        (30090, "Zhu's Despair", None),
        (30091, "Tears of Pandaria", None),
        (30121, "Search Party", "horde"),
        (30123, "<NAME>", "horde"),
        (30124, "Blind Them!", "horde"),
        (30127, "Threat from Dojan", "horde"),
        (30128, "The Pools of Youth", "horde"),
        (30129, "The Mogu Agenda", "horde"),
        (30130, "Herbal Remedies", "horde"),
        (30131, "Life", "horde"),
        (30132, "Going West", "horde"),
        (30133, "Into the Wilds", "horde"),
        (30163, "For the Tribe", "horde"),
        (30164, "The Stoneplow Convoy", "horde"),
        (30168, "Thieving Raiders", None),
        (30169, "Raid Leader Slovan", None),
        (30174, "For Family", "horde"),
        (30175, "The Mantid", "horde"),
        (30178, "Into the Wilds", "alliance"),
        (30179, "Poisoned!", "horde"),
        (30229, "The Greater Danger", "horde"),
        (30230, "Re-Reclaim", "horde"),
        (30268, "The Murksweats", None),
        (30269, "Unsafe Passage", None),
        (30270, "Blinding the Riverblades", None),
        (30271, "Sha Can Awe", None),
        (30272, "Striking the Rain", None),
        (30273, "In the House of the Red Crane", None),
        (30274, "The Arcanic Oubliette", None),
        (30344, "The Lost Dynasty", "alliance"),
        (30346, "Where are the Pools", "alliance"),
        (30347, "The Pools of Youth", None),
        (30348, "Immortality?", "alliance"),
        (30349, "Threat from Dojan", "alliance"),
        (30350, "Squirmy Delight", "alliance"),
        (30351, "Lotus Tea", "alliance"),
        (30352, "Crane Mastery", None),
        (30353, "Profit Mastery", None),
        (30354, "No Sister Left Behind", "alliance"),
        (30355, "Re-Reclaim", "alliance"),
        (30356, "Sever Their Supply Line", "alliance"),
        (30357, "The Stoneplow Convoy", "alliance"),
        (30359, "The Lord Reclaimer", "alliance"),
        (30361, "The Mantid", "alliance"),
        (30363, "Going on the Offensive", "alliance"),
        (30384, "Blind Them!", "alliance"),
        (30445, "The Waters of Youth", "alliance"),
        (30461, "Into the Wilds", "horde"),
        (30462, "Into the Wilds", "alliance"),
        (30464, "Going West", "horde"),
        (30465, "Going on the Offensive", "alliance"),
        (30666, "Sudden, Unexpected Crocolisk Aggression", None),
        (30667, "Particular Plumage", None),
        (30668, "Build Your Own Raft", None),
        (30669, "The Lorewalker on the Lake", None),
        (30671, "Wisdom Has A Price", None),
        (30672, "Balance", None),
        (30674, "Balance Without Violence", None),
        (30675, "Buried Hozen Treasure", None),
        (30691, "Misery", None),
        (30694, "Tread Lightly", None),
        (30695, "Ahead on the Way", None),
        (31260, "Profit Mastery: Chasheen", None),
        (31262, "Crane Mastery: Needlebeak", None),
        (31369, "The Anglers", "alliance"),
        (31370, "The Anglers", "horde"),
        (49540, "Warchief's Command: Krasarang Wilds!", "horde"),
        (49558, "Hero's Call: Krasarang Wilds!", "alliance"),
    )
    for quest_id, title, faction in zone_quests:
        if faction is None:
            characterquests.charquestprint(count, datatree, openfile, quest_id, title)
        else:
            characterquests.charquestprintfaction(count, datatree, openfile, quest_id, title, faction)
def z_82_90_KunLai_Summit(count,datatree,openfile):
    """Print the quest checklist for the 82-90 Kun-Lai Summit zone.

    Quests shared by both factions are tagged None and emitted via
    charquestprint; faction-specific quests carry 'horde'/'alliance' and
    are emitted via charquestprintfaction. The list preserves the exact
    call order of the original code, so the output is unchanged.
    """
    characterquests.charquestheader(count,"82-90: KunLai Summit",openfile)
    # (quest_id, quest_title, faction) — faction None means both factions.
    quests = [
        (30457, "Call Out Their Leader", None),
        (30459, "All of the Arrows", None),
        (30460, "Hit Medicine", None),
        (30467, "My Son...", None),
        (30468, "Enraged Vengeance", None),
        (30469, "Repossession", None),
        (30480, "The Ritual", None),
        (30487, "Comin' Round the Mountain", None),
        (30488, "The Missing Muskpaw", None),
        (30489, "Fresh Needle Scent", None),
        (30490, "Yakity Yak", None),
        (30491, "At the Yak Wash", None),
        (30492, "Back in Yak", None),
        (30496, "The Waterspeaker's Staff", None),
        (30506, "<NAME> has Awakened", "alliance"),
        (30507, "<NAME> has Awakened", "alliance"),
        (30508, "<NAME> has Awakened", "alliance"),
        (30509, "General Nazgrim has Awakened", "horde"),
        (30510, "General Nazgrim has Awakened", "horde"),
        (30511, "General Nazgrim has Awakened", "horde"),
        (30512, "Westwind Rest", "alliance"),
        (30513, "Eastwind Rest", "horde"),
        (30514, "Challenge Accepted", "alliance"),
        (30515, "Challenge Accepted", "horde"),
        (30569, "Trouble on the Farmstead", "alliance"),
        (30570, "Trouble on the Farmstead", "horde"),
        (30571, "Farmhand Freedom", None),
        (30575, "Round 'Em Up", "alliance"),
        (30581, "... and the Pot, Too!", None),
        (30582, "The Late Mrs. Muskpaw", None),
        (30583, "Blue Dwarf Needs Food Badly", "alliance"),
        (30587, "Yakity Yak", None),
        (30592, "The Burlap Trail: To Burlap Waystation", None),
        (30593, "Deanimate the Reanimated", "alliance"),
        (30594, "Deanimate the Reanimated", "horde"),
        (30595, "Profiting off of the Past", None),
        (30596, "A Zandalari Troll?", None),
        (30599, "A Monkey Idol", None),
        (30600, "No Pack Left Behind", None),
        (30601, "Instant Courage", None),
        (30602, "The Rabbitsfoot", None),
        (30603, "The Broketooth Ravage", None),
        (30604, "Breaking Broketooth", None),
        (30605, "Bros Before Hozen", None),
        (30606, "Thumping Knucklethump", None),
        (30607, "Hozen Love Their Keys", None),
        (30608, "The Snackrifice", None),
        (30610, "Grummle! Grummle! Grummle!", None),
        (30611, "Unleash The Yeti!", None),
        (30612, "The Leader Hozen", None),
        (30614, "Oil Stop", None),
        (30615, "A Zandalari Troll?", None),
        (30616, "Traffic Issues", None),
        (30617, "Roadside Assistance", None),
        (30618, "Resupplying One Keg", None),
        (30619, "Mogu?! Oh No-gu!", "alliance"),
        (30620, "Mogu?! Oh No-gu!", "horde"),
        (30621, "They Stole My Luck!", None),
        (30650, "Pandaren Prisoners", "alliance"),
        (30651, "Barrels of Fun", "alliance"),
        (30652, "In Tents Channeling", "alliance"),
        (30655, "<NAME>", "horde"),
        (30656, "Barrels of Fun", "horde"),
        (30657, "In Tents Channeling", "horde"),
        (30660, "The Ordo Warbringer", None),
        (30661, "The Ordo Warbringer", None),
        (30662, "The Ordo Warbringer", "alliance"),
        (30663, "The Ordo Warbringer", "horde"),
        (30665, "The Defense of Shado-Pan Fallback", None),
        (30670, "Turnabout", None),
        (30673, "Holed Up", None),
        (30680, "Holed Up", None),
        (30681, "Holed Up", None),
        (30682, "Holed Up", None),
        (30683, "One Traveler's Misfortune", None),
        (30684, "Seeker's Folly", None),
        (30690, "Unmasking the Yaungol", None),
        (30692, "The Burlap Trail: To Kota Basecamp", None),
        (30699, "To Winter's Blossom", None),
        (30710, "Provoking the Trolls", None),
        (30715, "A Line Unbroken", None),
        (30723, "Honor, Even in Death", None),
        (30724, "To the Wall!", None),
        (30742, "Shut it Down", None),
        (30743, "Gourmet Kafa", None),
        (30744, "Kota Blend", None),
        (30745, "Trouble Brewing", None),
        (30746, "A Fair Trade", None),
        (30747, "The Burlap Grind", None),
        (30750, "Off the Wall!", None),
        (30751, "A Terrible Sacrifice", None),
        (30752, "Unbelievable!", None),
        (30765, "Regaining Honor", None),
        (30766, "Profiting off of the Past", None),
        (30794, "Emergency Care", None),
        (30795, "Staying Connected", None),
        (30796, "An End to Everything", None),
        (30797, "It Was Almost Alive", None),
        (30798, "Breaking the Emperor's Shield", None),
        (30799, "The Tomb of Shadows", None),
        (30800, "Stealing Their Thunder King", None),
        (30801, "Lessons from History", None),
        (30802, "Chasing the Storm", None),
        (30804, "The Fearmaster", None),
        (30805, "Justice", None),
        (30806, "The Scent of Life", None),
        (30807, "By the Falls, For the Fallen", None),
        (30808, "A Grummle's Luck", None),
        (30816, "Checking In", None),
        (30819, "Preparing the Remains", None),
        (30820, "A Funeral", None),
        (30821, "The Burlap Grind", None),
        (30823, "Shut it Down", None),
        (30824, "Gourmet Kafa", None),
        (30825, "Kota Blend", None),
        (30826, "Trouble Brewing", None),
        (30828, "Cleansing the Mere", None),
        (30829, "The Tongue of Ba-Shon", None),
        (30834, "Father and Child Reunion", None),
        (30855, "The Fall of Shai Hu", None),
        (30879, "Round 1: Brewmaster Chani", None),
        (30880, "Round 1: The Streetfighter", None),
        (30881, "Round 2: Clever Ashyo & Ken-Ken", None),
        (30882, "Round 2: Kang Bramblestaff", None),
        (30883, "Round 3: The Wrestler", None),
        (30885, "Round 3: Master Boom Boom", None),
        (30902, "Round 4: Master Windfur", None),
        (30907, "Round 4: The P.U.G.", None),
        (30935, "Fisherman's Tale", None),
        (30942, "Make A Fighter Out of Me", None),
        (30943, "Handle With Care", None),
        (30944, "It Takes A Village", None),
        (30945, "What's Yours Is Mine", None),
        (30946, "Revelations", None),
        (30967, "Free the Dissenters", None),
        (30991, "Do a Barrel Roll!", None),
        (30992, "Finish This!", None),
        (30993, "Where are My Reinforcements?", None),
        (30994, "Lao-Chin's Gambit", None),
        (30999, "Path Less Traveled", None),
        (31011, "Enemies At Our Door", None),
        (31207, "The Arena of Annihilation", None),
        (31228, "Prophet Khar'zul", None),
        (31251, "Best Meals Anywhere!", "horde"),
        (31252, "Back to Westwind Rest", "alliance"),
        (31253, "Back to Eastwind Rest", "horde"),
        (31254, "The Road to Kun-Lai", "alliance"),
        (31255, "The Road to Kun-Lai", "horde"),
        (31256, "Round 'Em Up", "horde"),
        (31285, "The Spring Drifter", None),
        (31286, "Robbing Robbers of Robbers", None),
        (31287, "Educating Saurok", None),
        (31306, "Seeker's Folly", None),
        (31380, "Trial At The Temple of the White Tiger", None),
        (31381, "Trial At The Temple of the White Tiger", None),
        (31392, "Temple of the White Tiger", "alliance"),
        (31393, "Temple of the White Tiger", "horde"),
        (31394, "A Celestial Experience", "alliance"),
        (31395, "A Celestial Experience", "horde"),
        (31451, "The Missing Merchant", "horde"),
        (31452, "The Missing Merchant", "alliance"),
        (31453, "The Shado-Pan", "horde"),
        (31455, "The Shado-Pan", "alliance"),
        (31456, "Muskpaw Ranch", "alliance"),
        (31457, "Muskpaw Ranch", "horde"),
        (31459, "Cho's Missive", "horde"),
        (31460, "Cho's Missive", "alliance"),
        (31492, "The Torch of Strength", None),
        (31511, "A Witness to History", "horde"),
        (31512, "A Witness to History", "alliance"),
        (31517, "Contending With Bullies", None),
        (31518, "The Vale of Eternal Blossoms", None),
        (38936, "The Road to Kun-Lai", None),
        (49541, "Warchief's Command: Kun-Lai Summit!", "horde"),
        (49559, "Hero's Call: Kun-Lai Summit!", "alliance"),
    ]
    # Data-driven replacement for ~175 repeated call statements; the call
    # sequence (and therefore the output) is identical to the original.
    for quest_id, quest_title, faction in quests:
        if faction is None:
            characterquests.charquestprint(count, datatree, openfile, quest_id, quest_title)
        else:
            characterquests.charquestprintfaction(count, datatree, openfile, quest_id, quest_title, faction)
def z_83_90_Townlong_Steppes(count,datatree,openfile):
    """Emit the section header for the 83-90 Townlong Steppes zone
    (no quest entries are listed for this zone yet)."""
    zone_title = "83-90: Townlong Steppes"
    characterquests.charquestheader(count, zone_title, openfile)
def z_84_90_Dread_Wastes(count,datatree,openfile):
    """Emit the section header for the 84-90 Dread Wastes zone
    (no quest entries are listed for this zone yet)."""
    zone_title = "84-90: Dread Wastes"
    characterquests.charquestheader(count, zone_title, openfile)
def z_85_90_Vale_of_Eternal_Blossoms(count,datatree,openfile):
    """Emit the section header for the 85-90 Vale of Eternal Blossoms zone
    (no quest entries are listed for this zone yet)."""
    zone_title = "85-90: Vale of Eternal Blossoms"
    characterquests.charquestheader(count, zone_title, openfile)
def z_85_90_Isle_of_Thunder(count,datatree,openfile):
    """Emit the section header for the 85-90 Isle of Thunder zone
    (no quest entries are listed for this zone yet)."""
    zone_title = "85-90: Isle of Thunder"
    characterquests.charquestheader(count, zone_title, openfile)
def z_85_90_Timeless_Isle(count,datatree,openfile):
    """Emit the section header for the 85-90 Timeless Isle zone
    (no quest entries are listed for this zone yet)."""
    zone_title = "85-90: Timeless Isle"
    characterquests.charquestheader(count, zone_title, openfile)
def z_85_90_Pandaren_Campaign(count,datatree,openfile):
    """Emit the section header for the 85-90 Pandaren Campaign
    (no quest entries are listed for this section yet)."""
    zone_title = "85-90: Pandaren Campaign"
    characterquests.charquestheader(count, zone_title, openfile)
|
<gh_stars>0
# Version: 2020.02.21
#
# MIT License
#
# Copyright (c) 2018 <NAME> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import numpy as np
import math
import cv2
from skimage import transform as stf
def transform(data, center, output_size, scale, rotation):
    """Crop an output_size x output_size patch out of `data`, centred on
    `center`, scaled by output_size/scale and rotated by `rotation` degrees.

    Returns (cropped_patch, affine) where `affine` is the 2x3 matrix that
    was passed to cv2.warpAffine.
    """
    scale_factor = float(output_size) / scale
    theta = float(rotation) * np.pi / 180.0
    centre_x = center[0] * scale_factor
    centre_y = center[1] * scale_factor
    # Compose: scale -> move the (scaled) centre to the origin -> rotate
    # about the origin -> move the origin to the patch centre.
    chain = (
        stf.SimilarityTransform(scale=scale_factor)
        + stf.SimilarityTransform(translation=(-centre_x, -centre_y))
        + stf.SimilarityTransform(rotation=theta)
        + stf.SimilarityTransform(translation=(output_size / 2, output_size / 2))
    )
    affine = chain.params[0:2]
    patch = cv2.warpAffine(data, affine, (output_size, output_size), borderValue=0.0)
    return patch, affine
def transform_pt(pt, trans):
    """Apply the 2x3 affine matrix `trans` to the 2-D point `pt`.

    pt: sequence of at least (x, y); trans: 2x3 (rows of an affine matrix).
    Returns the transformed (x, y) as a numpy array.
    """
    # The original applied .T to this 1-D array, which is a no-op; dropped.
    homogeneous = np.array([pt[0], pt[1], 1.0])
    return np.dot(trans, homogeneous)[:2]
def gaussian(img, pt, sigma):
    """Draw an (unnormalised) 2-D Gaussian centred at pt=(x, y) into img.

    The peak value at the centre is 1.0. Returns True when at least part
    of the kernel overlaps the image and was written, False otherwise.
    NOTE: affected pixels are overwritten, not accumulated/maxed.
    """
    assert sigma >= 0
    if sigma == 0:
        # Degenerate kernel: a single pixel.
        # BUG FIX: the original indexed unconditionally, so an out-of-range
        # point crashed (or silently wrapped for negative coordinates).
        if not (0 <= pt[0] < img.shape[1] and 0 <= pt[1] < img.shape[0]):
            return False
        img[pt[1], pt[0]] = 1.0
        return True
    # Kernel bounding box: upper-left (inclusive), bottom-right (exclusive),
    # covering +/- 3 sigma around the centre.
    ul = [int(pt[0] - 3 * sigma), int(pt[1] - 3 * sigma)]
    br = [int(pt[0] + 3 * sigma + 1), int(pt[1] + 3 * sigma + 1)]
    # Bail out when no part of the kernel is inside the image.
    # BUG FIX: the x bound used `>` while the y bound used `>=`, so a kernel
    # starting exactly one column past the right edge slipped through (it
    # produced an empty slice but still returned True).
    if (ul[0] >= img.shape[1] or ul[1] >= img.shape[0] or
            br[0] < 0 or br[1] < 0):
        return False
    # Generate the kernel; not normalised so the centre value equals 1.
    size = 6 * sigma + 1
    x = np.arange(0, size, 1, float)
    y = x[:, np.newaxis]
    x0 = y0 = size // 2
    g = np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
    # Overlap ranges, in kernel coordinates and image coordinates.
    g_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]
    g_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]
    img_x = max(0, ul[0]), min(br[0], img.shape[1])
    img_y = max(0, ul[1]), min(br[1], img.shape[0])
    img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
    return True
def estimate_trans_bbox(face, input_size, s=2.0):
    """Build the 2x3 affine matrix mapping a face box into a square crop.

    face: bounding box (x0, y0, x1, y1).
    input_size: side length of the target square, in pixels.
    s: padding factor; the longer box side times s fills input_size.
    Returns a 2x3 numpy array suitable for cv2.warpAffine.
    """
    w = face[2] - face[0]
    h = face[3] - face[1]
    # Integer box centre (truncation kept from the original behaviour).
    wc = int((face[2] + face[0]) / 2)
    hc = int((face[3] + face[1]) / 2)
    # Scale so that max(w, h) * s maps onto input_size.
    scale = input_size / (max(w, h) * s)
    # (The original also computed an unused `im_size` local; removed.)
    M = np.array([
        [scale, 0, input_size / 2 - wc * scale],
        [0, scale, input_size / 2 - hc * scale],
    ])
    return M
|
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 25 10:37:17 2021
@author: yonnss
"""
import tt
import scipy.io
import numpy as np
from CME import CME,Gillespie,CompleteObservations,Observations_grid
import matplotlib.pyplot as plt
import scipy.integrate
import tt.amen
import datetime
import sys
import scipy.interpolate
import scipy.stats
from mpl_toolkits import mplot3d
from ttInt import ttInt
from tt_aux import *
import tensorflow as tf
# import tensorflow_probability as tfp
def eval_post(Atts,params,P0,time_observation,observations,obs_operator,eps=1e-7,method = 'cheby',dtmax=0.1,Nmax = 16):
    """Compute per-observation likelihood factors of a CME posterior using
    a tensor-train (TT) integrator.

    Atts: parameter-independent TT operators; the generator is their
        `params`-weighted sum.
    P0: initial distribution in TT format.
    time_observation / observations: observation times and values;
        observations[i, :] is conditioned on at time_observation[i].
    obs_operator: callable building the observation likelihood in TT format.
    Returns a numpy array with one normalisation constant per observation
    after the first, i.e. the likelihood factors p(y_i | y_1..i-1).
    """
    # Assemble the generator as a linear combination of the TT operators,
    # then truncate small singular values.
    Att = Atts[0]*params[0]
    for i in range(1,params.size):
        Att += Atts[i]*params[i]
    Att = Att.round(1e-12)
    qtt = True  # NOTE(review): hard-coded, so the else-branch below is dead code.
    if qtt:
        # Reshape operator and state into quantised TT (QTT) format.
        A_qtt = ttm2qttm(Att)
        integrator = ttInt(A_qtt, epsilon = eps, N_max = Nmax, dt_max = 1.0,method=method)
        P = tt2qtt(P0)
    else:
        integrator = ttInt(Att, epsilon = eps, N_max = Nmax, dt_max = 1.0,method=method)
        P = P0
    ps = []
    for i in range(1,time_observation.size):
        dt = time_observation[i]-time_observation[i-1]
        # Propagate over the inter-observation interval, splitting it so
        # each sub-interval is at most dtmax long.
        P = integrator.solve(P, dt, intervals = int(np.ceil(dt/dtmax)),qtt=True)
        # Condition on the observation: <Po, P> is the likelihood factor.
        Po_tt = obs_operator(observations[i,:],P,time_observation[i])
        Po_tt = tt2qtt(Po_tt)
        ps.append(tt.dot(Po_tt,P))
        # Multiply in the likelihood, truncate, and renormalise.
        P = (P*Po_tt).round(1e-9)
        P = P*(1/ps[-1])
    ps = np.array(ps)
    return ps
def eval_post_full(mdl,params,P0,time_observation,observations,obs_operator,eps=1e-7,method = 'cheby',dtmax=0.1,Nmax = 16):
    """Dense reference implementation of eval_post.

    Solves the CME with scipy's ODE integrator on the full (non-TT) state
    and conditions on each observation in turn.
    NOTE: eps, method, dtmax and Nmax are accepted for signature parity
    with eval_post but are not used by the dense solver.
    Returns a numpy array of likelihood factors, one per observation after
    the first. (Dead `tme` timing locals from the original were removed.)
    """
    mdl.C = params
    mdl.construct_generator2(to_tf=False)
    gen = mdl.gen

    def rhs(t, y):
        # CME right-hand side: dP/dt = A P (sparse matrix-vector product).
        return gen.dot(y)

    P = P0.full()
    ps = []
    for i in range(1, time_observation.size):
        dt = time_observation[i] - time_observation[i - 1]
        # Propagate the distribution over the inter-observation interval.
        res = scipy.integrate.solve_ivp(rhs, [0, dt], P.flatten())
        P = res.y[:, -1].reshape(mdl.size)
        # Condition on the observation; the normalisation constant Z is the
        # likelihood factor. Renormalise for the next interval.
        Po = obs_operator(observations[i, :], P, time_observation[i]).full()
        P = P * Po
        Z = np.sum(P)
        ps.append(Z)
        P = P * (1 / Z)
    return np.array(ps)
def eval_post_tf(mdl,params,P0,time_observation,observations,obs_operator,eps=1e-7,method = 'cheby',dtmax=0.1,Nmax = 16):
    """TensorFlow variant of eval_post_full using tfp's Dormand-Prince ODE
    solver on a densified generator.

    NOTE: eps, method, dtmax and Nmax are accepted for signature parity
    with eval_post but are not used here.
    Returns a numpy array of likelihood factors, one per observation after
    the first.
    """
    # BUG FIX: `tfp` was referenced below but its module-level import is
    # commented out, so calling this function raised NameError. Import it
    # locally so the module still loads when this variant is unused.
    import tensorflow_probability as tfp

    mdl.C = params
    mdl.construct_generator2(to_tf=True)
    Gen = tf.sparse.to_dense(mdl.gen, validate_indices=False)

    def rhs(t, y):
        # CME right-hand side: dP/dt = A P (dense matmul).
        return Gen @ y

    P = tf.constant(P0.full().reshape([-1, 1]))
    ps = []
    for i in range(1, time_observation.size):
        print(i)
        dt = time_observation[i] - time_observation[i - 1]
        # Propagate the distribution over the inter-observation interval.
        results = tfp.math.ode.DormandPrince().solve(rhs, 0, P, solution_times=[0, dt])
        P = results.states[1]
        # Condition on the observation; Z is the likelihood factor.
        Po = tf.reshape(tf.constant(obs_operator(observations[i, :], P, time_observation[i]).full()), [-1, 1])
        P = P * Po
        Z = tf.reduce_sum(P).numpy()
        ps.append(Z)
        P = P / Z
    return np.array(ps)
<filename>vgg_train.py
import torch.optim as optim
import torch.nn as nn
import time
import os
import datetime
from cifar10.tnt_solver import *
from collections import OrderedDict
from collections import namedtuple
from itertools import product
from cifar10.classifiers.vgg import *
import torchnet.meter as meter
import torch
import pandas as pd
import json
from torch.utils.tensorboard import SummaryWriter
class RunManager():
    """Bookkeeping for a hyper-parameter sweep.

    Tracks timing, loss and accuracy per epoch and per run, mirrors the
    metrics to TensorBoard, and can dump the accumulated results to
    CSV and JSON.
    """

    def __init__(self):
        # Loader/network/params are supplied later via begin_run().
        # (The original assigned run_start_time = time.time() and then
        # immediately overwrote it with None; the redundant call is removed.)
        self.loader = None
        self.network = None
        self.params = None
        self.run_start_time = None
        self.epoch_start_time = None
        self.run_count = 0
        self.epoch_count = 0
        self.epoch_num_correct = 0
        self.epoch_loss = 0
        self.run_data = []
        self.tb = None

    def begin_run(self, run, network, loader, val_loader):
        """Start tracking a new run (one hyper-parameter combination)."""
        self.run_start_time = time.time()
        self.run_params = run
        self.run_count += 1
        self.loader = loader
        self.val_loader = val_loader
        self.network = network
        # One TensorBoard log per run, suffixed with the hyper-parameters.
        self.tb = SummaryWriter(comment=f'-{run}')

    def end_run(self):
        """Close the TensorBoard writer and reset the epoch counter."""
        self.tb.close()
        self.epoch_count = 0

    def begin_epoch(self):
        """Reset the per-epoch accumulators and start the epoch timer."""
        self.epoch_start_time = time.time()
        self.epoch_count += 1
        self.epoch_num_correct = 0
        self.epoch_loss = 0

    def val(self, network, val_loader):
        """Evaluate `network` on `val_loader`.

        Returns (confusion_matrix, accuracy). The network is put into
        eval() mode for the duration and switched back to train() after.
        """
        network.eval()
        num_class = 10
        confusion_matrix = meter.ConfusionMeter(num_class)
        for ii, data in enumerate(val_loader):
            images, labels = data
            with torch.no_grad():
                val_images = images
                val_labels = labels
                if torch.cuda.is_available():
                    val_images = val_images.cuda()
                    val_labels = val_labels.cuda()
                score = network(val_images)
                confusion_matrix.add(score.data.squeeze(), labels.long())
        network.train()
        cm_value = confusion_matrix.value()
        # The diagonal of the confusion matrix holds the correctly
        # classified counts (the original misnamed this `error_sum`).
        correct_sum = 0
        for i in range(num_class):
            correct_sum += cm_value[i][i]
        test_accuracy = correct_sum / (cm_value.sum())
        return confusion_matrix, test_accuracy

    def end_epoch(self):
        """Log the finished epoch to TensorBoard and the run-data table."""
        epoch_duration = time.time() - self.epoch_start_time
        run_duration = time.time() - self.run_start_time
        loss = self.epoch_loss / len(self.loader.dataset)
        accuracy = self.epoch_num_correct / len(self.loader.dataset)
        confusion_matrix, test_acc = self.val(self.network, self.val_loader)
        # add_scalar(tag, value, step)
        self.tb.add_scalar('Loss', loss, self.epoch_count)
        self.tb.add_scalar('Accuracy', accuracy, self.epoch_count)
        self.tb.add_scalar('Test_Accuracy', test_acc, self.epoch_count)
        print('epoch:', self.epoch_count, 'Loss', loss, 'Accuracy', accuracy, 'Test_Accuracy', test_acc)
        # Histogram every weight tensor and its gradient.
        for name, param in self.network.named_parameters():
            self.tb.add_histogram(name, param, self.epoch_count)
            self.tb.add_histogram(f'{name}.grad', param.grad, self.epoch_count)
        # Append one row per epoch to the run-data table (written by save()).
        results = OrderedDict()
        results['run'] = self.run_count
        results['epoch'] = self.epoch_count
        results['loss'] = loss
        results['accuracy'] = accuracy
        results['epoch duration'] = epoch_duration
        results['run duration'] = run_duration
        for k, v in self.run_params._asdict().items():
            results[k] = v
        self.run_data.append(results)

    def track_loss(self, loss):
        """Accumulate the mean batch loss, weighted by the batch size."""
        self.epoch_loss += loss.item() * self.loader.batch_size

    def track_num_correct(self, preds, labels):
        """Accumulate the number of correct predictions in a batch."""
        self.epoch_num_correct += self._get_num_correct(preds, labels)

    @torch.no_grad()
    def _get_num_correct(self, preds, labels):
        """Number of rows of `preds` whose argmax matches `labels`."""
        return preds.argmax(dim=1).eq(labels).sum().item()

    def save(self, fileName):
        """Write the accumulated run data to `<fileName>.csv` and `.json`."""
        pd.DataFrame.from_dict(
            self.run_data,
            orient='columns').to_csv(f'{fileName}.csv')
        # BUG FIX: the original opened f'{fileName},json' — a comma instead
        # of a dot — producing a file named e.g. 'results,json'.
        with open(f'{fileName}.json', 'w', encoding='utf-8') as f:
            json.dump(self.run_data, f, ensure_ascii=False, indent=4)
class RunBuilder():
    """Expands a dict of hyper-parameter lists into the cartesian product
    of all settings, one named tuple per combination."""

    # Static: the builder keeps no state, so no instantiation is needed.
    @staticmethod
    def get_runs(params):
        """Return a list of named tuples, one per value combination.

        `params` maps parameter names to lists of candidate values; each
        returned tuple exposes the names as attributes (e.g. run.lr).
        """
        Run = namedtuple('run', params.keys())
        return [Run(*combination) for combination in product(*params.values())]
# Hyper-parameter grid consumed by RunBuilder.get_runs(): each key maps to a
# list of candidate values, and every combination becomes one training run.
params = OrderedDict(
    lr = [.001],
    batch_size = [256],
    shuffle = [True],
    gpus = ['0,1'],  # comma-separated CUDA device ids (string)
    num_worker = [4],
    train_split = [0.8],
    stratify = [False],
    # model = ['VGG9_conv', 'VGG11_conv', 'VGG17_conv']
    model = ['VGG9_avgpool', 'VGG11_avgpool', 'VGG17_avgpool']
)
# Main training loop: one full 50-epoch training per hyper-parameter run.
rm = RunManager()
# All candidate networks are instantiated up front and picked by name below.
model_name = [VGG9_avgpool(), VGG11_avgpool(), VGG17_avgpool(), VGG9_conv(), VGG11_conv(), VGG17_conv()]
C = CIFAR10Data()
for run in RunBuilder.get_runs(params):
    # `run` is a named tuple, so each hyper-parameter is an attribute
    # (e.g. run.batch_size).
    print(run)
    os.environ["CUDA_VISIBLE_DEVICES"] = run.gpus
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    load = C.data_split(run.train_split, run.stratify)
    loader = C.get_train_loader(batch_size=run.batch_size, shuffle=run.shuffle, num_workers=run.num_worker)
    val_loader = C.get_test_loader(batch_size=run.batch_size, shuffle=run.shuffle, num_workers=run.num_worker)
    # Select the pre-built network matching the run's model name.
    if run.model == 'VGG9_avgpool':
        network = model_name[0]
    elif run.model == 'VGG11_avgpool':
        network = model_name[1]
    elif run.model == 'VGG17_avgpool':
        network = model_name[2]
    elif run.model == 'VGG9_conv':
        network = model_name[3]
    elif run.model == 'VGG11_conv':
        network = model_name[4]
    elif run.model == 'VGG17_conv':
        network = model_name[5]
    # NOTE(review): run.gpus is a string such as '0,1', so len() > 1 also
    # triggers for a single multi-digit device id — confirm intent.
    if len(run.gpus)>1:
        network = nn.DataParallel(network)
        print('DataParallel already!')
    network = network.to(device)
    network.train()
    num_params = sum(p.numel() for p in network.parameters() if p.requires_grad)
    print('The number of parameters of model is', num_params)
    loss_fn = nn.CrossEntropyLoss()
    optimizer = optim.Adam(network.parameters(), lr=run.lr)
    rm.begin_run(run, network, loader, val_loader)
    for epoch in range(50):
        train_loss = 0
        correct = 0
        total = 0
        rm.begin_epoch()
        epoch_start = time.time()
        for batch_idx, (images, labels) in enumerate(loader):
            start = time.time()
            images = images.to(device)
            labels = labels.to(device)
            # *_conv model outputs are squeezed before the loss (presumably
            # to drop trailing singleton dims — confirm against vgg.py).
            if run.model in ('VGG9_conv', 'VGG11_conv', 'VGG17_conv'):
                pred = network(images)
                preds = torch.squeeze(pred)
            else:
                preds = network(images)
            loss = loss_fn(preds,labels)
            # Standard update: clear old gradients, backpropagate to compute
            # per-parameter gradients, then apply one optimiser step.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Running training statistics for the console progress line.
            train_loss += loss.item()
            _, predicted = preds.max(1)
            total += labels.size(0)
            correct += predicted.eq(labels).sum().item()
            acc = 100 * correct / total
            rm.track_loss(loss)
            rm.track_num_correct(preds, labels)
            batch_time = time.time() - start
            if batch_idx % 20 == 0:
                print('Epoch: [{}/{}]| loss: {:.3f} | acc: {:.3f} | batch time: {:.3f}s '.format(
                    batch_idx, len(loader), train_loss/(batch_idx+1), acc, batch_time))
        elapse_time = time.time() - epoch_start
        elapse_time = datetime.timedelta(seconds=elapse_time)
        print("Training time {}".format(elapse_time))
        rm.end_epoch()
    rm.end_run()
    torch.save(network.state_dict(), "./checkpoint/cifar10_gpu_50_"+ run.model+ ".pth")
rm.save('results_cifar10')
|
<reponame>StuartDAdams/SYCL-CTS<gh_stars>0
#!/usr/bin/env python3
import os
import subprocess
import sys
import xml.etree.ElementTree as ET
import json
import argparse
# XML preamble for the generated report: an xml-stylesheet processing
# instruction plus a DTD declaring the embedded stylesheet's `id` attribute.
REPORT_HEADER = """<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet xmlns="http://www.w3.org/1999/xhtml" type="text/xsl" href="#stylesheet"?>
<!DOCTYPE Site [
<!ATTLIST ns0:stylesheet
id ID #REQUIRED>
]>
"""
def handle_args(argv):
    """
    Parses the command-line arguments to the script.

    Returns a tuple:
    (cmake_exe, build_system_name, build_system_call, conformance_filter,
     implementation_name, additional_cmake_args, host_names, opencl_names,
     verbose, build_only)
    where host_names and opencl_names are (platform, device) pairs.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--cmake-exe',
        help='Name of the CMake executable',
        type=str,
        default='cmake')
    parser.add_argument(
        '-a',
        '--additional-cmake-args',
        help=
        'Additional args to hand to CMake required by the tested implementation.',
        type=str)
    parser.add_argument(
        '-b',
        '--build-system-name',
        help=
        'The name of the build system as known by CMake, for example \'Ninja\'.',
        type=str,
        required=True)
    parser.add_argument(
        '-c',
        '--build-system-call',
        help='The call to the used build system.',
        type=str,
        required=True)
    parser.add_argument(
        '--build-only',
        help='Whether to perform only a build without any testing.',
        required=False,
        action='store_true')
    parser.add_argument(
        '-f',
        '--conformance-filter',
        help='The conformance filter to use.',
        type=str,
        required=True)
    parser.add_argument(
        '--host-platform-name',
        help='The name of the host platform to test on.',
        type=str,
        required=True)
    parser.add_argument(
        '--host-device-name',
        help='The name of the host device to test on.',
        type=str,
        required=True)
    parser.add_argument(
        '--opencl-platform-name',
        help='The name of the opencl platform to test on.',
        type=str,
        required=True)
    parser.add_argument(
        '--opencl-device-name',
        help='The name of the opencl device to test on.',
        type=str,
        required=True)
    parser.add_argument(
        '-n',
        '--implementation-name',
        help='The name of the implementation to be displayed in the report.',
        type=str,
        required=True)
    # BUG FIX: '-V' used `type=bool`, which converts ANY non-empty string
    # (including "False") to True. A store_true flag takes no value and
    # defaults to False, matching '--build-only' above.
    parser.add_argument(
        '-V',
        '--verbose',
        help='Enable verbose CTest output',
        action='store_true',
        required=False)
    args = parser.parse_args(argv)
    host_names = (args.host_platform_name, args.host_device_name)
    opencl_names = (args.opencl_platform_name, args.opencl_device_name)
    return (args.cmake_exe, args.build_system_name, args.build_system_call,
            args.conformance_filter, args.implementation_name,
            args.additional_cmake_args, host_names, opencl_names, args.verbose,
            args.build_only)
def generate_cmake_call(cmake_exe, build_system_name, conformance_filter,
                        additional_cmake_args, host_names, opencl_names):
    """
    Generate a CMake invocation as an argument list accepted by
    subprocess.call().

    :param additional_cmake_args: extra CMake flags as a single string, or
        None (the '-a' option is optional); None is treated as no extras.
    :param host_names: (platform_name, device_name) pair for the host.
    :param opencl_names: (platform_name, device_name) pair for OpenCL.
    """
    import shlex
    # '-a' is optional, so this can be None; shlex.split must never be fed
    # None (it would fall back to reading sys.stdin).
    extra_args = shlex.split(additional_cmake_args or '')
    return [
        cmake_exe,
        '..',
        '-G' + build_system_name,
        '-DSYCL_CTS_TEST_FILTER=' + conformance_filter,
        '-Dhost_platform_name=' + host_names[0],
        '-Dhost_device_name=' + host_names[1],
        '-Dopencl_platform_name=' + opencl_names[0],
        '-Dopencl_device_name=' + opencl_names[1],
    ] + extra_args
def subprocess_call(parameter_list):
    """
    Run subprocess.call() on *parameter_list* and return its exit code,
    echoing the exact command line before running it.
    """
    command_line = " ".join(parameter_list)
    print("subprocess.call:\n " + command_line)
    return subprocess.call(parameter_list)
def configure_and_run_tests(cmake_call, build_system_call, verbose,
                            build_only):
    """
    Configures the tests with cmake to produce a ninja.build file.
    Runs the generated ninja file.
    Runs ctest, overwriting any cached results.

    :param cmake_call: argument list for the cmake configure step.
    :param build_system_call: build command as one string; split on
        whitespace before being executed.
    :param verbose: when truthy, pass -V to ctest.
    :param build_only: when truthy, skip the ctest step.
    :return: exit code of the build step, or of ctest when tests were run.

    NOTE(review): the return value of the cmake configure call is ignored,
    so a failed configure still proceeds to the build step.
    """
    build_system_call = build_system_call.split()
    # Ask ctest for uncompressed, untruncated output so later report
    # generation sees complete results.
    ctest_call = [
        'ctest', '.', '-T', 'Test', '--no-compress-output',
        '--test-output-size-passed', '0', '--test-output-size-failed', '0'
    ]
    if verbose:
        ctest_call.append('-V')
    subprocess_call(cmake_call)
    error_code = subprocess_call(build_system_call)
    if (not build_only):
        error_code = subprocess_call(ctest_call)
    return error_code
def collect_info_filenames():
    """
    Collect the .info test result files found in the Testing directory.

    Returns a (host_info_filenames, opencl_info_filenames) pair.
    Exits the program if either list would be empty.
    """
    host_info_filenames = []
    opencl_info_filenames = []
    # Sort every result file in Testing into the host or opencl bucket.
    for entry in os.listdir('Testing'):
        full_path = os.path.join('Testing', entry)
        if entry.endswith('host.info'):
            host_info_filenames.append(full_path)
        elif entry.endswith('opencl.info'):
            opencl_info_filenames.append(full_path)
    # A missing result set means the run produced nothing usable.
    if not (host_info_filenames and opencl_info_filenames):
        print("Fatal error: couldn't find any test result files")
        exit(-1)
    return host_info_filenames, opencl_info_filenames
def get_valid_host_json_info(host_info_filenames):
    """
    Check that every host.info file carries identical data, then return
    that data parsed as json.

    Exits the program on any mismatch between files.
    """
    reference_host_info = None
    for filename in host_info_filenames:
        with open(filename, 'r') as info_file:
            contents = info_file.read()
        if reference_host_info is None:
            # The first file becomes the reference all others must match.
            reference_host_info = contents
        elif contents != reference_host_info:
            print('Fatal error: mismatch in host info between tests')
            exit(-1)
    return json.loads(reference_host_info)
def get_valid_opencl_json_info(opencl_info_filenames):
    """
    Check that every opencl.info file carries identical data, then return
    that data parsed as json.

    Exits the program on any mismatch between files.
    """
    reference_opencl_info = None
    for filename in opencl_info_filenames:
        with open(filename, 'r') as info_file:
            contents = info_file.read()
        if reference_opencl_info is None:
            # The first file becomes the reference all others must match.
            reference_opencl_info = contents
        elif contents != reference_opencl_info:
            print('Fatal error: mismatch in OpenCL info between tests')
            exit(-1)
    # Some drivers add \x00 to their output.
    # We have to remove this to parse the json
    return json.loads(reference_opencl_info.replace('\x00', ''))
def get_xml_test_results():
    """
    Locate the xml file written by the test run and return the root of its
    xml tree.

    The first line of Testing/TAG names the subdirectory holding Test.xml.
    """
    with open(os.path.join("Testing", "TAG"), 'r') as tag_file:
        # Strip the trailing newline from the directory name.
        test_tag = tag_file.readline()[:-1]
    test_xml_file = os.path.join("Testing", test_tag, "Test.xml")
    return ET.parse(test_xml_file).getroot()
def update_xml_attribs(host_info_json, opencl_info_json, implementation_name,
                       test_xml_root, cmake_call, build_system_name,
                       build_system_call):
    """
    Add the attributes required by the conformance report to the root of
    the result xml tree and return it.

    The attributes describe the host and OpenCL platform/device used in
    the tests, along with how the tests were configured and executed.
    """
    # (xml attribute suffix, key in the .info json blob), in report order:
    # platform identity, device identity, then extension support flags.
    info_attribs = (
        ('PlatformName', 'platform-name'),
        ('PlatformVendor', 'platform-vendor'),
        ('PlatformVersion', 'platform-version'),
        ('DeviceName', 'device-name'),
        ('DeviceVendor', 'device-vendor'),
        ('DeviceVersion', 'device-version'),
        ('DeviceType', 'device-type'),
        ('DeviceFP16', 'device-fp16'),
        ('DeviceFP64', 'device-fp64'),
        ('DeviceInt64Base', 'device-int64-base'),
        ('DeviceInt64Extended', 'device-int64-extended'),
        ('Device3DWrites', 'device-3d-writes'),
    )
    test_xml_root.attrib["BuildName"] = implementation_name
    # The host and OpenCL blocks carry the same fields, prefixed.
    for prefix, info_json in (('Host', host_info_json),
                              ('OpenCL', opencl_info_json)):
        for attrib_suffix, json_key in info_attribs:
            test_xml_root.attrib[prefix + attrib_suffix] = info_json[json_key]
    # Build information attribs.
    test_xml_root.attrib["CMakeInput"] = ' '.join(cmake_call)
    test_xml_root.attrib["BuildSystemGenerator"] = build_system_name
    test_xml_root.attrib["BuildSystemCall"] = build_system_call
    return test_xml_root
def main(argv=sys.argv[1:]):
    """
    Run the full conformance flow: parse arguments, configure and build
    with cmake, run the tests with ctest, and write conformance_report.xml
    combining the device info, xml test results and the report stylesheet.

    Returns the exit code of the last build/test subprocess.

    NOTE(review): the default for *argv* is evaluated once, at import time,
    from sys.argv; pass an explicit list when calling programmatically.
    """
    # Parse and gather all the script args
    (cmake_exe, build_system_name, build_system_call, conformance_filter,
     implementation_name, additional_cmake_args, host_names, opencl_names,
     verbose, build_only) = handle_args(argv)
    # Generate a cmake call in a form accepted by subprocess.call()
    cmake_call = generate_cmake_call(cmake_exe, build_system_name,
                                     conformance_filter, additional_cmake_args,
                                     host_names, opencl_names)
    # Make a build directory if required and enter it
    if not os.path.isdir('build'):
        os.mkdir('build')
    os.chdir('build')
    # Configure the build system with cmake, run the build, and run the tests.
    error_code = configure_and_run_tests(cmake_call, build_system_call,
                                         verbose, build_only)
    # A build-only run produces no test results, so stop before reporting.
    if build_only:
        return error_code
    # Collect the test info files, validate them and get the contents as json.
    host_info_filenames, opencl_info_filenames = collect_info_filenames()
    host_info_json = get_valid_host_json_info(host_info_filenames)
    opencl_info_json = get_valid_opencl_json_info(opencl_info_filenames)
    # Get the xml results and update with the necessary information.
    result_xml_root = get_xml_test_results()
    result_xml_root = update_xml_attribs(
        host_info_json, opencl_info_json, implementation_name, result_xml_root,
        cmake_call, build_system_name, build_system_call)
    # Get the xml report stylesheet and add it to the results.
    stylesheet_xml_file = os.path.join("..", "tools", "stylesheet.xml")
    stylesheet_xml_tree = ET.parse(stylesheet_xml_file)
    stylesheet_xml_root = stylesheet_xml_tree.getroot()
    result_xml_root.append(stylesheet_xml_root[0])
    # Get the xml results as a string and append them to the report header.
    report = REPORT_HEADER + ET.tostring(result_xml_root).decode("utf-8")
    with open("conformance_report.xml", 'w') as final_conformance_report:
        final_conformance_report.write(report)
    return error_code


if __name__ == "__main__":
    main()
|
<reponame>OpheliaMiralles/pykelihood<filename>tests/test_stats_utils.py
import numpy as np
import pandas as pd
import pytest
from pykelihood import kernels
from pykelihood.distributions import GEV, Distribution
from pykelihood.stats_utils import Profiler
@pytest.fixture(scope="module")
def likelihood(dataset):
    """Profiler built from an unconstrained GEV fit of the dataset."""
    return Profiler(GEV.fit(dataset), dataset)
@pytest.fixture(scope="module")
def likelihood_with_single_profiling_param(dataset):
    """Profiler restricted to profiling only the GEV shape parameter."""
    return Profiler(GEV.fit(dataset), dataset, single_profiling_param="shape")
@pytest.fixture(scope="module")
def likelihood_with_fixed_param(dataset):
    """Profiler for a GEV fit with the scale parameter pinned to 1.0."""
    return Profiler(GEV.fit(dataset, scale=1.0), dataset)
@pytest.fixture(scope="module")
def likelihood_with_trend(dataset):
    """Profiler for a GEV fit whose location follows a linear time trend."""
    trend = kernels.linear(np.linspace(1, len(dataset), len(dataset)))
    return Profiler(GEV.fit(dataset, loc=trend), dataset)
def test_mle(likelihood, dataset):
    """The optimum exposes a fitted distribution and its log-likelihood."""
    fitted, best_score = likelihood.optimum
    assert isinstance(fitted, Distribution)
    assert best_score == fitted.logpdf(dataset).sum()
    # The maximum likelihood fit must keep the reference distribution's
    # parameter structure, in both the flattened and optimisation views.
    reference = likelihood.distribution
    assert len(reference.flattened_params) == len(fitted.flattened_params)
    assert len(reference.optimisation_params) == len(fitted.optimisation_params)
def test_mle_with_trend(likelihood_with_trend, dataset):
    """Same structural checks as test_mle, for the trended fit."""
    fitted, best_score = likelihood_with_trend.optimum
    assert isinstance(fitted, Distribution)
    assert best_score == fitted.logpdf(dataset).sum()
    reference = likelihood_with_trend.distribution
    assert len(reference.flattened_params) == len(fitted.flattened_params)
    assert len(reference.optimisation_params) == len(fitted.optimisation_params)
def test_mle_with_fixed_param(likelihood_with_fixed_param, dataset):
    """Same structural checks as test_mle when one parameter is fixed."""
    fitted, best_score = likelihood_with_fixed_param.optimum
    assert isinstance(fitted, Distribution)
    assert best_score == fitted.logpdf(dataset).sum()
    reference = likelihood_with_fixed_param.distribution
    assert len(reference.flattened_params) == len(fitted.flattened_params)
    assert len(reference.optimisation_params) == len(fitted.optimisation_params)
def test_profiles(likelihood):
    """Profiles cover exactly the optimised parameters and bracket the MLE."""
    profiles = likelihood.profiles
    fitted, best_score = likelihood.optimum
    # Profiling is done on optimised params only, never on fixed ones.
    assert len(profiles) == len(fitted.optimisation_params)
    for name, profile in profiles.items():
        # A strongly concave likelihood can leave a profile empty: moving
        # slightly off the MLE may already deviate too much from the optimum.
        if not len(profile):
            continue
        # No profiled point can beat the maximum likelihood estimate for
        # the same data and assumed distribution structure.
        assert pd.Series(profile["score"] <= best_score).all()
        # Each profile row carries every parameter of the distribution
        # (fixed ones included) plus the likelihood value, so it gives a
        # complete view of the constrained fit.
        assert len(profile.columns) == len(fitted.flattened_params) + 1
        # The MLE of the profiled parameter lies inside the explored range.
        assert (
            profile[name].min()
            <= fitted.optimisation_param_dict[name]()
            <= profile[name].max()
        )
def test_profiles_with_single_profiling_param(likelihood_with_single_profiling_param):
    """Only the requested parameter ("shape") gets profiled."""
    profiles = likelihood_with_single_profiling_param.profiles
    assert len(profiles) == 1
    assert "shape" in profiles
def test_profiles_with_trend(likelihood_with_trend):
    """Same profile invariants as test_profiles, for the trended fit."""
    profiles = likelihood_with_trend.profiles
    mle, likelihood_opt = likelihood_with_trend.optimum
    assert len(profiles) == len(mle.optimisation_params)
    for key in profiles:
        if len(profiles[key]):
            # Use the same non-strict comparison as the sibling profile
            # tests: a profiled point fixed exactly at the MLE legitimately
            # reaches the optimal score, which the previous strict
            # np.testing.assert_array_less check would have rejected.
            assert pd.Series((profiles[key]["score"] <= likelihood_opt)).all()
            # Every parameter plus the likelihood value is reported per row.
            assert len(profiles[key].columns) == len(mle.flattened_params) + 1
            # The MLE lies within the profiled range of the parameter.
            assert (
                profiles[key][key].min()
                <= mle.optimisation_param_dict[key]()
                <= profiles[key][key].max()
            )
def test_profiles_with_fixed_param(likelihood_with_fixed_param):
    """Profile invariants still hold when one parameter is fixed."""
    profiles = likelihood_with_fixed_param.profiles
    fitted, best_score = likelihood_with_fixed_param.optimum
    assert len(profiles) == len(fitted.optimisation_params)
    for name, profile in profiles.items():
        if not len(profile):
            continue
        # No profiled point can beat the MLE score.
        assert pd.Series(profile["score"] <= best_score).all()
        # Every parameter plus the likelihood value is reported per row.
        assert len(profile.columns) == len(fitted.flattened_params) + 1
        # The MLE lies within the profiled range of the parameter.
        assert (
            profile[name].min()
            <= fitted.optimisation_param_dict[name]()
            <= profile[name].max()
        )
def test_confidence_interval(likelihood, likelihood_with_single_profiling_param):
    """Full profiling yields a CI containing the point estimate and at
    least as wide as the single-parameter CI."""
    return_period = 50
    fitted, _ = likelihood.optimum

    def metric(distribution):
        return distribution.isf(1 / return_period)

    estimated_level = metric(fitted)
    full_ci = likelihood.confidence_interval(metric)
    narrow_ci = likelihood_with_single_profiling_param.confidence_interval(metric)
    assert full_ci[0] <= estimated_level <= full_ci[1]
    # Profiling according to only one parameter gives less wide and less
    # reliable confidence intervals.
    assert full_ci[0] <= narrow_ci[0]
    assert narrow_ci[1] <= full_ci[1]
|
# This is a sample mean-reversion algorithm on Quantopian for you to test and adapt.
# Algorithm investment thesis:
# Top-performing stocks from last week will do worse this week, and vice-versa.
# Every Monday, we rank high-volume stocks based on their previous 5 day returns.
# We go long the bottom 20% of stocks with the WORST returns over the past 5 days.
# We go short the top 20% of stocks with the BEST returns over the past 5 days.
# This type of algorithm may be used in live trading and in the Quantopian Open.
# Import the libraries we will use here
from zipline.api import get_open_orders
import numpy as np
# The initialize function is the place to set your tradable universe and define any parameters.
def initialize(context):
    """
    One-time algorithm setup: tradable universe, cost models, strategy
    parameters and the weekly rebalance schedule.

    Relies on globals injected by the Quantopian/zipline runtime
    (set_universe, set_commission, set_slippage, schedule_function, ...).
    """
    # Use the top 1% of stocks defined by average daily trading volume.
    set_universe(universe.DollarVolumeUniverse(99, 100))
    # Set execution cost assumptions. For live trading with Interactive Brokers
    # we will assume a $1.00 minimum per trade fee, with a per share cost of $0.0075.
    set_commission(commission.PerShare(cost=0.0075, min_trade_cost=1.00))
    # Set market impact assumptions. We limit the simulation to
    # trade up to 2.5% of the traded volume for any one minute,
    # and our price impact constant is 0.1.
    set_slippage(slippage.VolumeShareSlippage(volume_limit=0.025, price_impact=0.10))
    # Strategy knobs: target leverage per leg, the return percentiles that
    # define each leg, and the lookback window (trading days) for returns.
    context.long_leverage = 0.5
    context.short_leverage = -0.5
    context.lower_percentile = 20
    context.upper_percentile = 80
    context.returns_lookback = 5
    # Rebalance every Monday (or the first trading day if it's a holiday).
    # At 11AM ET, which is 1 hour and 30 minutes after market open.
    schedule_function(rebalance,
                      date_rules.week_start(days_offset=0),
                      time_rules.market_open(hours = 1, minutes = 30))
# The handle_data function is run every bar.
def handle_data(context,data):
    """
    Per-bar bookkeeping: record portfolio leverage and the current count
    of long and short positions so they can be plotted over time.
    """
    # Record and plot the leverage of our portfolio over time.
    record(leverage = context.account.leverage)

    # Check our position sizes and add the count of longs and shorts
    # to the plot.
    longs = shorts = 0
    # dict.itervalues() is Python 2 only; values() works on both 2 and 3.
    for position in context.portfolio.positions.values():
        if position.amount > 0:
            longs += 1
        if position.amount < 0:
            shorts += 1
    record(long_count=longs, short_count=shorts)
# This rebalancing is called according to our schedule_function settings.
def rebalance(context,data):
    """
    Weekly mean-reversion rebalance (scheduled from initialize).

    Goes long the worst-performing quintile over the lookback window,
    shorts the best-performing quintile, and closes everything else.
    """
    # Get the last N days of prices for every stock in our universe.
    prices = history(context.returns_lookback, '1d', 'price')

    # Calculate the past 5 days' returns for each security.
    returns = (prices.iloc[-1] - prices.iloc[0]) / prices.iloc[0]

    # Remove stocks with missing prices.
    returns = returns.dropna()

    # Remove any stocks we ordered last time that still have open orders.
    open_orders = get_open_orders()
    if open_orders:
        eligible_secs = [sec for sec in data if sec not in open_orders]
        returns = returns[eligible_secs]

    # Lower percentile is the threshold for the bottom 20%, upper percentile
    # is the threshold for the top 20%.
    lower, upper = np.percentile(returns, [context.lower_percentile,
                                           context.upper_percentile])

    # Select the X% worst performing securities to go long.
    long_secs = returns[returns <= lower]
    # Select the Y% best performing securities to short.
    short_secs = returns[returns >= upper]

    # Set the allocations to even weights in each portfolio.  Guard against
    # an empty leg, which would otherwise raise ZeroDivisionError.
    long_weight = context.long_leverage / len(long_secs) if len(long_secs) else 0
    short_weight = context.short_leverage / len(short_secs) if len(short_secs) else 0

    for security in data:
        # Buy/rebalance securities in the long leg of our portfolio.
        if security in long_secs:
            order_target_percent(security, long_weight)
        # Sell/rebalance securities in the short leg of our portfolio.
        elif security in short_secs:
            order_target_percent(security, short_weight)
        # Close any positions that fell out of the list of securities to long or short.
        else:
            order_target(security, 0)

    log.info("This week's longs: "+", ".join([long_.symbol for long_ in long_secs.index]))
    log.info("This week's shorts: " +", ".join([short_.symbol for short_ in short_secs.index]))
|
import os
import glob
# Our numerical workhorses
import numpy as np
import pandas as pd
import scipy.special
# Import the project utils
import sys
sys.path.insert(0, '../')
import image_analysis_utils as im_utils
# Useful plotting libraries
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib as mpl
import seaborn as sns
# Image analysis libraries
import skimage.io
import skimage.filters
import skimage.segmentation
import scipy.ndimage
# Set plotting style
im_utils.set_plotting_style()
# =============================================================================
# METADATA
# =============================================================================
# Experiment date (YYYYMMDD) and experimenter.
DATE = 20180330
USERNAME = 'mrazomej'
# Operator sequence and its repressor binding energy (units presumably kBT
# — TODO confirm against the analysis utilities).
OPERATOR = 'O3'
BINDING_ENERGY = -9.7
# Repressor copy numbers, one per entry of STRAINS (same order).
REPRESSORS = (0, 0, 870)
IPDIST = 0.160  # interpixel distance, in units of µm per pixel
STRAINS = ['auto', 'delta', 'RBS1L']
# IPTG concentrations assayed (µM).
IPTG_RANGE = (0, 0.1, 5, 10, 25, 50, 100, 250, 500, 1000, 5000)
# Extra feature because my mistake when naming files: the concentrations
# exactly as they are spelled inside the image directory names.
IPTG_NAMES = ('0', '0.1', '5', '10', '25', '50', '100', '250', '500',
              '1000', '5000')
# Map directory-name spelling -> numeric concentration.
IPTG_DICT = dict(zip(IPTG_NAMES, IPTG_RANGE))
# =============================================================================
# Define the data directory.
data_dir = '../../../data/microscopy/' + str(DATE) + '/'
# Glob the profile and noise images.
yfp_glob = glob.glob(data_dir + '*YFP_profile*/*.tif')
noise_glob = glob.glob(data_dir + '*noise*/*.tif')
# Load the images as collections
yfp_profile = skimage.io.ImageCollection(yfp_glob)
noise_profile = skimage.io.ImageCollection(noise_glob)
# Need to split the noise profile image into the two channels
noise_yfp = [noise_profile[i] for i, _ in enumerate(noise_profile)]
# Generate averages of the illumination profile and camera noise stacks;
# these are the inputs of the flat-field correction applied below.
yfp_avg = im_utils.average_stack(yfp_profile)
yfp_noise = im_utils.average_stack(noise_yfp)
# Plot both averages side by side so the correction inputs can be
# sanity-checked by eye.
with sns.axes_style('white'):
    fig, ax = plt.subplots(1, 2, figsize=(6, 3))
    ax = ax.ravel()
    ax[0].imshow(yfp_avg, cmap=plt.cm.viridis)
    ax[0].set_title('yfp profile')
    ax[1].imshow(yfp_noise, cmap=plt.cm.Greens_r)
    ax[1].set_title('yfp noise')
    plt.tight_layout()
    plt.savefig('./outdir/background_correction.png')
# =============================================================================
# Iterate through each strain and concentration to make the dataframes.
dfs = []
# Select random IPTG and random strain to print the example segmentation
ex_iptg = np.random.choice(IPTG_RANGE)
ex_strain = STRAINS[-1]
for i, st in enumerate(STRAINS):
    print(st)
    for j, name in enumerate(IPTG_NAMES):
        iptg = IPTG_DICT[name]
        # Load the images
        images = glob.glob(data_dir + '*' + st + '*_' + name +
                           'uMIPTG*/*.ome.tif')
        # NOTE: the previous 'len(images) is not 0' compared object
        # identity, not value (a SyntaxWarning on Python >= 3.8).
        if len(images) != 0:
            print(name)
            ims = skimage.io.ImageCollection(images)
            # Select random image to print example segmentation.
            # np.arange(0, len(images) - 1) could never pick the last image
            # and raised for a single image; sample the full index range.
            ex_no = np.random.choice(len(images))
            for z, x in enumerate(ims):
                _, m, y = im_utils.ome_split(x)
                y_flat = im_utils.generate_flatfield(y, yfp_noise, yfp_avg)
                # Segment the mCherry channel.
                m_seg = im_utils.log_segmentation(m, label=True)
                # Print example segmentation for the random image
                if (st == ex_strain) & (iptg == ex_iptg) & (z == ex_no):
                    merge = im_utils.example_segmentation(m_seg, _, 10/IPDIST)
                    skimage.io.imsave('./outdir/example_segmentation.png',
                                      merge)
                # Extract the measurements.
                try:
                    im_df = im_utils.props_to_df(m_seg,
                                                 physical_distance=IPDIST,
                                                 intensity_image=y_flat)
                except ValueError:
                    break
                # Add strain and IPTG concentration information.
                im_df.insert(0, 'IPTG_uM', iptg)
                im_df.insert(0, 'repressors', REPRESSORS[i])
                im_df.insert(0, 'rbs', st)
                im_df.insert(0, 'binding_energy', BINDING_ENERGY)
                im_df.insert(0, 'operator', OPERATOR)
                im_df.insert(0, 'username', USERNAME)
                im_df.insert(0, 'date', DATE)
                # Append the dataframe to the global list.
                dfs.append(im_df)

# Concatenate the dataframes and save the raw segmentation results.
df_im = pd.concat(dfs, axis=0)
df_im.to_csv('./outdir/' + str(DATE) + '_' + OPERATOR + '_' +
             STRAINS[-1] + '_raw_segmentation.csv', index=False)
|
import unittest
import pyrepscan
class RulesManagerTestCase(
unittest.TestCase,
):
def test_should_scan_file_ignored_extensions(
self,
):
rules_manager = pyrepscan.RulesManager()
self.assertTrue(
expr=rules_manager.should_scan_file_path('file.txt'),
)
rules_manager.add_file_extension_to_skip('txt')
self.assertFalse(
expr=rules_manager.should_scan_file_path('file.txt'),
)
rules_manager.add_file_extension_to_skip('pdf')
self.assertFalse(
expr=rules_manager.should_scan_file_path('file.txt'),
)
self.assertFalse(
expr=rules_manager.should_scan_file_path('file.pdf'),
)
self.assertFalse(
expr=rules_manager.should_scan_file_path('file.other.pdf'),
)
self.assertTrue(
expr=rules_manager.should_scan_file_path('file.pdf.other'),
)
self.assertTrue(
expr=rules_manager.should_scan_file_path('file.doc'),
)
def test_should_scan_file_ignored_file_paths(
self,
):
rules_manager = pyrepscan.RulesManager()
self.assertTrue(
expr=rules_manager.should_scan_file_path('/site-packages/file.txt'),
)
rules_manager.add_file_path_to_skip('site-packages')
self.assertFalse(
expr=rules_manager.should_scan_file_path('/site-packages/file.txt'),
)
self.assertTrue(
expr=rules_manager.should_scan_file_path('/folder_one/subfolder/file.txt'),
)
rules_manager.add_file_path_to_skip('folder_one/subfolder')
self.assertFalse(
expr=rules_manager.should_scan_file_path('/folder_one/subfolder/file.txt'),
)
self.assertTrue(
expr=rules_manager.should_scan_file_path('/folder_one/sub/file.txt'),
)
rules_manager.add_file_path_to_skip('part/name')
self.assertFalse(
expr=rules_manager.should_scan_file_path('some_part/name_some'),
)
def test_add_content_rule_one(
self,
):
rules_manager = pyrepscan.RulesManager()
rules_manager.add_content_rule(
name='rule_one',
pattern=r'([a-z]+)',
whitelist_patterns=[],
blacklist_patterns=[],
)
self.assertEqual(
first=rules_manager.scan_file(
file_path='',
content='first line\nsecond line\nthird line',
),
second=[
{
'match_text': 'first',
'rule_name': 'rule_one',
},
{
'match_text': 'line',
'rule_name': 'rule_one',
},
{
'match_text': 'second',
'rule_name': 'rule_one',
},
{
'match_text': 'line',
'rule_name': 'rule_one',
},
{
'match_text': 'third',
'rule_name': 'rule_one',
},
{
'match_text': 'line',
'rule_name': 'rule_one',
},
],
)
def test_add_content_rule_two(
self,
):
rules_manager = pyrepscan.RulesManager()
rules_manager.add_content_rule(
name='rule_one',
pattern=r'([a-z]+)',
whitelist_patterns=[],
blacklist_patterns=[
r'line',
],
)
self.assertEqual(
first=rules_manager.scan_file(
file_path='',
content='first line\nsecond line\nthird line',
),
second=[
{
'match_text': 'first',
'rule_name': 'rule_one',
},
{
'match_text': 'second',
'rule_name': 'rule_one',
},
{
'match_text': 'third',
'rule_name': 'rule_one',
},
],
)
def test_add_content_rule_three(
self,
):
rules_manager = pyrepscan.RulesManager()
rules_manager.add_content_rule(
name='rule_one',
pattern=r'([a-z]+)',
whitelist_patterns=[
'second',
'third',
],
blacklist_patterns=[],
)
self.assertEqual(
first=rules_manager.scan_file(
file_path='',
content='first line\nsecond line\nthird line',
),
second=[
{
'match_text': 'second',
'rule_name': 'rule_one',
},
{
'match_text': 'third',
'rule_name': 'rule_one',
},
],
)
def test_add_content_rule_four(
self,
):
rules_manager = pyrepscan.RulesManager()
rules_manager.add_content_rule(
name='rule_one',
pattern=r'([a-z]+)',
whitelist_patterns=[
'second',
'third',
],
blacklist_patterns=[
r'nd$',
],
)
self.assertEqual(
first=rules_manager.scan_file(
file_path='',
content='first line\nsecond line\nthird line',
),
second=[
{
'match_text': 'third',
'rule_name': 'rule_one',
},
],
)
def test_add_content_rule_five(
self,
):
rules_manager = pyrepscan.RulesManager()
rules_manager.add_content_rule(
name='rule_one',
pattern=r'(nothing)',
whitelist_patterns=[],
blacklist_patterns=[],
)
self.assertIsNone(
obj=rules_manager.scan_file(
file_path='',
content='first line\nsecond line\nthird line',
),
)
def test_add_content_rule_exceptions(
self,
):
rules_manager = pyrepscan.RulesManager()
with self.assertRaises(
expected_exception=RuntimeError,
):
rules_manager.add_content_rule(
name='',
pattern=r'regex',
whitelist_patterns=[],
blacklist_patterns=[],
)
with self.assertRaises(
expected_exception=RuntimeError,
):
rules_manager.add_content_rule(
name='rule_one',
pattern=r'',
whitelist_patterns=[],
blacklist_patterns=[],
)
with self.assertRaises(
expected_exception=RuntimeError,
):
rules_manager.add_content_rule(
name='rule_one',
pattern=r'(',
whitelist_patterns=[],
blacklist_patterns=[],
)
with self.assertRaises(
expected_exception=RuntimeError,
):
rules_manager.add_content_rule(
name='rule_one',
pattern=r'regex_pattern_without_capturing_group',
whitelist_patterns=[],
blacklist_patterns=[],
)
with self.assertRaises(
expected_exception=RuntimeError,
):
rules_manager.add_content_rule(
name='rule_two',
pattern=r'(content)',
whitelist_patterns=[],
blacklist_patterns=[
'(',
],
)
with self.assertRaises(
expected_exception=RuntimeError,
):
rules_manager.add_content_rule(
name='rule_two',
pattern=r'(content)',
whitelist_patterns=[],
blacklist_patterns=[
'(blacklist_regex_with_capturing_group)',
],
)
with self.assertRaises(
expected_exception=RuntimeError,
):
rules_manager.add_content_rule(
name='rule_two',
pattern=r'(content)',
whitelist_patterns=[
'(',
],
blacklist_patterns=[],
)
with self.assertRaises(
expected_exception=RuntimeError,
):
rules_manager.add_content_rule(
name='rule_two',
pattern=r'(content)',
whitelist_patterns=[
'(whitelist_regex_with_capturing_group)',
],
blacklist_patterns=[],
)
def test_add_file_path_rule_one(
self,
):
rules_manager = pyrepscan.RulesManager()
rules_manager.add_file_path_rule(
name='rule_one',
pattern=r'(prod|dev|stage).+key',
)
self.assertIsNone(
obj=rules_manager.scan_file(
file_path='workdir/prod/some_file',
content=None,
),
)
self.assertEqual(
first=rules_manager.scan_file(
file_path='workdir/prod/some_file.key',
content=None,
),
second=[
{
'match_text': 'workdir/prod/some_file.key',
'rule_name': 'rule_one',
},
],
)
rules_manager.add_file_path_rule(
name='rule_two',
pattern=r'prod.+key',
)
self.assertIsNone(
obj=rules_manager.scan_file(
file_path='workdir/prod/some_file',
content=None,
),
)
self.assertEqual(
first=rules_manager.scan_file(
file_path='workdir/prod/some_file.key',
content=None,
),
second=[
{
'match_text': 'workdir/prod/some_file.key',
'rule_name': 'rule_one',
},
{
'match_text': 'workdir/prod/some_file.key',
'rule_name': 'rule_two',
},
],
)
def test_add_file_path_rule_exceptions(
self,
):
rules_manager = pyrepscan.RulesManager()
with self.assertRaises(
expected_exception=RuntimeError,
):
rules_manager.add_file_path_rule(
name='',
pattern=r'regex',
)
with self.assertRaises(
expected_exception=RuntimeError,
):
rules_manager.add_file_path_rule(
name='rule_one',
pattern=r'',
)
def test_add_file_extension_to_skip_exceptions(
self,
):
rules_manager = pyrepscan.RulesManager()
with self.assertRaises(
expected_exception=RuntimeError,
):
rules_manager.add_file_extension_to_skip(
file_extension='',
)
def test_add_file_path_to_skip_exceptions(
self,
):
rules_manager = pyrepscan.RulesManager()
with self.assertRaises(
expected_exception=RuntimeError,
):
rules_manager.add_file_path_to_skip(
file_path='',
)
def test_scan_file_one(
self,
):
rules_manager = pyrepscan.RulesManager()
self.assertIsNone(
obj=rules_manager.scan_file(
file_path='/path/to/file.txt',
content=None,
),
)
def test_scan_file_two(
self,
):
rules_manager = pyrepscan.RulesManager()
rules_manager.add_content_rule(
name='rule_one',
pattern=r'(some_text)',
whitelist_patterns=[],
blacklist_patterns=[],
)
self.assertIsNone(
obj=rules_manager.scan_file(
file_path='/path/to/file.txt',
content=None,
),
)
self.assertIsNone(
obj=rules_manager.scan_file(
file_path='/path/to/file.txt',
content='',
),
)
self.assertIsNone(
obj=rules_manager.scan_file(
file_path='/path/to/file.txt',
content='other_text',
),
)
self.assertEqual(
first=rules_manager.scan_file(
file_path='/path/to/file.txt',
content='some_text',
),
second=[
{
'rule_name': 'rule_one',
'match_text': 'some_text',
},
],
)
rules_manager.add_content_rule(
name='rule_two',
pattern=r'(some)',
whitelist_patterns=[],
blacklist_patterns=[],
)
self.assertEqual(
first=rules_manager.scan_file(
file_path='/path/to/file.txt',
content='some_text',
),
second=[
{
'rule_name': 'rule_one',
'match_text': 'some_text',
},
{
'rule_name': 'rule_two',
'match_text': 'some',
},
],
)
def test_scan_file_three(
self,
):
rules_manager = pyrepscan.RulesManager()
rules_manager.add_content_rule(
name='rule_one',
pattern=r'(some_.+)',
whitelist_patterns=[],
blacklist_patterns=[
r'text',
],
)
self.assertIsNone(
obj=rules_manager.scan_file(
file_path='/path/to/file.txt',
content='some_text',
),
)
self.assertEqual(
first=rules_manager.scan_file(
file_path='/path/to/file.txt',
content='some_other',
),
second=[
{
'rule_name': 'rule_one',
'match_text': 'some_other',
},
],
)
def test_scan_file_four(
self,
):
rules_manager = pyrepscan.RulesManager()
rules_manager.add_content_rule(
name='rule_one',
pattern=r'(some_.+)',
whitelist_patterns=[],
blacklist_patterns=[
r'text',
r'other',
],
)
self.assertIsNone(
obj=rules_manager.scan_file(
file_path='/path/to/file.txt',
content='some_text',
),
)
self.assertIsNone(
obj=rules_manager.scan_file(
file_path='/path/to/file.txt',
content='some_other',
),
)
self.assertEqual(
first=rules_manager.scan_file(
file_path='/path/to/file.txt',
content='some_diff',
),
second=[
{
'rule_name': 'rule_one',
'match_text': 'some_diff',
},
],
)
def test_scan_file_five(
self,
):
rules_manager = pyrepscan.RulesManager()
rules_manager.add_content_rule(
name='rule_one',
pattern=r'(some_.+)',
whitelist_patterns=[
'diff',
],
blacklist_patterns=[],
)
self.assertIsNone(
obj=rules_manager.scan_file(
file_path='/path/to/file.txt',
content='some_text',
),
)
self.assertIsNone(
obj=rules_manager.scan_file(
file_path='/path/to/file.txt',
content='some_other',
),
)
self.assertEqual(
first=rules_manager.scan_file(
file_path='/path/to/file.txt',
content='some_diff',
),
second=[
{
'rule_name': 'rule_one',
'match_text': 'some_diff',
},
],
)
def test_scan_file_six(
self,
):
rules_manager = pyrepscan.RulesManager()
rules_manager.add_content_rule(
name='rule_one',
pattern=r'(some_.+)',
whitelist_patterns=[
'diff',
'other',
],
blacklist_patterns=[],
)
self.assertIsNone(
obj=rules_manager.scan_file(
file_path='/path/to/file.txt',
content='some_text',
),
)
self.assertEqual(
first=rules_manager.scan_file(
file_path='/path/to/file.txt',
content='some_other',
),
second=[
{
'rule_name': 'rule_one',
'match_text': 'some_other',
},
],
)
self.assertEqual(
first=rules_manager.scan_file(
file_path='/path/to/file.txt',
content='some_diff',
),
second=[
{
'rule_name': 'rule_one',
'match_text': 'some_diff',
},
],
)
def test_scan_file_seven(self):
    """File-path rules match against the path, independent of the content."""
    manager = pyrepscan.RulesManager()
    manager.add_file_path_rule(
        name='rule_one',
        pattern=r'dev\.txt',
    )
    # A non-matching path never reports, whatever the content is.
    for any_content in (None, '', 'other_text'):
        self.assertIsNone(
            obj=manager.scan_file(
                file_path='/path/to/file.txt',
                content=any_content,
            ),
        )
    # The matching path reports itself as the match text.
    self.assertEqual(
        first=manager.scan_file(
            file_path='/path/to/dev.txt',
            content='',
        ),
        second=[
            {
                'rule_name': 'rule_one',
                'match_text': '/path/to/dev.txt',
            },
        ],
    )
    # A second path rule stacks: both matches are reported.
    manager.add_file_path_rule(
        name='rule_two',
        pattern=r'(\.txt)',
    )
    self.assertEqual(
        first=manager.scan_file(
            file_path='/path/to/dev.txt',
            content='some_text',
        ),
        second=[
            {
                'rule_name': 'rule_one',
                'match_text': '/path/to/dev.txt',
            },
            {
                'rule_name': 'rule_two',
                'match_text': '/path/to/dev.txt',
            },
        ],
    )
def test_check_pattern(self):
    """check_pattern rejects invalid patterns and returns all captured groups."""
    manager = pyrepscan.RulesManager()
    # Unbalanced, capture-less and non-capturing patterns must all be rejected.
    for invalid_pattern in (r'(', r'no_capturing_group', r'(?:\:)'):
        with self.assertRaises(
            expected_exception=RuntimeError,
        ):
            manager.check_pattern(
                content='',
                pattern=invalid_pattern,
            )
    # A valid capturing pattern returns every captured substring.
    self.assertEqual(
        first=manager.check_pattern(
            content='some sentence',
            pattern=r'([^ ]+)',
        ),
        second=[
            'some',
            'sentence',
        ]
    )
|
<reponame>Ornendil/logout<filename>logout2.py
#!/usr/bin/python3
# coding: utf-8
# Settings
# How many seconds of inactivity before the user is warned about the upcoming logout
loggUtBeskjedTid = 1 * 60
# How many additional seconds after the warning before the user is logged out
loggUtTid = 4 * 60
import cairo
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
from gi.repository import Gtk, Gdk, GLib, Pango
from threading import Timer, Thread, Event
from datetime import datetime, timedelta
import subprocess
from subprocess import run
import locale
# Pick UI strings by the system locale: Norwegian Bokmål gets Norwegian text,
# everything else falls back to English.
spraak = locale.getlocale()
print(spraak)
if 'nb_NO' in spraak:
    logoutButtonText = 'Logg ut og slett alt'
    inactiveUserText = 'Inaktiv bruker'
    tilLogoutText = 'til utlogging'
    loggedOnText = 'Tid pålogget:'
else:
    logoutButtonText = 'Logout and delete everything'
    inactiveUserText = 'Inactive user'
    tilLogoutText = 'until logout'
    loggedOnText = 'Time logged on:'
# Shared mutable state used by TransparentWindow.clock():
timeout = 0            # GLib timeout source id for the 1-second tick
idleTime = 0           # last measured idle time in seconds
fullScreen = 'false'   # 'true' while a fullscreen window is detected
class TransparentWindow(Gtk.Window):
    """Always-on-top translucent countdown window that logs out idle users.

    Shows time logged on; once the idle time passes loggUtBeskjedTid it
    switches to a logout countdown, and after loggUtTid more seconds it runs
    the logout script. Fullscreen applications (e.g. video) pause the idle
    tracking.
    """

    def __init__(self):
        Gtk.Window.__init__(self, title="Utloggings-timer : Ullensaker bibliotek")
        self.set_border_width(8)
        box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=0)
        box.set_name('box')
        self.add(box)
        # Build the window parts: two text labels around the counter, plus
        # the manual logout button.
        self.tekst1 = Gtk.Label(label="")
        self.tekst1.set_name('tekst1')
        box.pack_start(self.tekst1, True, True, 0)
        self.counter = Gtk.Label(label="")
        self.counter.set_name('counter')
        box.pack_start(self.counter, True, True, 0)
        self.tekst2 = Gtk.Label(label="")
        self.tekst2.set_name('tekst2')
        box.pack_start(self.tekst2, True, True, 0)
        self.logoutButton = Gtk.Button(label=logoutButtonText)
        self.logoutButton.set_name('logoutButton')
        self.logoutButton.set_can_focus(False)
        self.logoutButton.connect("clicked", self.logOff)
        box.pack_start(self.logoutButton, True, True, 0)
        self.gtk_style()
        self.startTime = self.currentTime()
        self.clock()
        global timeout
        timeout = GLib.timeout_add_seconds(1, self.clock)
        self.connect('destroy', Gtk.main_quit)
        self.set_decorated(False)
        self.set_skip_taskbar_hint(True)
        # Bug fix: set_keep_above() takes a boolean; the original passed the
        # string 'true', which only worked because non-empty strings are truthy.
        self.set_keep_above(True)
        self.connect('draw', self.draw)

        # Make it possible to drag the window around.
        def moveMainWindow(self, event):
            self.begin_move_drag(event.button, event.x_root, event.y_root, event.get_time())
        self.connect('button_press_event', moveMainWindow)

        # Set the size and position of the window (bottom corner, locale-aware).
        screen = self.get_screen()
        self.set_default_size(150, 120)
        width, height = self.get_size()
        if 'ar_SA' in spraak:
            self.set_gravity(Gdk.Gravity.SOUTH_WEST)
            self.move(14, screen.get_height() - height - 32)
        else:
            self.set_gravity(Gdk.Gravity.SOUTH_EAST)
            self.move(screen.get_width() - width - 10, screen.get_height() - height - 37)
        # Enable per-pixel transparency when the compositor supports it.
        visual = screen.get_rgba_visual()
        if visual and screen.is_composited():
            self.set_visual(visual)
        self.set_app_paintable(True)
        self.show_all()

    def clock(self):
        """Per-second GLib timeout callback updating the labels.

        Returns True to keep the timeout alive, False to stop it (GLib
        source-callback semantics).
        """
        tid = self.currentTime() - self.startTime
        global idleTime
        global fullScreen
        # If the user is watching a movie or similar, keep idle time at zero.
        if self.isFullScreen() == 'true':
            idleTime = 0
            fullScreen = 'true'
        # But when the user stops watching, fake one input event and resume
        # normal idle tracking.
        elif self.isFullScreen() != 'true' and fullScreen == 'true':
            # Bug fix: the original assigned to a local `fullscreen`, so the
            # global flag never reset and fakeinput.sh ran on every tick
            # after any fullscreen session ended.
            fullScreen = 'false'
            run("/opt/logout/fakeinput.sh")
            idleTime = self.idleTimer()
        # Otherwise just let the clock run.
        else:
            idleTime = self.idleTimer()
        if idleTime >= loggUtBeskjedTid:
            # Warning phase: show the remaining time until forced logout.
            secondsLeft = timedelta(seconds=(loggUtBeskjedTid + loggUtTid) - int(idleTime))
            self.tekst1.set_text(inactiveUserText)
            self.counter.set_text(str(secondsLeft))
            self.tekst2.set_text(tilLogoutText)
        else:
            # Normal phase: show how long the user has been logged on.
            self.tekst1.set_text(loggedOnText)
            self.counter.set_text(str(timedelta(seconds=tid.seconds)))
            self.tekst2.set_text("")
        if idleTime >= loggUtTid + loggUtBeskjedTid:
            self.logOff('self')
            # Bug fix: GLib expects boolean returns; the string 'false' is
            # truthy and would not have signalled "remove this source".
            return False
        else:
            return True

    def currentTime(self):
        """Return the current wall-clock time."""
        return datetime.now()

    # Checks whether some window is running fullscreen; the helper script
    # prints 'true' when one is.
    def isFullScreen(self):
        full = run("/opt/logout/isfullscreen.sh", stdout=subprocess.PIPE).stdout.decode('utf-8').strip(' \t\n\r')
        return full

    # Logs the user out, hard and brutal, and stops the ticking timer.
    def logOff(self, widget):
        GLib.source_remove(timeout)
        run("/opt/logout/loggut.sh")

    def idleTimer(self):
        """Return the current idle time in seconds (idle.sh prints milliseconds)."""
        return float(run("/opt/logout/idle.sh", stdout=subprocess.PIPE).stdout.decode('utf-8')) / 1000

    def gtk_style(self):
        """Load the application CSS and register it for the whole screen."""
        style_provider = Gtk.CssProvider()
        # Use a context manager so the file handle is closed even on error.
        with open('/opt/logout/style.css') as css:
            css_data = css.read().encode()
        style_provider.load_from_data(css_data)
        Gtk.StyleContext.add_provider_for_screen(
            Gdk.Screen.get_default(),
            style_provider,
            Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
        )

    def draw(self, widget, context):
        """Paint a translucent grey background behind the widgets."""
        context.set_source_rgba(0.75, 0.75, 0.75, 0.8)
        context.set_operator(cairo.OPERATOR_SOURCE)
        context.paint()
        context.set_operator(cairo.OPERATOR_OVER)
# Build the window and hand control to the GTK main loop.
TransparentWindow()
Gtk.main()
|
from flask import Flask, flash, redirect, render_template, request, url_for, send_file
from flask_wtf import Form, FlaskForm
from wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField, SelectField, IntegerField
from flask_wtf.file import FileField, FileAllowed, FileRequired
# NOTE(review): modern werkzeug exposes secure_filename from werkzeug.utils;
# this import only works on older werkzeug versions — confirm the pinned version.
from werkzeug import secure_filename
import os
import metquest as mq
import json
import cobra
import webbrowser
from cobra.io import load_json_model
from cobra.core import Metabolite, Reaction, Model
from d3flux import flux_map
import matplotlib.pyplot as plot
from collections import Counter
import io
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from datetime import datetime
# Only SBML model files may be uploaded.
ALLOWED_EXTENSIONS = set(['xml'])
app = Flask(__name__)
app.config.from_object(__name__)
app.config['SECRET_KEY'] = '<KEY>'
# Each run of the app stores its uploads in a fresh timestamped directory.
app.config['UPLOAD_PATH'] = os.path.join(os.getcwd(), datetime.now().strftime('KOFILES_%d-%m-%Y_%I:%M:%S'))
if not os.path.exists(os.path.join(app.config['UPLOAD_PATH'])):
    os.makedirs(os.path.join(app.config['UPLOAD_PATH']))
    print('file made')
class Knock(Form):
    """Form collecting the parameters of a knockout analysis:
    pathway cut-off length, reaction(s) to knock out, seed metabolites
    and target metabolite(s)."""
    length = TextField('Cut-off:', validators=[validators.required()])
    knock_out = TextField('Reaction to be knocked-out:', validators=[validators.required()])
    seeds = TextField('Seed Metabolites:', validators=[validators.required()])
    target = TextField('Target Metabolite:', validators=[validators.required()])
    def reset(self):
        # NOTE(review): MultiDict is never imported and reset_csrf is not
        # defined on this form, so calling reset() raises NameError — this
        # looks like dead code; confirm before removing.
        blankData = MultiDict([ ('csrf', self.reset_csrf() ) ])
        self.process(blankData)
# Module-level state shared across requests (single-user, local tool).
pathsHTML = []    # unused here; presumably populated elsewhere — TODO confirm
cobra_mods = []   # loaded cobra model objects
modids = []       # ids of the loaded models
fnames = []       # secure filenames of the uploaded files
class UploadForm(FlaskForm):
    """Form with a single required file-upload field for the model files."""
    upload = FileField(validators=[FileRequired()])
@app.route('/')
def inputs():
    """Render the landing page with fresh upload and knockout forms,
    resetting the module-level model bookkeeping."""
    global modids
    global fnames
    modids = []
    fnames = []
    upload_form = UploadForm(request.form)
    knock_form = Knock(request.form)
    return render_template(
        'knockUpload.html',
        uploadForm=upload_form,
        knockForm=knock_form,
    )
def allowed_file(filename, allowed=None):
    """Return True when *filename* has an allowed extension.

    The extension is everything after the last dot, compared
    case-insensitively.

    Args:
        filename: Name to check.
        allowed: Optional collection of lowercase extensions; defaults to
            the module-level ALLOWED_EXTENSIONS (backward-compatible).
    """
    if allowed is None:
        allowed = ALLOWED_EXTENSIONS
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in allowed
@app.route('/uploader', methods = ['GET', 'POST'])
def uploader():
    """Receive one or more SBML (.xml) model files, load them with COBRApy,
    and render the reaction list for the page's autocomplete widget.

    Side effects: resets the module-level cobra_mods/modids/fnames lists and
    saves each uploaded file under app.config['UPLOAD_PATH'].
    """
    uploadForm = UploadForm(request.form)
    global modids
    global fnames
    global cobra_mods
    cobra_mods = []
    modids = []
    fnames = []
    if request.method == 'POST' and 'file' in request.files:
        listof = request.files.getlist('file')
        if len(listof) == 0:
            flash('Error : No selected file')
            print(uploadForm.errors)
            return render_template('knockUpload.html')
        if len(listof) > 0:
            rxn_in_model = []
            j = 0
            lentrack = 0
            for f in listof:
                if allowed_file(f.filename):
                    filename = secure_filename(f.filename)
                    fnames.append(filename)
                    f.save(os.path.join(app.config['UPLOAD_PATH'], filename))
                    modl=cobra.io.read_sbml_model(os.path.join(app.config['UPLOAD_PATH'], filename))
                    cobra_mods.append(modl)
                    if modl:
                        modids.append(modl.id)
                        # Pre-allocate one autocomplete entry per reaction of
                        # this model; j walks the flat rxn_in_model list across
                        # all models, lentrack marks where this model started.
                        for i in range(len(modl.reactions)):
                            rxn_in_model.append({'label':"", 'value':"", 'category': modl.id})
                        while j < (lentrack + len(modl.reactions)):
                            for reacts in modl.reactions:
                                rxn_in_model[j]['label'] = str(reacts.name) + " ( " + str(reacts.id) + " )"
                                rxn_in_model[j]['value'] = str(modl.id) + " " + str(reacts.id)
                                j = j+1
                        lentrack = lentrack + len(modl.reactions)
                    else:
                        flash('Error : Model %s not valid. Upload a .xml model file.'%(filename))
                        print(uploadForm.errors)
                        return render_template('knockUpload.html')
                else:
                    flash('Error : Model %s not valid. Upload a .xml model file.'%(f.filename))
                    print(uploadForm.errors)
                    return render_template('knockUpload.html')
            flash('Model Uploaded')
            return render_template('knockUpload.html', rxn_in_model = rxn_in_model, fnames = fnames)
    else:
        flash('Error : No file selected')
        print(uploadForm.errors)
        return render_template('knockUpload.html')
@app.route("/test", methods=['GET', 'POST'])
def test():
    """Run the knockout analysis: find pathways to the target metabolites
    before and after removing the selected reactions, and return a bar chart
    comparing reaction frequencies as a PNG.

    NOTE(review): the chart is returned from inside the per-target loop, so
    only the first target found in the knocked-out pathways is reported —
    confirm whether multi-target output was intended.
    """
    knockForm = Knock(request.form)
    global modids
    if request.method == 'POST':
        #if request.form.post['action'] == 'make_paths':
        if knockForm.validate():
            global modids
            if len(modids) != 0 :
                # Parse the form fields: cut-off, knockouts, seeds and targets.
                cut_len = request.form['length']
                cut_len = int(cut_len)
                knockout = request.form['knock_out']
                knockout = list(set(knockout.split(",")))
                for i in knockout:
                    if i == "":
                        knockout.remove(i)
                seed_met = request.form['seeds']
                seed_met = seed_met.split(",")
                seed_met = set(seed_met)
                tar_met = request.form['target']
                tar_met = tar_met.split(",")
                for i in tar_met:
                    if i == "":
                        tar_met.remove(i)
                # Build the metabolic graph and enumerate pathways before knockout.
                G, namemap = mq.create_graph(os.path.join(app.config['UPLOAD_PATH']),len(modids))
                print('Graph made')
                pathways, cyclic, scope = mq.find_pathways(G,seed_met,cut_len)
                print('pathways found')
                all_reactions = {}
                freq = {}
                check = 0
                for i in range(len(tar_met)):
                    all_reactions[tar_met[i]] = []
                    pred = G.predecessors
                    succ = G.successors
                    if tar_met[i] in pathways:
                        # Flatten all reactions in all pathways to this target
                        # and convert each to a relative frequency.
                        all_reactions_involved = []
                        for plen in pathways[tar_met[i]]:
                            for paths in pathways[tar_met[i]][plen]:
                                for reactions in paths:
                                    all_reactions_involved.append(namemap[reactions])
                        all_reactions[tar_met[i]] = all_reactions_involved
                        freq[tar_met[i]] = dict(Counter(all_reactions[tar_met[i]]))
                        for keys,values in freq[tar_met[i]].items():
                            freq[tar_met[i]][keys] = values/len(all_reactions[tar_met[i]])
                    check = check + len(all_reactions[tar_met[i]])
                if check == 0 :
                    flash('Error : No pathways could be found. Consider changing the cut-off or the seed metabolite set.')
                    print(knockForm.errors)
                    return render_template('knockUpload.html', knockForm = knockForm)
                else:
                    print("knocking out reaction(s)")
                    # Group knockout entries ("<model id> <reaction id>") by model.
                    mods_to_knock_from = {}
                    for i in knockout:
                        mods_to_knock_from[i.split(" ")[0]] = []
                    for i in knockout:
                        mods_to_knock_from[i.split(" ")[0]].append(i.split(" ")[1])
                    for keys, values in mods_to_knock_from.items():
                        for j in cobra_mods:
                            if keys == j.id:
                                j.remove_reactions(values, True)
                                Jid = j.id + "-" + "-".join(values)
                    # NOTE(review): only the last Jid assigned above survives
                    # into modids — multi-model knockouts lose ids; confirm.
                    modids = []
                    modids.append(Jid)
                    # Write the knocked-out models into a sub-folder and rebuild
                    # the graph/pathways from there.
                    fold_name = "_".join(modids)
                    if not os.path.exists(os.path.join(app.config['UPLOAD_PATH'], fold_name)):
                        os.makedirs(os.path.join(app.config['UPLOAD_PATH'], fold_name))
                    for i in cobra_mods:
                        cobra.io.write_sbml_model(i, os.path.join(app.config['UPLOAD_PATH'], fold_name + "/" + i.id + ".xml"))
                    G_k, namemap_k = mq.create_graph(os.path.join(app.config['UPLOAD_PATH'], fold_name), len(modids))
                    pathways_k, cyclic_k, scope_k = mq.find_pathways(G_k,seed_met,cut_len)
                    all_reactions_k = {}
                    freq_k = {}
                    final_freq_k ={}
                    check1 = 0
                    for i in range(len(tar_met)):
                        all_reactions_k[tar_met[i]] = []
                        if tar_met[i] in pathways_k:
                            # Same frequency computation as above, post-knockout.
                            all_reactions_involved_k = []
                            for plen in pathways_k[tar_met[i]]:
                                for paths in pathways_k[tar_met[i]][plen]:
                                    for reactions in paths:
                                        all_reactions_involved_k.append(namemap_k[reactions])
                            all_reactions_k[tar_met[i]] = all_reactions_involved_k
                            freq_k[tar_met[i]] = dict(Counter(all_reactions_k[tar_met[i]]))
                            for keys,values in freq_k[tar_met[i]].items():
                                freq_k[tar_met[i]][keys] = values/len(all_reactions_k[tar_met[i]])
                            # Align post-knockout frequencies to the pre-knockout
                            # reaction list (missing reactions get frequency 0).
                            final_freq_k[tar_met[i]] = {}
                            y1 = []
                            x = list(freq[tar_met[i]].keys())
                            y2 = []
                            for keys, values in freq[tar_met[i]].items():
                                final_freq_k[tar_met[i]][keys] = 0
                            for keys, values in freq_k[tar_met[i]].items():
                                final_freq_k[tar_met[i]][keys] = values
                            for react in x:
                                y1.append(freq[tar_met[i]][react])
                                y2.append(final_freq_k[tar_met[i]][react])
                            # NOTE(review): importing numpy inside the loop works
                            # but belongs at module level.
                            import numpy as np
                            x_nums = np.arange(1,len(freq[tar_met[i]])+1)
                            # Horizontal grouped bar chart: before vs after knockout.
                            fig, ax = plot.subplots(figsize=(15,50))
                            p1 = ax.barh(x_nums + 0.25, y1, 0.25, align ='center', color="b", label='Before Knockout')
                            p2 = ax.barh(x_nums, y2, 0.25, align ='center',color= "r", label='After Knockout')
                            ax.set(yticks = x_nums + 0.25/2, yticklabels=x)
                            ax.legend()
                            ax.autoscale_view()
                            img = io.BytesIO()
                            plot.savefig(img)
                            img.seek(0)
                            check1 = check1 + len(all_reactions_k[tar_met[i]])
                            return send_file(img, mimetype='image/png')
            else:
                flash('Error : Model file not uploaded. Remember to upload after selecting the .xml model file.')
                print(knockForm.errors)
                return render_template('knockUpload.html', knockForm = knockForm)
        else:
            flash('Error : All the form fields are required. ')
            print(knockForm.errors)
            return render_template('knockUpload.html', knockForm = knockForm)
if __name__ == "__main__":
    # Open the UI in the default browser, then start the Flask dev server.
    # NOTE(review): app.run() defaults to port 5000 but the URL points at
    # 5002 — confirm which port is intended.
    url = 'http://127.0.0.1:5002'
    webbrowser.open_new(url)
    app.run()
|
import sys, os, json, io, csv
# from django.http import Http404
# from django.shortcuts import get_object_or_404, render
# from django.http import JsonResponse, HttpResponse
# from django.core.urlresolvers import reverse
from django.views.decorators.csrf import csrf_exempt
import traceback
import pymongo
from urllib.parse import parse_qs
import djhelpers as dj
import endpoints
# Build the dispatch table: for every registered endpoint name "ep", map each
# attribute of `endpoints` called "ep_<suffix>" to funcmap["<suffix>"].
epdir = dir(endpoints)
funcmap={}
for ep in endpoints.djangle_endpoints:
    funcs = [x for x in epdir if x[:len(ep)+1] == ep+"_"]
    funcmap.update({x[len(ep)+1:]: endpoints.__dict__[x] for x in funcs})
funcnames=funcmap.keys()
print ("funcmap:", funcnames, funcmap)
def parse_qstring(s):
    """Parse a URL query string, unwrapping single-element value lists.

    parse_qs maps every key to a list; keys with exactly one value are
    flattened to that value, multi-valued keys keep their list.
    """
    parsed = parse_qs(s)
    return {
        key: (values[0] if type(values) == list and len(values) == 1 else values)
        for key, values in parsed.items()
    }
@csrf_exempt
def home(request):
    """Catch-all view routing /<endpoint>/<function>/... to the matching
    function from `endpoints`, decoding the request payload according to the
    requested format ("json", "rows", "columns", "csv" or "raw").
    """
    # print ("HOME", request)
    try:
        # Split the request path into its query string and path components.
        endpt = request.get_full_path()
        rawquery = ""
        if "?" in endpt:
            rawquery = endpt[endpt.find("?")+1:]
            endpt = endpt[:endpt.find("?")]
        parts = [x for x in endpt.split("/") if x != ""]
        if parts==["favicon.ico"]:
            return dj.html("")
        # print ("PARTS:",parts)
        if len(parts)<1:
            # return dj.html('<div>Perhaps you need some help? try <a href="/help/docs">here</a></div>')
            parts=["pages", "index"]
        elif len(parts) == 1:
            # A lone path component containing a dot is served as a file;
            # otherwise it is treated as a page name.
            if parts[0].find(".") > -1:
                return dj.file(parts[0])
            parts.insert(0, "pages")
        try:
            # First component selects the endpoint module; "static" serves files.
            ep = parts.pop(0)
            if ep not in endpoints.djangle_endpoints:
                if ep=="static":
                    fn = "./static/"+ "/".join(parts)
                    print ("STATIC:", request.META.get('HTTP_ACCEPT'), fn)
                    return dj.binary(fn, request.META.get('HTTP_ACCEPT'))
                else:
                    return dj.error("Unknown endpoint: %s" % ep)
        except:
            traceback.print_exc()
            return dj.error("must specify a module")
        try:
            func = parts.pop(0)
        except:
            return dj.error("must specify a function")
        if func not in funcnames:
            return dj.error("must specify a valid function")
        kwords = {}
        # Functions listed in djangle_ret_meta also receive the request metadata.
        if func in getattr(endpoints, "djangle_ret_meta", ()):
            kwords['_request_meta_'] = request.META
        func = funcmap[func]
        format="json"
        data=None
        query = parse_qstring(rawquery)
        # print ("Q:", query)
        # "data" and "format" are reserved query keys; everything else becomes
        # a keyword argument for the endpoint function.
        for key,val in query.items():
            if key=="data":
                data=bytes(val,encoding='utf8')
            elif key == "format":
                format = val
            else:
                kwords[key]=val
        # A POST body overrides any data passed in the query string.
        if request.method=="POST":
            data = request.body#.decode('utf8'))
        if data:
            # print ("DATA: %d bytes" % len(data))
            if format=="json":
                data = json.loads(data.decode('utf8'))
            elif format=="rows":
                # Row-oriented JSON: first row is the schema, the rest are rows.
                data = json.loads(data.decode('utf8'))
                j = []
                schema = data.pop(0)
                for row in data:
                    j.append(dict(zip(schema,row)))
                data=j
            elif format == "columns":
                # Column-oriented JSON: {"col": [v0, v1, ...], ...} -> row dicts.
                data = json.loads(data.decode('utf8'))
                j = []
                # NOTE(review): len(x) over .items() is always 2 (key/value
                # tuples); presumably the column lengths via .values() were
                # intended — confirm before relying on this branch.
                m = max([len(x) for x in data.items()])
                for i in range(m):
                    row={}
                    for k in data.keys():
                        row[k]=data[k][i]
                    j.append(row)
                data = j
            elif format == "csv":
                delim = ","
                if 'delimiter' in kwords:
                    delim = kwords['delimiter']
                    del kwords['delimiter']
                f = io.StringIO(data.decode('utf8'))
                data = []
                # First reader pass consumes the header row; the reader is then
                # rebuilt so the data rows are read from the current position.
                r = csv.reader(f, delimiter=delim)
                for schema in r:
                    break
                r = csv.reader(f, delimiter=delim)
                n = 0
                for row in r:
                    try:
                        x = {}
                        # print( len(schema) , len(row))
                        if len(schema) < len(row):
                            print ("WARNING: unused data in row %d" % n)
                        if len(schema) > len(row):
                            print ("WARNING: unfilled fields in row %d" % n)
                        for key, val in zip(schema, row):
                            x[key] = val
                        data.append(x)
                    except:
                        print("ERROR: csv read error at row %d" % n)
                        traceback.print_exc()
                    n += 1
                f.close()
            elif format == "raw":
                # Raw payloads are handed to the endpoint untouched.
                print ("RAW:", type(data), len(data))
                return func(data=data)
            else:
                return dj.error("unknown format: %s" % format)
        if data == None:
            # No payload: single call with the remaining path parts + keywords.
            ret = func(*parts, **kwords)
            if "Response" not in str(type(ret)):
                ret = dj.json(ret)
        else:
            # Payload present: invoke the endpoint once per data row.
            if type(data)!=list:
                data=[data]
                count=1
            else:
                count=len(data)
            n=0
            rets=[]
            for row in data:
                try:
                    kwords['data']=row
                    # print ("CALLFUNC:", parts, kwords)
                    ret = func(*parts, **kwords)
                    # A dict with an "error" key aborts the batch immediately.
                    if type(ret)==dict and "error" in ret:
                        ret["rows_processed"]=n
                        return dj.json(ret)
                    if count == 1 or ret != None:
                        rets.append(ret)
                except:
                    return dj.html(traceback.format_exc())
                n+=1
            if count>1:
                ret=dj.json({"response":rets, "rows_processed": n})
            elif count==1:
                try:
                    ret = dj.json(rets[0])
                except:
                    ret = "BADJSON: " + str(rets[0])
            else:
                ret = dj.error("No data processed")
        # print ("RETURNS:",ret)
        return ret
    except:
        # Last-resort handler: log the traceback and return a 500 response.
        status = 500
        trace = traceback.format_exc()
        print ("DJANGLE HTTP ERROR: %d EXCEPTION: %s" % (status, trace.strip().split("\n")[-1]))
        print (trace)
        return dj.error("HTTP 500 error", status=status)
|
<reponame>textileio/pygate-gRPC
from typing import Iterable, List, Tuple
from deprecated import deprecated
from google.protobuf.json_format import Parse
from proto import ffs_rpc_pb2, ffs_rpc_pb2_grpc
from pygate_grpc.errors import ErrorHandlerMeta, future_error_handler
# gRPC metadata key carrying the FFS auth token on every call.
TOKEN_KEY = "x-ffs-token"
CHUNK_SIZE = 1024 * 1024  # 1MB
def _generate_chunks(chunks: Iterable[bytes]) -> Iterable[ffs_rpc_pb2.StageRequest]:
    """Wrap each raw byte chunk in a StageRequest message."""
    return (ffs_rpc_pb2.StageRequest(chunk=piece) for piece in chunks)
def chunks_to_bytes(chunks: Iterable[ffs_rpc_pb2.StageRequest]) -> Iterable[bytes]:
    """Yield the raw payload of each StageRequest message."""
    for message in chunks:
        yield message.chunk
def bytes_to_chunks(bytes_iter: Iterable[bytes],) -> Iterable[ffs_rpc_pb2.StageRequest]:
    """Convert an iterable of byte chunks into StageRequest messages."""
    return (ffs_rpc_pb2.StageRequest(chunk=blob) for blob in bytes_iter)
def get_file_bytes(filename: str):
    """Yield the contents of *filename* in CHUNK_SIZE binary blocks."""
    with open(filename, "rb") as handle:
        piece = handle.read(CHUNK_SIZE)
        while piece:
            yield piece
            piece = handle.read(CHUNK_SIZE)
class FfsClient(object, metaclass=ErrorHandlerMeta):
    """Client for the powergate FFS gRPC service.

    Every authenticated call attaches the FFS instance token as the
    ``x-ffs-token`` gRPC metadata entry; set it once with :meth:`set_token`
    or pass it explicitly per call.
    """

    def __init__(self, channel):
        self.client = ffs_rpc_pb2_grpc.RPCServiceStub(channel)
        self.token = None

    def set_token(self, token: str):
        """Remember *token* as the default for subsequent calls."""
        self.token = token

    def create(self):
        """Create a new FFS instance."""
        req = ffs_rpc_pb2.CreateRequest()
        return self.client.Create(req)

    def list_ffs(self):
        """List all FFS instances."""
        req = ffs_rpc_pb2.ListAPIRequest()
        return self.client.ListAPI(req)

    def id(self, token: str):
        """Return the identity of the FFS instance behind *token*."""
        req = ffs_rpc_pb2.IDRequest()
        return self.client.ID(req, metadata=self._get_meta_data(token))

    def addrs_list(self, token: str = None):
        """List the instance's wallet addresses."""
        req = ffs_rpc_pb2.AddrsRequest()
        return self.client.Addrs(req, metadata=self._get_meta_data(token))

    def addrs_new(
        self, name: str, type_: str = "", is_default: bool = False, token: str = None
    ):
        """Create a new wallet address, optionally making it the default."""
        req = ffs_rpc_pb2.NewAddrRequest(
            name=name, address_type=type_, make_default=is_default
        )
        return self.client.NewAddr(req, metadata=self._get_meta_data(token))

    def sign_message(self, addr: str, msg: bytes, token: str = None):
        """Sign *msg* with the wallet address *addr*."""
        req = ffs_rpc_pb2.SignMessageRequest(addr=addr, msg=msg)
        return self.client.SignMessage(req, metadata=self._get_meta_data(token))

    def verify_message(
        self, addr: str, msg: bytes, signature: bytes, token: str = None
    ):
        """Verify that *signature* over *msg* was produced by *addr*."""
        req = ffs_rpc_pb2.VerifyMessageRequest(addr=addr, msg=msg, signature=signature)
        return self.client.VerifyMessage(req, metadata=self._get_meta_data(token))

    def default_config(self, token: str = None):
        """Return the instance's default storage configuration."""
        req = ffs_rpc_pb2.DefaultStorageConfigRequest()
        return self.client.DefaultStorageConfig(
            req, metadata=self._get_meta_data(token)
        )

    def default_config_for_cid(self, cid: str, token: str = None):
        """Return the storage configuration applied to *cid*."""
        req = ffs_rpc_pb2.GetStorageConfigRequest(cid=cid)
        return self.client.GetStorageConfig(req, metadata=self._get_meta_data(token))

    # Currently you need to pass in the ffs_rpc_pb2.DefaultConfig. However, this is not a good design.
    def set_default_config(self, config: str, token: str = None):
        """Set the default storage configuration from a JSON string."""
        config = Parse(config, ffs_rpc_pb2.StorageConfig())
        req = ffs_rpc_pb2.SetDefaultStorageConfigRequest(config=config)
        return self.client.SetDefaultStorageConfig(
            req, metadata=self._get_meta_data(token)
        )

    def show(self, cid: str, token: str = None):
        """Show the storage state of *cid*."""
        req = ffs_rpc_pb2.ShowRequest(cid=cid)
        return self.client.Show(req, metadata=self._get_meta_data(token))

    def show_all(self, token: str = None):
        """Show the storage state of every cid in the instance."""
        req = ffs_rpc_pb2.ShowAllRequest()
        return self.client.ShowAll(req, metadata=self._get_meta_data(token))

    # Note that the chunkIter should be an iterator that yield `ffs_rpc_pb2.AddToHotRequest`,
    # it is the caller's responsibility to create the iterator.
    # The provided getFileChunks comes in handy some times.
    # TODO: deprecate this.
    @deprecated(version="0.0.6", reason="This method is deprecated")
    def add_to_hot(
        self, chunks_iter: Iterable[ffs_rpc_pb2.StageRequest], token: str = None
    ):
        """Deprecated alias of :meth:`stage`."""
        return self.client.Stage(chunks_iter, metadata=self._get_meta_data(token))

    def stage(self, chunks_iter: Iterable[ffs_rpc_pb2.StageRequest], token: str = None):
        """Stream *chunks_iter* into hot storage."""
        return self.client.Stage(chunks_iter, metadata=self._get_meta_data(token))

    # This will return an iterator which callers can look through
    def get(self, cid: str, token: str = None) -> Iterable[bytes]:
        """Retrieve the data behind *cid* as an iterator of byte chunks."""
        req = ffs_rpc_pb2.GetRequest(cid=cid)
        chunks = self.client.Get(req, metadata=self._get_meta_data(token))
        return chunks_to_bytes(chunks)

    def send_fil(self, sender: str, receiver: str, amount: int, token: str = None):
        """Send *amount* FIL from *sender* to *receiver*."""
        # To avoid name collision since `from` is reserved in Python.
        kwargs = {"from": sender, "to": receiver, "amount": amount}
        req = ffs_rpc_pb2.SendFilRequest(**kwargs)
        return self.client.SendFil(req, metadata=self._get_meta_data(token))

    @future_error_handler
    def logs(self, cid, token: str = None, history: bool = False, timeout: int = None):
        """Stream the deal logs of *cid*, optionally including history."""
        req = ffs_rpc_pb2.WatchLogsRequest(cid=cid, history=history)
        return self.client.WatchLogs(
            req, metadata=self._get_meta_data(token), timeout=timeout
        )

    def info(self, cid, token: str = None):
        """Return info for *cid*."""
        req = ffs_rpc_pb2.WatchLogsRequest(cid=cid)
        return self.client.Info(req, metadata=self._get_meta_data(token))

    def get_storage_job(self, jid: str, token: str = None):
        """Return the storage job with id *jid*."""
        req = ffs_rpc_pb2.GetStorageJobRequest(jid=jid)
        return self.client.GetStorageJob(req, metadata=self._get_meta_data(token))

    def push(
        self, cid, token: str = None, override: bool = False, config: str = None,
    ):
        """Push a storage configuration (JSON string) for *cid*."""
        if config:
            config = Parse(config, ffs_rpc_pb2.StorageConfig())
        req = ffs_rpc_pb2.PushStorageConfigRequest(
            cid=cid,
            override_config=override,
            has_override_config=override,
            config=config,
            has_config=config is not None,
        )
        return self.client.PushStorageConfig(req, metadata=self._get_meta_data(token))

    def close(self, token: str = None):
        """Close the FFS instance."""
        req = ffs_rpc_pb2.CloseRequest()
        return self.client.Close(req, metadata=self._get_meta_data(token))

    def list_pay_channel(self, token: str = None):
        """List the instance's payment channels."""
        req = ffs_rpc_pb2.ListPayChannelsRequest()
        return self.client.ListPayChannels(req, metadata=self._get_meta_data(token))

    def create_pay_channel(
        self, sender: str, receiver: str, amount: int, token: str = None
    ):
        """Create a payment channel funded with *amount* from *sender* to *receiver*."""
        # To avoid name collision since `from` is reserved in Python.
        kwargs = {"from": sender, "to": receiver, "amount": amount}
        req = ffs_rpc_pb2.CreatePayChannelRequest(**kwargs)
        return self.client.CreatePayChannel(req, metadata=self._get_meta_data(token))

    def redeem_pay_channel(
        self, sender: str, receiver: str, amount: int, token: str = None
    ):
        """Redeem a payment channel between *sender* and *receiver*."""
        # To avoid name collision since `from` is reserved in Python.
        kwargs = {"from": sender, "to": receiver, "amount": amount}
        # Bug fix: protobuf message constructors only accept keyword
        # arguments, so the original `CreateRequest(kwargs)` raised a
        # TypeError; it also built an unrelated CreateRequest message.
        req = ffs_rpc_pb2.CreatePayChannelRequest(**kwargs)
        # NOTE(review): this still invokes CreatePayChannel, mirroring the
        # original code; the intended RPC is presumably RedeemPayChannel —
        # confirm against the ffs_rpc proto before changing the wire call.
        return self.client.CreatePayChannel(req, metadata=self._get_meta_data(token))

    def list_storage_deal_records(
        self,
        include_final=True,
        include_pending=False,
        from_addrs: List[str] = None,
        data_cids: List[str] = None,
        ascending: bool = False,
        token: str = None,
    ):
        """List storage deal records, filtered by address/cid and state."""
        deal_config = ffs_rpc_pb2.ListDealRecordsConfig(
            from_addrs=from_addrs,
            data_cids=data_cids,
            include_pending=include_pending,
            include_final=include_final,
            ascending=ascending,
        )
        req = ffs_rpc_pb2.ListStorageDealRecordsRequest(config=deal_config)
        return self.client.ListStorageDealRecords(
            req, metadata=self._get_meta_data(token)
        )

    def list_retrieval_deal_records(
        self,
        include_final=True,
        include_pending=False,
        from_addrs: List[str] = None,
        data_cids: List[str] = None,
        ascending: bool = False,
        token: str = None,
    ):
        """List retrieval deal records, filtered by address/cid and state."""
        deal_config = ffs_rpc_pb2.ListDealRecordsConfig(
            from_addrs=from_addrs,
            data_cids=data_cids,
            include_pending=include_pending,
            include_final=include_final,
            ascending=ascending,
        )
        req = ffs_rpc_pb2.ListRetrievalDealRecordsRequest(config=deal_config)
        return self.client.ListRetrievalDealRecords(
            req, metadata=self._get_meta_data(token)
        )

    # The metadata is set in here https://github.com/textileio/js-powergate-client/blob
    # /9d1ad04a7e1f2a6e18cc5627751f9cbddaf6fe05/src/util/grpc-helpers.ts#L7 Note that you can't have capital letter in
    # meta data field, see here: https://stackoverflow.com/questions/45071567/how-to-send-custom-header-metadata-with
    # -python-grpc
    def _get_meta_data(self, token: str) -> Tuple[Tuple[str, str]]:
        """Build the gRPC metadata tuple, preferring the per-call token."""
        if token is not None:
            return ((TOKEN_KEY, token),)
        if self.token is not None:
            return ((TOKEN_KEY, self.token),)
        self._raise_no_token_provided_exception()

    def _raise_no_token_provided_exception(self):
        """Raise when neither a per-call nor a default token is available."""
        raise Exception(
            "No token is provided, you should either call the set_token method to set"
            + " the token, or supplied the token in the method."
        )
|
# -*- mode: python; coding: utf-8 -*
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Tests for HDF5 object
"""
from __future__ import absolute_import, division, print_function
import os
import copy
import numpy as np
import nose.tools as nt
from astropy.time import Time
from pyuvdata import UVData
import pyuvdata.utils as uvutils
from pyuvdata.data import DATA_PATH
import pyuvdata.tests as uvtest
import warnings
import h5py
def test_ReadMiriadWriteUVH5ReadUVH5():
    """Round-trip a Miriad dataset through the UVH5 writer and reader."""
    miriad_file = os.path.join(DATA_PATH, 'zen.2456865.60537.xy.uvcRREAA')
    testfile = os.path.join(DATA_PATH, 'test', 'outtest_miriad.uvh5')
    uv_written = UVData()
    uv_readback = UVData()
    # Reading this Miriad file is expected to warn about the missing altitude.
    uvtest.checkWarnings(uv_written.read_miriad, [miriad_file],
                         nwarnings=1, category=[UserWarning],
                         message=['Altitude is not present'])
    uv_written.write_uvh5(testfile, clobber=True)
    uv_readback.read(testfile)
    nt.assert_equal(uv_written, uv_readback)
    # Phased data must survive the round trip as well.
    uv_written.phase_to_time(Time(np.mean(uv_written.time_array), format='jd'))
    uv_written.write_uvh5(testfile, clobber=True)
    uv_readback.read(testfile)
    nt.assert_equal(uv_written, uv_readback)
    # clean up
    os.remove(testfile)
    return
def test_ReadUVFITSWriteUVH5ReadUVH5():
    """Round-trip a UVFITS dataset through the UVH5 writer and reader."""
    uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')
    testfile = os.path.join(DATA_PATH, 'test', 'outtest_uvfits.uvh5')
    uv_written = UVData()
    uv_readback = UVData()
    uvtest.checkWarnings(uv_written.read_uvfits, [uvfits_file], message='Telescope EVLA is not')
    uv_written.write_uvh5(testfile, clobber=True)
    uv_readback.read(testfile)
    nt.assert_equal(uv_written, uv_readback)
    # clean up
    os.remove(testfile)
    return
def test_ReadUVH5Errors():
    """read_uvh5 raises IOError for a missing file and ValueError for a file
    list combined with read_data=False."""
    uv_obj = UVData()
    missing_file = os.path.join(DATA_PATH, 'fake_file.uvh5')
    nt.assert_raises(IOError, uv_obj.read_uvh5, missing_file)
    nt.assert_raises(ValueError, uv_obj.read_uvh5, ['list of', 'fake files'], read_data=False)
    return
def test_WriteUVH5Errors():
    """write_uvh5 refuses to overwrite an existing file unless clobber=True."""
    uv_written = UVData()
    uv_readback = UVData()
    uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')
    uvtest.checkWarnings(uv_written.read_uvfits, [uvfits_file], message='Telescope EVLA is not')
    testfile = os.path.join(DATA_PATH, 'test', 'outtest_uvfits.uvh5')
    # Touch the target file so it exists before the write attempt.
    with open(testfile, 'a'):
        os.utime(testfile, None)
    nt.assert_raises(ValueError, uv_written.write_uvh5, testfile)
    # clobber=True must overwrite and round-trip cleanly.
    uv_written.write_uvh5(testfile, clobber=True)
    uv_readback.read(testfile)
    nt.assert_equal(uv_written, uv_readback)
    # clean up
    os.remove(testfile)
    return
def test_UVH5OptionalParameters():
    """Optional parameters absent from the sample file must survive a
    write/read cycle."""
    uv_written = UVData()
    uv_readback = UVData()
    uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')
    uvtest.checkWarnings(uv_written.read_uvfits, [uvfits_file], message='Telescope EVLA is not')
    testfile = os.path.join(DATA_PATH, 'test', 'outtest_uvfits.uvh5')
    # Populate the optional attributes before writing.
    uv_written.x_orientation = 'east'
    uv_written.antenna_diameters = np.ones_like(uv_written.antenna_numbers) * 1.
    uv_written.uvplane_reference_time = 0
    # Write out and read back in.
    uv_written.write_uvh5(testfile, clobber=True)
    uv_readback.read(testfile)
    nt.assert_equal(uv_written, uv_readback)
    # clean up
    os.remove(testfile)
    return
def test_UVH5CompressionOptions():
    """Data written with HDF5 compression filters must read back identical."""
    uv_written = UVData()
    uv_readback = UVData()
    uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')
    uvtest.checkWarnings(uv_written.read_uvfits, [uvfits_file], message='Telescope EVLA is not')
    testfile = os.path.join(DATA_PATH, 'test', 'outtest_uvfits_compression.uvh5')
    # Write with lzf compression on the data array only, then read back.
    uv_written.write_uvh5(testfile, clobber=True, data_compression="lzf",
                          flags_compression=None, nsample_compression=None)
    uv_readback.read(testfile)
    nt.assert_equal(uv_written, uv_readback)
    # clean up
    os.remove(testfile)
    return
def test_UVH5ReadMultiple_files():
    """Two frequency-split uvh5 files must recombine into the full dataset."""
    uv_full = UVData()
    uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')
    testfile1 = os.path.join(DATA_PATH, 'test/uv1.uvh5')
    testfile2 = os.path.join(DATA_PATH, 'test/uv2.uvh5')
    uvtest.checkWarnings(uv_full.read_uvfits, [uvfits_file], message='Telescope EVLA is not')
    # Split the data into two halves along the frequency axis.
    uv_lower = copy.deepcopy(uv_full)
    uv_upper = copy.deepcopy(uv_full)
    uv_lower.select(freq_chans=np.arange(0, 32))
    uv_upper.select(freq_chans=np.arange(32, 64))
    uv_lower.write_uvh5(testfile1, clobber=True)
    uv_upper.write_uvh5(testfile2, clobber=True)
    uv_lower.read([testfile1, testfile2])
    # Check history is correct, before replacing and doing a full object check.
    expected_history = (uv_full.history + ' Downselected to specific frequencies '
                        'using pyuvdata. Combined data along frequency axis using'
                        ' pyuvdata.')
    nt.assert_true(uvutils._check_histories(expected_history, uv_lower.history))
    uv_lower.history = uv_full.history
    nt.assert_equal(uv_lower, uv_full)
    # clean up
    os.remove(testfile1)
    os.remove(testfile2)
    return
def test_UVH5PartialRead():
    """
    Test reading in only part of a dataset from disk

    Each select-on-read result is checked against a full read followed by an
    in-memory select() with the same arguments.
    """
    uvh5_uv = UVData()
    uvh5_uv2 = UVData()
    uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')
    uvtest.checkWarnings(uvh5_uv.read_uvfits, [uvfits_file], message='Telescope EVLA is not')
    testfile = os.path.join(DATA_PATH, 'test', 'outtest.uvh5')
    uvh5_uv.write_uvh5(testfile, clobber=True)
    # select on antennas
    ants_to_keep = np.array([0, 19, 11, 24, 3, 23, 1, 20, 21])
    uvh5_uv.read(testfile, antenna_nums=ants_to_keep)
    uvh5_uv2.read(testfile)
    uvh5_uv2.select(antenna_nums=ants_to_keep)
    nt.assert_equal(uvh5_uv, uvh5_uv2)
    # select on frequency channels
    chans_to_keep = np.arange(12, 22)
    uvh5_uv.read(testfile, freq_chans=chans_to_keep)
    uvh5_uv2.read(testfile)
    uvh5_uv2.select(freq_chans=chans_to_keep)
    nt.assert_equal(uvh5_uv, uvh5_uv2)
    # select on pols
    pols_to_keep = [-1, -2]
    uvh5_uv.read(testfile, polarizations=pols_to_keep)
    uvh5_uv2.read(testfile)
    uvh5_uv2.select(polarizations=pols_to_keep)
    nt.assert_equal(uvh5_uv, uvh5_uv2)
    # select on read using time_range; warns, and must match a select on the
    # first two unique times
    unique_times = np.unique(uvh5_uv.time_array)
    uvtest.checkWarnings(uvh5_uv.read, [testfile],
                         {'time_range': [unique_times[0], unique_times[1]]},
                         message=['Warning: "time_range" keyword is set'])
    uvh5_uv2.read(testfile)
    uvh5_uv2.select(times=unique_times[0:2])
    nt.assert_equal(uvh5_uv, uvh5_uv2)
    # now test selecting on multiple axes
    # frequencies first
    uvh5_uv.read(testfile, antenna_nums=ants_to_keep, freq_chans=chans_to_keep,
                 polarizations=pols_to_keep)
    uvh5_uv2.read(testfile)
    uvh5_uv2.select(antenna_nums=ants_to_keep, freq_chans=chans_to_keep,
                    polarizations=pols_to_keep)
    nt.assert_equal(uvh5_uv, uvh5_uv2)
    # baselines first (smaller antenna set drives the read order internally)
    ants_to_keep = np.array([0, 1])
    uvh5_uv.read(testfile, antenna_nums=ants_to_keep, freq_chans=chans_to_keep,
                 polarizations=pols_to_keep)
    uvh5_uv2.read(testfile)
    uvh5_uv2.select(antenna_nums=ants_to_keep, freq_chans=chans_to_keep,
                    polarizations=pols_to_keep)
    nt.assert_equal(uvh5_uv, uvh5_uv2)
    # polarizations first
    ants_to_keep = np.array([0, 1, 2, 3, 6, 7, 8, 11, 14, 18, 19, 20, 21, 22])
    chans_to_keep = np.arange(12, 64)
    uvh5_uv.read(testfile, antenna_nums=ants_to_keep, freq_chans=chans_to_keep,
                 polarizations=pols_to_keep)
    uvh5_uv2.read(testfile)
    uvh5_uv2.select(antenna_nums=ants_to_keep, freq_chans=chans_to_keep,
                    polarizations=pols_to_keep)
    nt.assert_equal(uvh5_uv, uvh5_uv2)
    # clean up
    os.remove(testfile)
    return
def test_UVH5PartialWrite():
    """
    Test writing an entire UVH5 file in pieces

    The same full dataset is written out in chunks along each axis
    (baseline-time via antpairpols, frequency halves, blt halves, and
    polarization halves) and compared against the original after reading.
    """
    full_uvh5 = UVData()
    partial_uvh5 = UVData()
    uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')
    uvtest.checkWarnings(full_uvh5.read_uvfits, [uvfits_file], message='Telescope EVLA is not')
    testfile = os.path.join(DATA_PATH, 'test', 'outtest.uvh5')
    full_uvh5.write_uvh5(testfile, clobber=True)
    full_uvh5.read(testfile)
    # delete data arrays in partial file
    partial_uvh5 = copy.deepcopy(full_uvh5)
    partial_uvh5.data_array = None
    partial_uvh5.flag_array = None
    partial_uvh5.nsample_array = None
    # initialize file on disk
    partial_testfile = os.path.join(DATA_PATH, 'test', 'outtest_partial.uvh5')
    partial_uvh5.initialize_uvh5_file(partial_testfile, clobber=True)
    # write to file by iterating over antpairpol
    antpairpols = full_uvh5.get_antpairpols()
    for key in antpairpols:
        data = full_uvh5.get_data(key, squeeze='none')
        flags = full_uvh5.get_flags(key, squeeze='none')
        nsamples = full_uvh5.get_nsamples(key, squeeze='none')
        partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples,
                                     bls=key)
    # now read in the full file and make sure that it matches the original
    partial_uvh5.read(partial_testfile)
    nt.assert_equal(full_uvh5, partial_uvh5)
    # start over, and write frequencies
    partial_uvh5 = copy.deepcopy(full_uvh5)
    partial_uvh5.data_array = None
    partial_uvh5.flag_array = None
    partial_uvh5.nsample_array = None
    # initialize file on disk
    partial_uvh5.initialize_uvh5_file(partial_testfile, clobber=True)
    Nfreqs = full_uvh5.Nfreqs
    Hfreqs = Nfreqs // 2
    freqs1 = np.arange(Hfreqs)
    freqs2 = np.arange(Hfreqs, Nfreqs)
    # write the two frequency halves in separate calls
    data = full_uvh5.data_array[:, :, freqs1, :]
    flags = full_uvh5.flag_array[:, :, freqs1, :]
    nsamples = full_uvh5.nsample_array[:, :, freqs1, :]
    partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples,
                                 freq_chans=freqs1)
    data = full_uvh5.data_array[:, :, freqs2, :]
    flags = full_uvh5.flag_array[:, :, freqs2, :]
    nsamples = full_uvh5.nsample_array[:, :, freqs2, :]
    partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples,
                                 freq_chans=freqs2)
    # read in the full file and make sure it matches
    partial_uvh5.read(partial_testfile)
    nt.assert_equal(full_uvh5, partial_uvh5)
    # start over, write chunks of blts
    partial_uvh5 = copy.deepcopy(full_uvh5)
    partial_uvh5.data_array = None
    partial_uvh5.flag_array = None
    partial_uvh5.nsample_array = None
    # initialize file on disk
    partial_uvh5.initialize_uvh5_file(partial_testfile, clobber=True)
    Nblts = full_uvh5.Nblts
    Hblts = Nblts // 2
    blts1 = np.arange(Hblts)
    blts2 = np.arange(Hblts, Nblts)
    data = full_uvh5.data_array[blts1, :, :, :]
    flags = full_uvh5.flag_array[blts1, :, :, :]
    nsamples = full_uvh5.nsample_array[blts1, :, :, :]
    partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples,
                                 blt_inds=blts1)
    data = full_uvh5.data_array[blts2, :, :, :]
    flags = full_uvh5.flag_array[blts2, :, :, :]
    nsamples = full_uvh5.nsample_array[blts2, :, :, :]
    partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples,
                                 blt_inds=blts2)
    # read in the full file and make sure it matches
    partial_uvh5.read(partial_testfile)
    nt.assert_equal(full_uvh5, partial_uvh5)
    # start over, write groups of pols
    partial_uvh5 = copy.deepcopy(full_uvh5)
    partial_uvh5.data_array = None
    partial_uvh5.flag_array = None
    partial_uvh5.nsample_array = None
    # initialize file on disk
    partial_uvh5.initialize_uvh5_file(partial_testfile, clobber=True)
    Npols = full_uvh5.Npols
    Hpols = Npols // 2
    pols1 = np.arange(Hpols)
    pols2 = np.arange(Hpols, Npols)
    data = full_uvh5.data_array[:, :, :, pols1]
    flags = full_uvh5.flag_array[:, :, :, pols1]
    nsamples = full_uvh5.nsample_array[:, :, :, pols1]
    partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples,
                                 polarizations=full_uvh5.polarization_array[:Hpols])
    data = full_uvh5.data_array[:, :, :, pols2]
    flags = full_uvh5.flag_array[:, :, :, pols2]
    nsamples = full_uvh5.nsample_array[:, :, :, pols2]
    partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples,
                                 polarizations=full_uvh5.polarization_array[Hpols:])
    # read in the full file and make sure it matches
    partial_uvh5.read(partial_testfile)
    nt.assert_equal(full_uvh5, partial_uvh5)
    # clean up
    os.remove(testfile)
    os.remove(partial_testfile)
    return
def test_UVH5PartialWriteIrregular():
    """
    Test writing a uvh5 file using irregular intervals

    Covers partial writes over single and irregular (non-uniformly spaced)
    blt, frequency, and polarization index sets, in every pairwise
    combination and all together. After each write the file on disk must
    match an in-memory object with the same slices filled in.
    """
    def initialize_with_zeros(uvd, filename):
        """
        Initialize a file with all zeros for data arrays
        """
        uvd.initialize_uvh5_file(filename, clobber=True)
        data_shape = (uvd.Nblts, 1, uvd.Nfreqs, uvd.Npols)
        data = np.zeros(data_shape, dtype=np.complex64)
        flags = np.zeros(data_shape, dtype=np.bool_)
        nsamples = np.zeros(data_shape, dtype=np.float32)
        with h5py.File(filename, 'r+') as f:
            dgrp = f['/Data']
            # write into the datasets in place; the original's plain
            # "name = array" assignments only rebound local variables and
            # never touched the file contents
            dgrp['visdata'][:, :, :, :] = data
            dgrp['flags'][:, :, :, :] = flags
            dgrp['nsamples'][:, :, :, :] = nsamples
        return

    full_uvh5 = UVData()
    uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')
    uvtest.checkWarnings(full_uvh5.read_uvfits, [uvfits_file], message='Telescope EVLA is not')
    testfile = os.path.join(DATA_PATH, 'test', 'outtest.uvh5')
    full_uvh5.write_uvh5(testfile, clobber=True)
    full_uvh5.read(testfile)
    partial_testfile = os.path.join(DATA_PATH, 'test', 'outtest_partial.uvh5')

    def fresh_partial():
        """Return a copy of full_uvh5 with all-zero data arrays, matching a
        freshly initialized all-zero file on disk."""
        partial = copy.deepcopy(full_uvh5)
        partial.data_array = None
        partial.flag_array = None
        partial.nsample_array = None
        # initialize file on disk
        initialize_with_zeros(partial, partial_testfile)
        # make a mostly empty object in memory to match what we'll write to disk
        partial.data_array = np.zeros_like(full_uvh5.data_array, dtype=np.complex64)
        partial.flag_array = np.zeros_like(full_uvh5.flag_array, dtype=np.bool_)
        partial.nsample_array = np.zeros_like(full_uvh5.nsample_array, dtype=np.float32)
        return partial

    def check_disk_matches(partial):
        """Read the partial file back in and verify it matches *partial*."""
        partial_uvh5_file = UVData()
        partial_uvh5_file.read(partial_testfile)
        nt.assert_equal(partial_uvh5_file, partial)

    # write a single blt to file
    partial_uvh5 = fresh_partial()
    blt_inds = np.arange(1)
    data = full_uvh5.data_array[blt_inds, :, :, :]
    flags = full_uvh5.flag_array[blt_inds, :, :, :]
    nsamples = full_uvh5.nsample_array[blt_inds, :, :, :]
    partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples, blt_inds=blt_inds)
    # also write the arrays to the partial object
    partial_uvh5.data_array[blt_inds, :, :, :] = data
    partial_uvh5.flag_array[blt_inds, :, :, :] = flags
    partial_uvh5.nsample_array[blt_inds, :, :, :] = nsamples
    check_disk_matches(partial_uvh5)

    # do it again, with a single frequency
    partial_uvh5 = fresh_partial()
    freq_inds = np.arange(1)
    data = full_uvh5.data_array[:, :, freq_inds, :]
    flags = full_uvh5.flag_array[:, :, freq_inds, :]
    nsamples = full_uvh5.nsample_array[:, :, freq_inds, :]
    partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples,
                                 freq_chans=freq_inds)
    # also write the arrays to the partial object
    partial_uvh5.data_array[:, :, freq_inds, :] = data
    partial_uvh5.flag_array[:, :, freq_inds, :] = flags
    partial_uvh5.nsample_array[:, :, freq_inds, :] = nsamples
    check_disk_matches(partial_uvh5)

    # do it again, with a single polarization
    partial_uvh5 = fresh_partial()
    pol_inds = np.arange(1)
    data = full_uvh5.data_array[:, :, :, pol_inds]
    flags = full_uvh5.flag_array[:, :, :, pol_inds]
    nsamples = full_uvh5.nsample_array[:, :, :, pol_inds]
    partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples,
                                 polarizations=partial_uvh5.polarization_array[pol_inds])
    # also write the arrays to the partial object
    partial_uvh5.data_array[:, :, :, pol_inds] = data
    partial_uvh5.flag_array[:, :, :, pol_inds] = flags
    partial_uvh5.nsample_array[:, :, :, pol_inds] = nsamples
    check_disk_matches(partial_uvh5)

    # test irregularly spaced blts and freqs
    partial_uvh5 = fresh_partial()
    blt_inds = [0, 1, 2, 7]
    freq_inds = [0, 2, 3, 4]
    data_shape = (len(blt_inds), 1, len(freq_inds), full_uvh5.Npols)
    data = np.zeros(data_shape, dtype=np.complex64)
    flags = np.zeros(data_shape, dtype=np.bool_)
    nsamples = np.zeros(data_shape, dtype=np.float32)
    for iblt, blt_idx in enumerate(blt_inds):
        for ifreq, freq_idx in enumerate(freq_inds):
            data[iblt, :, ifreq, :] = full_uvh5.data_array[blt_idx, :, freq_idx, :]
            flags[iblt, :, ifreq, :] = full_uvh5.flag_array[blt_idx, :, freq_idx, :]
            nsamples[iblt, :, ifreq, :] = full_uvh5.nsample_array[blt_idx, :, freq_idx, :]
    uvtest.checkWarnings(partial_uvh5.write_uvh5_part, [partial_testfile, data, flags, nsamples],
                         {'blt_inds': blt_inds, 'freq_chans': freq_inds},
                         message='Selected frequencies are not evenly spaced')
    # also write the arrays to the partial object
    for iblt, blt_idx in enumerate(blt_inds):
        for ifreq, freq_idx in enumerate(freq_inds):
            partial_uvh5.data_array[blt_idx, :, freq_idx, :] = data[iblt, :, ifreq, :]
            partial_uvh5.flag_array[blt_idx, :, freq_idx, :] = flags[iblt, :, ifreq, :]
            partial_uvh5.nsample_array[blt_idx, :, freq_idx, :] = nsamples[iblt, :, ifreq, :]
    check_disk_matches(partial_uvh5)

    # test irregularly spaced freqs and pols
    partial_uvh5 = fresh_partial()
    freq_inds = [0, 1, 2, 7]
    pol_inds = [0, 1, 3]
    data_shape = (full_uvh5.Nblts, 1, len(freq_inds), len(pol_inds))
    data = np.zeros(data_shape, dtype=np.complex64)
    flags = np.zeros(data_shape, dtype=np.bool_)
    nsamples = np.zeros(data_shape, dtype=np.float32)
    for ifreq, freq_idx in enumerate(freq_inds):
        for ipol, pol_idx in enumerate(pol_inds):
            data[:, :, ifreq, ipol] = full_uvh5.data_array[:, :, freq_idx, pol_idx]
            flags[:, :, ifreq, ipol] = full_uvh5.flag_array[:, :, freq_idx, pol_idx]
            nsamples[:, :, ifreq, ipol] = full_uvh5.nsample_array[:, :, freq_idx, pol_idx]
    uvtest.checkWarnings(partial_uvh5.write_uvh5_part, [partial_testfile, data, flags, nsamples],
                         {'freq_chans': freq_inds, 'polarizations': full_uvh5.polarization_array[pol_inds]},
                         nwarnings=2, message=['Selected frequencies are not evenly spaced',
                                               'Selected polarization values are not evenly spaced'])
    # also write the arrays to the partial object
    for ifreq, freq_idx in enumerate(freq_inds):
        for ipol, pol_idx in enumerate(pol_inds):
            partial_uvh5.data_array[:, :, freq_idx, pol_idx] = data[:, :, ifreq, ipol]
            partial_uvh5.flag_array[:, :, freq_idx, pol_idx] = flags[:, :, ifreq, ipol]
            partial_uvh5.nsample_array[:, :, freq_idx, pol_idx] = nsamples[:, :, ifreq, ipol]
    check_disk_matches(partial_uvh5)

    # test irregularly spaced blts and pols
    partial_uvh5 = fresh_partial()
    blt_inds = [0, 1, 2, 7]
    pol_inds = [0, 1, 3]
    data_shape = (len(blt_inds), 1, full_uvh5.Nfreqs, len(pol_inds))
    data = np.zeros(data_shape, dtype=np.complex64)
    flags = np.zeros(data_shape, dtype=np.bool_)
    nsamples = np.zeros(data_shape, dtype=np.float32)
    for iblt, blt_idx in enumerate(blt_inds):
        for ipol, pol_idx in enumerate(pol_inds):
            data[iblt, :, :, ipol] = full_uvh5.data_array[blt_idx, :, :, pol_idx]
            flags[iblt, :, :, ipol] = full_uvh5.flag_array[blt_idx, :, :, pol_idx]
            nsamples[iblt, :, :, ipol] = full_uvh5.nsample_array[blt_idx, :, :, pol_idx]
    uvtest.checkWarnings(partial_uvh5.write_uvh5_part, [partial_testfile, data, flags, nsamples],
                         {'blt_inds': blt_inds, 'polarizations': full_uvh5.polarization_array[pol_inds]},
                         message='Selected polarization values are not evenly spaced')
    # also write the arrays to the partial object
    for iblt, blt_idx in enumerate(blt_inds):
        for ipol, pol_idx in enumerate(pol_inds):
            partial_uvh5.data_array[blt_idx, :, :, pol_idx] = data[iblt, :, :, ipol]
            partial_uvh5.flag_array[blt_idx, :, :, pol_idx] = flags[iblt, :, :, ipol]
            partial_uvh5.nsample_array[blt_idx, :, :, pol_idx] = nsamples[iblt, :, :, ipol]
    check_disk_matches(partial_uvh5)

    # test irregularly spaced freqs and pols
    # (repeat of the scenario above; kept to preserve the original coverage)
    partial_uvh5 = fresh_partial()
    freq_inds = [0, 1, 2, 7]
    pol_inds = [0, 1, 3]
    data_shape = (full_uvh5.Nblts, 1, len(freq_inds), len(pol_inds))
    data = np.zeros(data_shape, dtype=np.complex64)
    flags = np.zeros(data_shape, dtype=np.bool_)
    nsamples = np.zeros(data_shape, dtype=np.float32)
    for ifreq, freq_idx in enumerate(freq_inds):
        for ipol, pol_idx in enumerate(pol_inds):
            data[:, :, ifreq, ipol] = full_uvh5.data_array[:, :, freq_idx, pol_idx]
            flags[:, :, ifreq, ipol] = full_uvh5.flag_array[:, :, freq_idx, pol_idx]
            nsamples[:, :, ifreq, ipol] = full_uvh5.nsample_array[:, :, freq_idx, pol_idx]
    uvtest.checkWarnings(partial_uvh5.write_uvh5_part, [partial_testfile, data, flags, nsamples],
                         {'freq_chans': freq_inds, 'polarizations': full_uvh5.polarization_array[pol_inds]},
                         nwarnings=2, message=['Selected frequencies are not evenly spaced',
                                               'Selected polarization values are not evenly spaced'])
    # also write the arrays to the partial object
    for ifreq, freq_idx in enumerate(freq_inds):
        for ipol, pol_idx in enumerate(pol_inds):
            partial_uvh5.data_array[:, :, freq_idx, pol_idx] = data[:, :, ifreq, ipol]
            partial_uvh5.flag_array[:, :, freq_idx, pol_idx] = flags[:, :, ifreq, ipol]
            partial_uvh5.nsample_array[:, :, freq_idx, pol_idx] = nsamples[:, :, ifreq, ipol]
    check_disk_matches(partial_uvh5)

    # test irregularly spaced everything
    partial_uvh5 = fresh_partial()
    blt_inds = [0, 1, 2, 7]
    freq_inds = [0, 2, 3, 4]
    pol_inds = [0, 1, 3]
    data_shape = (len(blt_inds), 1, len(freq_inds), len(pol_inds))
    data = np.zeros(data_shape, dtype=np.complex64)
    flags = np.zeros(data_shape, dtype=np.bool_)
    nsamples = np.zeros(data_shape, dtype=np.float32)
    for iblt, blt_idx in enumerate(blt_inds):
        for ifreq, freq_idx in enumerate(freq_inds):
            for ipol, pol_idx in enumerate(pol_inds):
                data[iblt, :, ifreq, ipol] = full_uvh5.data_array[blt_idx, :, freq_idx, pol_idx]
                flags[iblt, :, ifreq, ipol] = full_uvh5.flag_array[blt_idx, :, freq_idx, pol_idx]
                nsamples[iblt, :, ifreq, ipol] = full_uvh5.nsample_array[blt_idx, :, freq_idx, pol_idx]
    uvtest.checkWarnings(partial_uvh5.write_uvh5_part, [partial_testfile, data, flags, nsamples],
                         {'blt_inds': blt_inds, 'freq_chans': freq_inds,
                          'polarizations': full_uvh5.polarization_array[pol_inds]},
                         nwarnings=2, message=['Selected frequencies are not evenly spaced',
                                               'Selected polarization values are not evenly spaced'])
    # also write the arrays to the partial object
    for iblt, blt_idx in enumerate(blt_inds):
        for ifreq, freq_idx in enumerate(freq_inds):
            for ipol, pol_idx in enumerate(pol_inds):
                partial_uvh5.data_array[blt_idx, :, freq_idx, pol_idx] = data[iblt, :, ifreq, ipol]
                partial_uvh5.flag_array[blt_idx, :, freq_idx, pol_idx] = flags[iblt, :, ifreq, ipol]
                partial_uvh5.nsample_array[blt_idx, :, freq_idx, pol_idx] = nsamples[iblt, :, ifreq, ipol]
    check_disk_matches(partial_uvh5)
    # clean up
    os.remove(testfile)
    return
def test_UVH5PartialWriteErrors():
    """
    Test errors in uvh5_write_part method

    Checks that writing to a nonexistent file, passing mismatched array
    sizes, passing wrongly shaped arrays, and using an object whose header
    does not match the file all raise AssertionError.
    """
    full_uvh5 = UVData()
    partial_uvh5 = UVData()
    uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')
    uvtest.checkWarnings(full_uvh5.read_uvfits, [uvfits_file], message='Telescope EVLA is not')
    testfile = os.path.join(DATA_PATH, 'test', 'outtest.uvh5')
    full_uvh5.write_uvh5(testfile, clobber=True)
    full_uvh5.read(testfile)
    # get a waterfall
    antpairpols = full_uvh5.get_antpairpols()
    key = antpairpols[0]
    data = full_uvh5.get_data(key, squeeze='none')
    # bugfix: fetch flags and nsamples with the matching accessors; the
    # original copy-pasted get_data, yielding complex arrays for all three
    flags = full_uvh5.get_flags(key, squeeze='none')
    nsamples = full_uvh5.get_nsamples(key, squeeze='none')
    # delete data arrays in partial file
    partial_uvh5 = copy.deepcopy(full_uvh5)
    partial_uvh5.data_array = None
    partial_uvh5.flag_array = None
    partial_uvh5.nsample_array = None
    # try to write to a file that doesn't exist
    partial_testfile = os.path.join(DATA_PATH, 'test', 'outtest_partial.uvh5')
    if os.path.exists(partial_testfile):
        os.remove(partial_testfile)
    nt.assert_raises(AssertionError, partial_uvh5.write_uvh5_part, partial_testfile, data,
                     flags, nsamples, bls=key)
    # initialize file on disk
    partial_uvh5.initialize_uvh5_file(partial_testfile, clobber=True)
    # pass in arrays that are different sizes
    nt.assert_raises(AssertionError, partial_uvh5.write_uvh5_part, partial_testfile, data,
                     flags[:, :, :, 0], nsamples, bls=key)
    nt.assert_raises(AssertionError, partial_uvh5.write_uvh5_part, partial_testfile, data,
                     flags, nsamples[:, :, :, 0], bls=key)
    # pass in arrays that are the same size, but don't match expected shape
    nt.assert_raises(AssertionError, partial_uvh5.write_uvh5_part, partial_testfile, data[:, :, :, 0],
                     flags[:, :, :, 0], nsamples[:, :, :, 0])
    # pass in a different object so check_header fails
    empty_uvd = UVData()
    nt.assert_raises(AssertionError, empty_uvd.write_uvh5_part, partial_testfile, data,
                     flags, nsamples, bls=key)
    # clean up
    os.remove(testfile)
    os.remove(partial_testfile)
    return
def test_UVH5InitializeFile():
    """
    Test initializing a UVH5 file on disk

    An initialized file carries all metadata but no data; reading it back
    with read_data=False must reproduce the metadata-only object.
    """
    full_uvh5 = UVData()
    partial_uvh5 = UVData()
    uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')
    uvtest.checkWarnings(full_uvh5.read_uvfits, [uvfits_file], message='Telescope EVLA is not')
    testfile = os.path.join(DATA_PATH, 'test', 'outtest.uvh5')
    full_uvh5.write_uvh5(testfile, clobber=True)
    full_uvh5.read(testfile)
    # drop the data arrays so the reference object is metadata-only
    full_uvh5.data_array = None
    full_uvh5.flag_array = None
    full_uvh5.nsample_array = None
    # initialize file
    partial_uvh5 = copy.deepcopy(full_uvh5)
    partial_testfile = os.path.join(DATA_PATH, 'test', 'outtest_partial.uvh5')
    partial_uvh5.initialize_uvh5_file(partial_testfile, clobber=True)
    # read it in and make sure that the metadata matches the original
    partial_uvh5.read(partial_testfile, read_data=False)
    nt.assert_equal(partial_uvh5, full_uvh5)
    # add options for compression
    partial_uvh5.initialize_uvh5_file(partial_testfile, clobber=True, data_compression="lzf",
                                      flags_compression=None, nsample_compression=None)
    partial_uvh5.read(partial_testfile, read_data=False)
    nt.assert_equal(partial_uvh5, full_uvh5)
    # check that an error is raised when the file exists and clobber is False
    nt.assert_raises(ValueError, partial_uvh5.initialize_uvh5_file, partial_testfile, clobber=False)
    # clean up
    os.remove(testfile)
    os.remove(partial_testfile)
    return
def test_UVH5SingleIntegrationTime():
    """
    Check backwards compatibility warning for files with a single integration time
    """
    uv_in = UVData()
    uv_out = UVData()
    uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')
    testfile = os.path.join(DATA_PATH, 'test', 'outtest_uvfits.uvh5')
    uvtest.checkWarnings(uv_in.read_uvfits, [uvfits_file], message='Telescope EVLA is not')
    uv_in.write_uvh5(testfile, clobber=True)
    # change integration_time in file to be a single number, mimicking the
    # old uvh5 format where it was a scalar rather than a per-blt array
    with h5py.File(testfile, 'r+') as f:
        # NOTE(review): ``.value`` is the pre-h5py-3.0 dataset accessor;
        # newer h5py spells this ``f['/Header/integration_time'][0]``
        int_time = f['/Header/integration_time'].value[0]
        del(f['/Header/integration_time'])
        f['/Header/integration_time'] = int_time
    uvtest.checkWarnings(uv_out.read_uvh5, [testfile], message='outtest_uvfits.uvh5 appears to be an old uvh5 format')
    nt.assert_equal(uv_in, uv_out)
    # clean up
    os.remove(testfile)
    return
def test_UVH5LstArray():
    """
    Test different cases of the lst_array

    Covers (1) a file with no lst_array, which must be recomputed on read,
    and (2) a file whose stored LSTs disagree with time_array, which must
    warn on read.
    """
    uv_in = UVData()
    uv_out = UVData()
    uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')
    testfile = os.path.join(DATA_PATH, 'test', 'outtest_uvfits.uvh5')
    uvtest.checkWarnings(uv_in.read_uvfits, [uvfits_file], message='Telescope EVLA is not')
    uv_in.write_uvh5(testfile, clobber=True)
    # remove lst_array from file; check that it's correctly computed on read
    with h5py.File(testfile, 'r+') as f:
        del(f['/Header/lst_array'])
    uv_out.read_uvh5(testfile)
    nt.assert_equal(uv_in, uv_out)
    # now change what's in the file and make sure a warning is raised
    uv_in.write_uvh5(testfile, clobber=True)
    with h5py.File(testfile, 'r+') as f:
        # NOTE(review): ``.value`` is the pre-h5py-3.0 accessor
        lst_array = f['/Header/lst_array'].value
        del(f['/Header/lst_array'])
        # doubled LSTs no longer match time_array -> warning expected on read
        f['/Header/lst_array'] = 2 * lst_array
    uvtest.checkWarnings(uv_out.read_uvh5, [testfile],
                         message='LST values stored in outtest_uvfits.uvh5 are not self-consistent')
    # the read keeps the (bad) on-disk LSTs; restore them before comparing
    uv_out.lst_array = lst_array
    nt.assert_equal(uv_in, uv_out)
    # clean up
    os.remove(testfile)
    return
|
<filename>celery/tests/test_worker_control.py<gh_stars>1-10
import socket
import unittest2 as unittest
from celery import conf
from celery.decorators import task
from celery.registry import tasks
from celery.task.builtins import PingTask
from celery.utils import gen_unique_id
from celery.worker import control
from celery.worker.buckets import FastQueue
from celery.worker.state import revoked
from celery.worker.scheduler import Scheduler
# this host's name; used as the control-message "destination" in the tests below
hostname = socket.gethostname()
@task(rate_limit=200) # for extra info in dump_tasks
def mytask():
    # no-op task registered so dump_tasks/revoke tests have a known task name
    pass
class Dispatcher(object):
    """Stand-in for the worker's event dispatcher.

    Records every event passed to ``send`` and tracks whether event
    dispatch has been switched on or off.
    """

    # None until enable()/disable() is called, then True/False
    enabled = None

    def __init__(self, *args, **kwargs):
        # accept (and ignore) any constructor signature
        self.sent = []

    def send(self, event):
        self.sent.append(event)

    def enable(self):
        self.enabled = True

    def disable(self):
        self.enabled = False
class Listener(object):
    """Minimal stand-in for the worker listener used by the control panel tests."""
    def __init__(self):
        # ready_queue is primed with one reserved "task" for dump_reserved
        self.ready_queue = FastQueue()
        self.ready_queue.put("the quick brown fox")
        self.eta_schedule = Scheduler(self.ready_queue)
        self.event_dispatcher = Dispatcher()
class test_ControlPanel(unittest.TestCase):
    """Tests for the worker's remote-control panel (ControlDispatch)."""
    def setUp(self):
        self.panel = self.create_panel(listener=Listener())
    def create_panel(self, **kwargs):
        # bind the panel to this host so broadcast commands match this node
        return control.ControlDispatch(hostname=hostname, **kwargs)
    def test_disable_events(self):
        listener = Listener()
        panel = self.create_panel(listener=listener)
        listener.event_dispatcher.enabled = True
        panel.execute("disable_events")
        # dispatch is turned off and an offline event is emitted
        self.assertEqual(listener.event_dispatcher.enabled, False)
        self.assertIn("worker-offline", listener.event_dispatcher.sent)
    def test_enable_events(self):
        listener = Listener()
        panel = self.create_panel(listener=listener)
        listener.event_dispatcher.enabled = False
        panel.execute("enable_events")
        # dispatch is turned on and an online event is emitted
        self.assertEqual(listener.event_dispatcher.enabled, True)
        self.assertIn("worker-online", listener.event_dispatcher.sent)
    def test_dump_tasks(self):
        info = "\n".join(self.panel.execute("dump_tasks"))
        # mytask (registered above with rate_limit=200) must appear
        self.assertIn("mytask", info)
        self.assertIn("rate_limit=200", info)
    def test_dump_schedule(self):
        listener = Listener()
        panel = self.create_panel(listener=listener)
        # empty schedule dumps falsy; one entry makes it truthy
        self.assertFalse(panel.execute("dump_schedule"))
        listener.eta_schedule.enter("foo", eta=100)
        self.assertTrue(panel.execute("dump_schedule"))
    def test_dump_reserved(self):
        listener = Listener()
        panel = self.create_panel(listener=listener)
        info = "\n".join(panel.execute("dump_reserved"))
        self.assertIn("the quick brown fox", info)
        # an empty ready queue must yield an empty dump
        listener.ready_queue = FastQueue()
        info = "\n".join(panel.execute("dump_reserved"))
        self.assertFalse(info)
    def test_rate_limit_when_disabled(self):
        conf.DISABLE_RATE_LIMITS = True
        try:
            e = self.panel.execute("rate_limit", kwargs=dict(
                task_name=mytask.name, rate_limit="100/m"))
            self.assertIn("rate limits disabled", e.get("error"))
        finally:
            # restore the global so other tests are unaffected
            conf.DISABLE_RATE_LIMITS = False
    def test_rate_limit_invalid_rate_limit_string(self):
        e = self.panel.execute("rate_limit", kwargs=dict(
            task_name="tasks.add", rate_limit="x1240301#%!"))
        self.assertIn("Invalid rate limit string", e.get("error"))
    def test_rate_limit(self):
        # local stub listener: only tracks that the ready queue was refreshed
        class Listener(object):
            class ReadyQueue(object):
                fresh = False
                def refresh(self):
                    self.fresh = True
            def __init__(self):
                self.ready_queue = self.ReadyQueue()
        listener = Listener()
        panel = self.create_panel(listener=listener)
        task = tasks[PingTask.name]
        old_rate_limit = task.rate_limit
        try:
            # setting a rate limit updates the task and refreshes the queue
            panel.execute("rate_limit", kwargs=dict(task_name=task.name,
                                                    rate_limit="100/m"))
            self.assertEqual(task.rate_limit, "100/m")
            self.assertTrue(listener.ready_queue.fresh)
            listener.ready_queue.fresh = False
            panel.execute("rate_limit", kwargs=dict(task_name=task.name,
                                                    rate_limit=0))
            self.assertEqual(task.rate_limit, 0)
            self.assertTrue(listener.ready_queue.fresh)
        finally:
            task.rate_limit = old_rate_limit
    def test_rate_limit_nonexistant_task(self):
        # unknown task names must not blow up the panel
        self.panel.execute("rate_limit", kwargs={
            "task_name": "xxxx.does.not.exist",
            "rate_limit": "1000/s"})
    def test_unexposed_command(self):
        # unknown commands are ignored rather than raising
        self.panel.execute("foo", kwargs={})
    def test_revoke_with_name(self):
        uuid = gen_unique_id()
        m = {"command": "revoke",
             "destination": hostname,
             "task_id": uuid,
             "task_name": mytask.name}
        self.panel.dispatch_from_message(m)
        self.assertIn(uuid, revoked)
    def test_revoke_with_name_not_in_registry(self):
        # revoking an unregistered task name still records the id as revoked
        uuid = gen_unique_id()
        m = {"command": "revoke",
             "destination": hostname,
             "task_id": uuid,
             "task_name": "xxxxxxxxx33333333388888"}
        self.panel.dispatch_from_message(m)
        self.assertIn(uuid, revoked)
    def test_revoke(self):
        uuid = gen_unique_id()
        m = {"command": "revoke",
             "destination": hostname,
             "task_id": uuid}
        self.panel.dispatch_from_message(m)
        self.assertIn(uuid, revoked)
        # a message addressed to another destination must be ignored
        m = {"command": "revoke",
             "destination": "does.not.exist",
             "task_id": uuid + "xxx"}
        self.panel.dispatch_from_message(m)
        self.assertNotIn(uuid + "xxx", revoked)
    def test_ping(self):
        m = {"command": "ping",
             "destination": hostname}
        r = self.panel.dispatch_from_message(m)
        self.assertEqual(r, "pong")
    def test_shutdown(self):
        m = {"command": "shutdown",
             "destination": hostname}
        self.assertRaises(SystemExit, self.panel.dispatch_from_message, m)
    def test_panel_reply(self):
        replies = []
        # capture replies instead of publishing them to a broker
        class MockReplyPublisher(object):
            def __init__(self, *args, **kwargs):
                pass
            def send(self, reply, **kwargs):
                replies.append(reply)
            def close(self):
                pass
        class _Dispatch(control.ControlDispatch):
            ReplyPublisher = MockReplyPublisher
        panel = _Dispatch(hostname, listener=Listener())
        r = panel.execute("ping", reply_to={"exchange": "x",
                                            "routing_key": "x"})
        self.assertEqual(r, "pong")
        # the reply is keyed by the responding hostname
        self.assertDictEqual(replies[0], {panel.hostname: "pong"})
|
# TODO: real error handling
# TODO: fix how ports work
from curses import A_REVERSE
INT_MIN = -999
INT_MAX = 999
CODE_LINES = 15
LINE_LEN = 18
MODE_RUN = 0
MODE_READ = 1
MODE_WRITE = 2
# Argument-kind markers used by Node.argument_rules / Node.check_args.
SRC = 0
DST = 1
LABEL = 2
REG_PORTS = ["UP", "DOWN", "LEFT", "RIGHT"]
PORTS = REG_PORTS + ["ANY", "LAST"]
REGISTERS = ["ACC", "NIL"]
# ASCII arrow art for ports: index 0 = vertical, index 1 = horizontal.
port_arrows = [[" | ^",
                " | |",
                " v |"],
               ["",
                " -->",
                "",
                " <--"]]
# Leading padding for a printed port value, indexed by len(str(value)).
# Index 0 is never used (a value string is at least one char); ``False``
# is kept as the original placeholder and yields '' (' ' * False == '').
# Fixed: this was built with map(), whose result is a lazy, non-subscriptable
# iterator under Python 3, breaking Port.print_nc's indexing; a list
# comprehension is equivalent under Python 2 and correct under Python 3.
port_val_spacing = [' ' * x for x in [False, 2, 1, 1, 0]]
def clamp(val, low, high):
    """Return *val* clamped to the inclusive range [low, high]."""
    return min(max(val, low), high)
class Port:
    """Single-value mailbox connecting two nodes.

    A port holds at most one integer at a time: a writer ``give``s a value
    and a reader ``take``s it, emptying the port again.
    """

    def __init__(self):
        # Currently stored value; None means the port is empty.
        self.val = None

    def __str__(self):
        return "Val: %s" % ("?" if self.val is None else str(self.val))

    def peek(self):
        """Return the stored value (or None) without consuming it."""
        return self.val

    def take(self):
        """Remove and return the stored value; the port becomes empty."""
        ret = self.val
        self.val = None
        return ret

    def give(self, val):
        """Store *val* in the port.

        Raises ValueError if the port is already occupied.  (The original
        used a bare ``raise`` here, which itself errors out when no
        exception is active, and compared with ``!= None``.)
        """
        if self.val is not None:
            raise ValueError("port already holds a value")
        self.val = val

    def print_static_nc(self, window, direction):
        """Draw the static arrow art for this port onto curses *window*."""
        window.clear()
        for line_num, line in enumerate(port_arrows[direction]):
            window.addstr(line_num, 0, line)

    # TODO: print different val's when ports are fixed
    def print_nc(self, window, direction):
        """Draw the current value next to the arrows and refresh *window*."""
        valstr = '?' if self.val is None else str(self.val)
        if direction == 1:  # horizontal port
            window.addstr(0, 0, port_val_spacing[len(valstr)] + valstr)
            window.addstr(4, 0, port_val_spacing[len(valstr)] + valstr)
        else:  # vertical port
            window.addstr(1, 0, valstr.rjust(4))
            window.addstr(1, 7, valstr.ljust(4))
        window.refresh()
class Node:
    """One emulated TIS-100 compute node.

    Holds the ACC/BAK registers, the attached ports, the raw source text,
    the assembled program, and the program counter advanced by step().
    """

    def __init__(self):
        self.acc = 0
        self.bak = 0
        self.mode = MODE_RUN
        self.ports = {}
        self.ports["LAST"] = None
        self.code = ""            # raw source text
        self.code_lines = []      # padded display lines (built by str_static)
        self.program = ""         # becomes a list of (fn, args...) in assemble()
        self.labels = {}          # label name -> instruction index
        self.pc = 0
        self.prev_pc = 0
        self.program_length = 0
        self.display_lines = []   # instruction index -> source line number
        self.program_name = None
        # Opcode dispatch table.
        self.instructions = {
            "NOP": self.nop,
            "MOV": self.mov,
            "SWP": self.swp,
            "SAV": self.sav,
            "ADD": self.add,
            "SUB": self.sub,
            "NEG": self.neg,
            "JMP": self.jmp,
            "JEZ": self.jez,
            "JNZ": self.jnz,
            "JGZ": self.jgz,
            "JLZ": self.jlz,
            "JRO": self.jro
        }
        # Expected argument kinds per opcode (SRC/DST/LABEL markers).
        self.argument_rules = {
            "NOP": (),
            "MOV": (SRC, DST),
            "SWP": (),
            "SAV": (),
            "ADD": (SRC,),
            "SUB": (SRC,),
            "NEG": (),
            "JMP": (LABEL,),
            "JEZ": (LABEL,),
            "JNZ": (LABEL,),
            "JGZ": (LABEL,),
            "JLZ": (LABEL,),
            "JRO": (SRC,)
        }

    def str_static(self):
        """Return the static ASCII frame: code box plus register labels."""
        self.code_lines = self.code.split('\n')
        self.code_lines += [''] * (CODE_LINES - len(self.code_lines))
        for c in range(CODE_LINES):
            self.code_lines[c] = self.code_lines[c].ljust(LINE_LEN, ' ')
        s = "--------------------------\n"
        for i in range(CODE_LINES):
            s += '|' + self.code_lines[i] + '|'
            if i == 0:
                s += " ACC |\n"
            elif i == 2:
                s += "-----|\n"
            elif i == 3:
                s += " BAK |\n"
            elif i == 5:
                s += "-----|\n"
            else:
                s += " |\n"
        s += "--------------------------"
        return s

    def print_static_nc(self, window):
        """Draw the static frame onto curses *window*."""
        window.clear()
        window.addstr(self.str_static())

    def print_nc(self, window):
        """Redraw the dynamic parts: current-line highlight and ACC/BAK."""
        # TODO: ~magic~numbers~
        if self.display_lines:
            window.addstr(self.display_lines[self.prev_pc]+1, 1,
                          self.code_lines[self.display_lines[self.prev_pc]])
            window.addstr(self.display_lines[self.pc]+1, 1,
                          self.code_lines[self.display_lines[self.pc]],
                          A_REVERSE)
        # NOTE(review): values above -100 are parenthesized while values at
        # or below -100 are not -- presumably to fit five columns; confirm.
        if self.acc <= -100:
            acc_str = str(self.acc).center(5)
        else:
            acc_str = ("(%s)" % (str(self.acc))).center(5)
        if self.bak <= -100:
            bak_str = str(self.bak).center(5)
        else:
            bak_str = ("(%s)" % (str(self.bak))).center(5)
        window.addstr(2, 20, acc_str.center(5))
        window.addstr(5, 20, bak_str.center(5))
        window.refresh()

    def add_port(self, name, port):
        """Add a port to the Node"""
        self.ports[name] = port

    def resolve_src(self, src):
        """Get the numerical value of a "SRC" argument.

        If the argument is a port, its value is consumed and the port is
        recorded as "LAST".  (The original placed the "LAST" bookkeeping
        after the return statement, where it could never run.)
        """
        if src in PORTS:
            val = self.ports[src].take()
            self.ports["LAST"] = self.ports[src]
            return val
        elif src == "ACC":
            return self.acc
        elif src == "NIL":
            return 0
        else:
            # Numeric literal: convert to int so ADD/SUB arithmetic works.
            # (The original returned the raw string, making e.g. "ADD 5"
            # raise a TypeError when added to the integer accumulator.)
            return int(src)

    def label_jump(self, label):
        """Return the PC delta that jumps to *label*."""
        return self.labels[label] - self.pc

    # Command functions all return the change in the PC after running

    def nop(self):
        # NOP is implemented as ADD NIL; the original called add() without
        # ``self.``, raising NameError the first time a NOP executed.
        return self.add("NIL")

    def mov(self, src, dst):
        if src in PORTS and self.ports[src].peek() is None:
            return 0  # block until the source port has a value
        elif dst in PORTS:
            if self.ports[dst].peek() is None:
                self.ports[dst].give(self.resolve_src(src))
            else:
                return 0  # block until the destination port is free
        elif dst == "ACC":  # TODO: acc doesn't get set? (node 2,0)
            self.acc = self.resolve_src(src)
        return 1

    def swp(self):
        """Swap ACC and BAK."""
        self.acc, self.bak = self.bak, self.acc
        return 1

    def sav(self):
        """Copy ACC into BAK."""
        self.bak = self.acc
        return 1

    def add(self, src):
        """ACC += src, clamped to [INT_MIN, INT_MAX]; blocks on empty port."""
        val = self.resolve_src(src)
        if val is None:
            return 0
        else:
            self.acc = clamp(self.acc + val, INT_MIN, INT_MAX)
            return 1

    def sub(self, src):
        """ACC -= src, clamped to [INT_MIN, INT_MAX]; blocks on empty port."""
        val = self.resolve_src(src)
        if val is None:
            return 0
        else:
            self.acc = clamp(self.acc - val, INT_MIN, INT_MAX)
            return 1

    def neg(self):
        """Negate ACC."""
        self.acc = -self.acc
        return 1

    def jmp(self, label):
        return self.label_jump(label)

    def jez(self, label):
        return self.label_jump(label) if self.acc == 0 else 1

    def jnz(self, label):
        return self.label_jump(label) if self.acc != 0 else 1

    def jgz(self, label):
        return self.label_jump(label) if self.acc > 0 else 1

    def jlz(self, label):
        return self.label_jump(label) if self.acc < 0 else 1

    def jro(self, src):
        """Jump by the resolved value of *src*; blocks on an empty port."""
        val = self.resolve_src(src)
        return 0 if val is None else val

    def assemble(self):
        """Doesn't actually "assemble": sets self.program to a list of
        (function, arg1, ..., argn) tuples and checks argument validity.

        Raises ValueError on malformed programs.  (The original used bare
        ``raise`` statements, which themselves error out when no exception
        is active.)
        """
        splitlines = []
        self.labels = {}
        self.program_length = 0
        self.display_lines = []
        lone_labels = []
        for line_num, raw_line in enumerate(self.code.split('\n')):
            title_pos = raw_line.find("##")  # program title
            if title_pos != -1:
                self.program_name = raw_line[title_pos+2:]
                # Strip the title off the line.  The original kept only the
                # single character at title_pos, corrupting the line.
                raw_line = raw_line[:title_pos]
            comment_pos = raw_line.find('#')  # comments
            if comment_pos != -1:
                raw_line = raw_line[:comment_pos]
            line = raw_line.strip().split()
            if line:
                if line[0][-1] == ':':  # label
                    if line[1:]:  # label with instruction
                        self.labels[line[0][:-1]] = self.program_length
                        for label in lone_labels:
                            self.labels[label] = self.program_length
                        lone_labels = []
                        splitlines.append(line[1:])
                        self.program_length += 1
                        self.display_lines.append(line_num)
                    else:  # label without instruction
                        lone_labels.append(line[0][:-1])
                else:  # instruction
                    splitlines.append(line)
                    for label in lone_labels:
                        self.labels[label] = self.program_length
                    lone_labels = []
                    self.program_length += 1
                    self.display_lines.append(line_num)
        self.program = []
        for line in splitlines:
            if line:
                if line[0] in self.instructions:
                    args = [arg.rstrip(',') for arg in line[1:]]
                    if not self.check_args(
                            args,
                            self.argument_rules[line[0]]):
                        raise ValueError("bad arguments: " + ' '.join(line))
                    else:
                        self.program.append(
                            (self.instructions[line[0]],)
                            + tuple(args))
                else:
                    raise ValueError("unknown instruction: " + line[0])
            else:
                # label finding is supposed to strip blank lines
                raise ValueError("blank line in assembled program")

    def check_args(self, args, rules):
        """Check that a list of arguments "args", fits the rules
        described by the "rules" array"""
        if len(args) != len(rules):
            return False
        for argnum in range(len(args)):
            if rules[argnum] == SRC:
                # A SRC is a port, a register, or a (possibly negative)
                # integer literal.  The original negative-number test read
                # ``args[0] == '-'`` -- the wrong element and the whole
                # string -- so negative literals were always rejected.
                if ((args[argnum] not in PORTS) and
                        (args[argnum] not in REGISTERS) and
                        (not args[argnum].isdigit()) and
                        (not (args[argnum][:1] == '-' and
                              args[argnum][1:].isdigit()))):
                    print("wrong SRC")
                    return False
            elif rules[argnum] == DST:
                if ((args[argnum] not in PORTS) and
                        (args[argnum] not in REGISTERS)):
                    print("wrong DST")
                    return False
            elif rules[argnum] == LABEL:
                if args[argnum] not in self.labels:
                    print("wrong label")
                    return False
            else:
                raise ValueError("unknown argument rule: %r" % (rules[argnum],))
        return True

    def step(self):
        """Execute the current instruction and advance the program counter,
        wrapping around the end of the program."""
        if self.program:
            instr = self.program[self.pc]
            self.prev_pc = self.pc
            self.pc = (self.pc + instr[0](*instr[1:])) % self.program_length
|
"""Config flow for Metlink departure info."""
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import logging
from typing import Any, Dict, Optional
from aiohttp import ClientResponseError
from homeassistant import config_entries, core
from homeassistant.const import CONF_API_KEY
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_registry import (
async_entries_for_config_entry,
async_get_registry,
)
import voluptuous as vol
from .MetlinkAPI import Metlink
from .const import (
CONF_DEST,
CONF_NUM_DEPARTURES,
CONF_ROUTE,
CONF_STOP_ID,
CONF_STOPS,
DOMAIN,
)
from .sensor import metlink_unique_id
_LOGGER = logging.getLogger(__name__)
AUTH_SCHEMA = vol.Schema({vol.Required(CONF_API_KEY): cv.string})
STOP_SCHEMA = vol.Schema(
{
vol.Required(CONF_STOP_ID): vol.All(cv.string, vol.Length(min=4, max=4)),
vol.Optional(CONF_ROUTE, default=""): vol.All(cv.string, vol.Length(max=3)),
vol.Optional(CONF_DEST, default=""): cv.string,
vol.Optional(CONF_NUM_DEPARTURES, default=1): cv.positive_int,
vol.Optional("add_another", default=False): cv.boolean,
}
)
async def validate_auth(apikey: str, hass: core.HomeAssistant) -> None:
    """Validate a Metlink API key.

    Performs a cheap predictions request against a dummy stop id; the
    server rejects the call when the key is invalid.

    Raises:
        ValueError: if the API key is rejected by the server.
    """
    session = async_get_clientsession(hass)
    metlink = Metlink(session, apikey)
    try:
        await metlink.get_predictions("9999")
    except ClientResponseError as err:
        _LOGGER.error("Metlink API Key rejected by server")
        # Chain the original HTTP error and give the ValueError a message
        # (the original raised a bare, message-less ValueError).
        raise ValueError("Invalid Metlink API key") from err
class MetlinkNZConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Metlink config flow: collect an API key, then one or more stops."""

    async def async_step_user(self, user_input: Optional[Dict[str, Any]] = None):
        """Invoked when a user initiates a flow from the user interface."""
        errors: Dict[str, str] = {}
        if user_input is not None:
            # Validate that the api key is valid.
            _LOGGER.debug("Validating user supplied API key.")
            try:
                await validate_auth(user_input[CONF_API_KEY], self.hass)
            except ValueError:
                _LOGGER.warning("API key validation failed, restarting config")
                errors["base"] = "auth"
            if not errors:
                # Stash the validated input; stops are collected next step.
                self.data = user_input
                self.data[CONF_STOPS] = []
                _LOGGER.info("Proceeding to configure stops")
                return await self.async_step_stop()
        _LOGGER.info("Starting configuration process")
        return self.async_show_form(
            step_id="user", data_schema=AUTH_SCHEMA, errors=errors
        )

    async def async_step_stop(self, user_input: Optional[Dict[str, Any]] = None):
        """Second step in config flow to add a stop to watch."""
        errors: Dict[str, str] = {}
        if user_input is not None:
            # Lazy %-style arguments so the message is only built if logged.
            _LOGGER.info("Adding stop %s to config.", user_input[CONF_STOP_ID])
            self.data[CONF_STOPS].append(
                {
                    CONF_STOP_ID: user_input[CONF_STOP_ID],
                    CONF_ROUTE: user_input.get(CONF_ROUTE),
                    CONF_DEST: user_input.get(CONF_DEST),
                    CONF_NUM_DEPARTURES: user_input.get(CONF_NUM_DEPARTURES, 1),
                }
            )
            # show the form again if add_another is ticked
            if user_input.get("add_another", False):
                _LOGGER.debug("Continuing to add another stop.")
                return await self.async_step_stop()
            # User is done adding stops, now create the config entry
            _LOGGER.info(
                "Saving config with %s stops.", len(self.data[CONF_STOPS])
            )
            return self.async_create_entry(title="Metlink", data=self.data)
        _LOGGER.debug("Showing stop configuration form")
        return self.async_show_form(
            step_id="stop", data_schema=STOP_SCHEMA, errors=errors
        )

    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
    """Handles options flow for the component."""

    def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
        """Remember the config entry whose options are being edited."""
        self.config_entry = config_entry

    async def async_step_init(
        self, user_input: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Manage the options for the component."""
        entity_registry = await async_get_registry(self.hass)
        entries = async_entries_for_config_entry(
            entity_registry, self.config_entry.entry_id
        )
        errors: Dict[str, str] = {}
        # entity_id -> display name for the multi-select widget, and
        # entity_id -> registry entry for removal bookkeeping below.
        all_stops = {e.entity_id: e.original_name for e in entries}
        stop_map = {e.entity_id: e for e in entries}
        if user_input is not None:
            # Lazy %-style args: only formatted when debug logging is on.
            _LOGGER.debug("Starting reconfiguration for %s", user_input)
            updated_stops = deepcopy(self.config_entry.data[CONF_STOPS])
            _LOGGER.debug("Stops before reconfiguration: %s", updated_stops)
            # Remove unchecked stops.
            removed_entities = [
                entity_id
                for entity_id in stop_map.keys()
                if entity_id not in user_input["stops"]
            ]
            for entity_id in removed_entities:
                # Unregister from HA
                entity_registry.async_remove(entity_id)
                # Remove from our configured stops.
                entry = stop_map[entity_id]
                entry_stop = entry.unique_id
                _LOGGER.info("Removing stop %s", entry_stop)
                updated_stops = [
                    e for e in updated_stops if metlink_unique_id(e) != entry_stop
                ]
            _LOGGER.debug("Stops after removals: %s", updated_stops)
            if user_input.get(CONF_STOP_ID):
                updated_stops.append(
                    {
                        CONF_STOP_ID: user_input[CONF_STOP_ID],
                        CONF_ROUTE: user_input.get(CONF_ROUTE),
                        CONF_DEST: user_input.get(CONF_DEST),
                        CONF_NUM_DEPARTURES: user_input.get(CONF_NUM_DEPARTURES, 1),
                    }
                )
            _LOGGER.debug("Reconfigured stops: %s", updated_stops)
            return self.async_create_entry(
                title="",
                data={CONF_STOPS: updated_stops},
            )
        options_schema = vol.Schema(
            {
                vol.Optional(
                    CONF_STOPS, default=list(all_stops.keys())
                ): cv.multi_select(all_stops),
                vol.Optional(CONF_STOP_ID): vol.All(
                    cv.string, vol.Length(min=4, max=4)
                ),
                vol.Optional(CONF_ROUTE, default=""): vol.All(
                    cv.string, vol.Length(max=3)
                ),
                vol.Optional(CONF_DEST, default=""): cv.string,
                vol.Optional(CONF_NUM_DEPARTURES, default=1): cv.positive_int,
            }
        )
        _LOGGER.debug("Showing Reconfiguration form")
        return self.async_show_form(
            step_id="init", data_schema=options_schema, errors=errors
        )
|
import unittest
from project.hero import Hero
class TestHero(unittest.TestCase):
    """Unit tests for Hero: attribute setup and the battle() outcomes."""
    # Fixture values shared by every test.
    USERNAME = "Main hero"
    LEVEL = 10
    HEALTH = 1000.1
    DAMAGE = 100.2
    def setUp(self):
        # A fresh hero per test so battles cannot leak state between tests.
        self.hero = Hero(self.USERNAME, self.LEVEL, self.HEALTH, self.DAMAGE)
    def test_hero__expect_valid_name_attr(self):
        # Constructor must store all four attributes unchanged.
        self.assertEqual(self.USERNAME, self.hero.username)
        self.assertEqual(self.LEVEL, self.hero.level)
        self.assertEqual(self.HEALTH, self.hero.health)
        self.assertEqual(self.DAMAGE, self.hero.damage)
    def test_hero_battle__when_battle_himself__expect_exception(self):
        # Fighting the very same object must be rejected.
        enemy_hero = self.hero
        with self.assertRaises(Exception) as ex:
            self.hero.battle(enemy_hero)
        expected_exception = "You cannot fight yourself"
        self.assertEqual(expected_exception, str(ex.exception))
    def test_hero_battle__when_health_equal_zero__expect_exception(self):
        enemy_hero = Hero("Enemy hero", 5, 100, 3)
        self.hero.health = 0
        with self.assertRaises(ValueError) as ex:
            self.hero.battle(enemy_hero)
        expected_exception = "Your health is lower than or equal to 0. You need to rest"
        self.assertEqual(expected_exception, str(ex.exception))
    def test_hero_battle__when_health_below_zero__expect_exception(self):
        enemy_hero = Hero("Enemy hero", 5, 100, 3)
        self.hero.health = -25
        with self.assertRaises(ValueError) as ex:
            self.hero.battle(enemy_hero)
        expected_exception = "Your health is lower than or equal to 0. You need to rest"
        self.assertEqual(expected_exception, str(ex.exception))
    def test_hero_battle__when_enemy_hero_health_equal_zero__expect_exception(self):
        enemy_hero = Hero("Enemy hero", 5, 0, 3)
        with self.assertRaises(ValueError) as ex:
            self.hero.battle(enemy_hero)
        expected_exception = f"You cannot fight {enemy_hero.username}. He needs to rest"
        self.assertEqual(expected_exception, str(ex.exception))
    def test_hero_battle__when_enemy_hero_health_below_zero__expect_exception(self):
        enemy_hero = Hero("Enemy hero", 5, -100, 3)
        with self.assertRaises(ValueError) as ex:
            self.hero.battle(enemy_hero)
        expected_exception = f"You cannot fight {enemy_hero.username}. He needs to rest"
        self.assertEqual(expected_exception, str(ex.exception))
    def test_hero_battle__when_battle_is_draw__expect_msg(self):
        # Identical stats on both sides must produce a draw.
        enemy_hero = Hero("Enemy hero", self.LEVEL, self.HEALTH, self.DAMAGE)
        expected_result = "Draw"
        actual_result = self.hero.battle(enemy_hero)
        self.assertEqual(expected_result, actual_result)
    def test_hero_battle__when_hero_wins__expect_msg(self):
        enemy_hero = Hero("Another_hero", 5, 100, 3)
        expected_result = "You win"
        # Per the tested contract: the winner gains one level and +5 damage,
        # and ends with health reduced by (enemy damage * enemy level) then
        # increased by 5.
        expected_level = self.LEVEL + 1
        expected_health = self.HEALTH - (enemy_hero.damage * enemy_hero.level) + 5
        expected_damage = self.DAMAGE + 5
        actual_result = self.hero.battle(enemy_hero)
        self.assertEqual(expected_result, actual_result)
        self.assertEqual(expected_level, self.hero.level)
        self.assertEqual(expected_health, self.hero.health)
        self.assertEqual(expected_damage, self.hero.damage)
    def test_hero_battle__when_hero_lose__expect_msg(self):
        enemy_hero = Hero("Another_hero", 11, 10000, 110)
        expected_result = "You lose"
        # When the enemy wins, the same victory bonuses apply to the enemy.
        expected_level = enemy_hero.level + 1
        expected_health = enemy_hero.health - (self.hero.damage * self.hero.level) + 5
        expected_damage = enemy_hero.damage + 5
        actual_result = self.hero.battle(enemy_hero)
        self.assertEqual(expected_result, actual_result)
        self.assertEqual(expected_level, enemy_hero.level)
        self.assertEqual(expected_health, enemy_hero.health)
        self.assertEqual(expected_damage, enemy_hero.damage)
    def test_hero__str_repr(self):
        # __str__ reports name, level, health and damage on separate lines.
        expected_result = f"Hero {self.USERNAME}: {self.LEVEL} lvl\nHealth: {self.HEALTH}\nDamage: {self.DAMAGE}\n"
        actual_result = self.hero.__str__()
        self.assertEqual(expected_result, actual_result)
if __name__ == "__main__":
    # Allow running this test module directly with ``python``.
    unittest.main()
# Copyright (c) 2020, eQualit.ie inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from baskerville.features.feature_minutes_total import FeatureMinutesTotal
from baskerville.util.enums import FeatureComputeType
from pyspark.sql import functions as F, types as T
from tests.unit.baskerville_tests.helpers.spark_testing_base import \
FeatureSparkTestCase
class TestSparkMinutesTotal(FeatureSparkTestCase):
    """Spark tests for FeatureMinutesTotal: minutes elapsed since the
    subset's first_ever_request (0 when no prior request exists)."""
    def setUp(self):
        # Fresh feature instance per test.
        super(TestSparkMinutesTotal, self).setUp()
        self.feature = FeatureMinutesTotal()
    def test_instance(self):
        """The feature exposes the expected metadata and defaults."""
        self.assertTrue(hasattr(self.feature, 'feature_name'))
        self.assertTrue(hasattr(self.feature, 'COLUMNS'))
        self.assertTrue(hasattr(self.feature, 'DEPENDENCIES'))
        self.assertTrue(hasattr(self.feature, 'DEFAULT_VALUE'))
        self.assertTrue(hasattr(self.feature, 'compute_type'))
        self.assertTrue(self.feature.feature_name == 'minutes_total')
        self.assertTrue(self.feature.columns == ['@timestamp'])
        self.assertTrue(self.feature.dependencies == [])
        self.assertTrue(self.feature.DEFAULT_VALUE == 0.)
        self.assertTrue(self.feature.compute_type ==
                        FeatureComputeType.replace)
        self.assertIsNotNone(self.feature.feature_name)
        self.assertIsNotNone(self.feature.feature_default)
        self.assertTrue(isinstance(self.feature.feature_name, str))
        self.assertTrue(isinstance(self.feature.feature_default, float))
    def test_compute_single_record_first_subset(self):
        """Without a first_ever_request the computed value is 0 minutes."""
        ats_record = {
            "client_ip": '55.555.55.55',
            "client_request_host": 'host',
            "@timestamp": '2018-01-17T08:30:00.000Z',
            "content_type": 'application/javascript',
            "client_url": 'page1/page2/page3?query',
        }
        sub_df = self.get_df_with_extra_cols(
            self.feature,
            [ats_record],
            extra_cols={
                'first_ever_request': F.lit(None).cast(
                    'timestamp')
            }
        )
        result = self.feature.compute(sub_df)
        # Expected: the default 0. for a first subset.
        expected_df = sub_df.withColumn(
            self.feature.feature_name,
            F.lit(0.).cast('float')
        )
        expected_df = self.schema_helper(
            expected_df, result.schema, [self.feature.feature_name]
        )
        result.show()
        expected_df.show()
        self.assertDataFrameEqual(
            result,
            expected_df
        )
    def test_compute_single_record_subsequent_subset(self):
        """08:30 first request to 08:40 timestamp yields 10 minutes."""
        from pyspark.sql import functions as F
        ats_record = {
            "client_ip": '55.555.55.55',
            "client_request_host": 'host',
            "@timestamp": '2018-01-17T08:40:00.000Z',
            "content_type": 'application/javascript',
            "client_url": 'page1/page2/page3?query',
        }
        first_ever_request = '2018-01-17T08:30:00.000Z'
        sub_df = self.get_df_with_extra_cols(
            self.feature,
            [ats_record],
            extra_cols={
                'first_ever_request': F.lit(first_ever_request).cast(
                    'timestamp')
            }
        )
        result = self.feature.compute(sub_df)
        expected_df = sub_df.withColumn(
            self.feature.feature_name,
            F.lit(10.).cast('float')
        )
        expected_df = self.schema_helper(
            expected_df, result.schema, [self.feature.feature_name]
        )
        result.show()
        expected_df.show()
        self.assertDataFrameEqual(
            result,
            expected_df
        )
    def test_compute_multiple_records_first_subset(self):
        """With no first_ever_request the span of the subset itself
        (08:35 to 08:50 = 15 minutes) is used."""
        from pyspark.sql import functions as F
        first_ats_record = {
            "client_ip": '55.555.55.55',
            'client_request_host': 'test',
            "@timestamp": '2018-01-17T08:35:00.000Z',
            'agent': 'ua',
            'content_type': 'application/javascript',
            'request': '/one/two/three/four.png',
        }
        second_ats_record = {
            "client_ip": '55.555.55.55',
            'client_request_host': 'test',
            "@timestamp": '2018-01-17T08:35:00.000Z',
            'agent': 'ua',
            'content_type': 'application/javascript',
            'request': '/one/two/three.png',
        }
        third_ats_record = {
            "client_ip": '55.555.55.55',
            'client_request_host': 'test',
            "@timestamp": '2018-01-17T08:50:00.000Z',
            'agent': 'ua',
            'content_type': 'html',
            'request': '/one/two.png',
        }
        sub_df = self.get_df_with_extra_cols(
            self.feature,
            [first_ats_record, second_ats_record, third_ats_record],
            extra_cols={
                'first_ever_request': F.lit(None).cast(
                    'timestamp')
            }
        )
        result = self.feature.compute(sub_df)
        expected_df = sub_df.withColumn(
            self.feature.feature_name,
            F.lit(15.).cast('float')
        )
        expected_df = self.schema_helper(
            expected_df, result.schema, [self.feature.feature_name]
        )
        result.show()
        expected_df.show()
        self.assertDataFrameEqual(
            result,
            expected_df
        )
    def test_compute_multiple_records_subsequent_subset(self):
        """From the 08:30 first request to the latest 08:50 timestamp the
        total is 20 minutes."""
        from pyspark.sql import functions as F
        first_ats_record = {
            "client_ip": '55.555.55.55',
            'client_request_host': 'test',
            "first_ever_request": '2018-01-17T08:30:00.000Z',
            "@timestamp": '2018-01-17T08:35:00.000Z',
            'agent': 'ua',
            'content_type': 'application/javascript',
            'request': '/one/two/three/four.png',
        }
        second_ats_record = {
            "client_ip": '55.555.55.55',
            'client_request_host': 'test',
            "first_ever_request": '2018-01-17T08:30:00.000Z',
            "@timestamp": '2018-01-17T08:35:00.000Z',
            'agent': 'ua',
            'content_type': 'application/javascript',
            'request': '/one/two/three.png',
        }
        third_ats_record = {
            "client_ip": '55.555.55.55',
            'client_request_host': 'test',
            "first_ever_request": '2018-01-17T08:30:00.000Z',
            "@timestamp": '2018-01-17T08:50:00.000Z',
            'agent': 'ua',
            'content_type': 'html',
            'request': '/one/two.png',
        }
        first_ever_request = '2018-01-17T08:30:00.000Z'
        sub_df = self.get_df_with_extra_cols(
            self.feature,
            [first_ats_record, second_ats_record, third_ats_record],
            extra_cols={
                'first_ever_request': F.lit(first_ever_request).cast(
                    'timestamp')
            }
        )
        result = self.feature.compute(sub_df)
        expected_df = sub_df.withColumn(
            self.feature.feature_name,
            F.lit(20.).cast('float')
        )
        expected_df = self.schema_helper(
            expected_df, result.schema, [self.feature.feature_name]
        )
        result.show()
        expected_df.show()
        self.assertDataFrameEqual(
            result,
            expected_df
        )
    def test_update_row(self):
        """Row-level update keeps the current value (replace semantics)."""
        test_current = {self.feature.feature_name: 2.}
        test_past = {self.feature.feature_name: 1.}
        value = self.feature.update_row(
            test_current, test_past
        )
        self.assertAlmostEqual(value, 2., places=2)
    def test_update(self):
        """DataFrame-level update also replaces the past value with the
        current one."""
        schema = T.StructType([
            T.StructField(
                self.feature.current_features_column,
                T.MapType(T.StringType(), T.FloatType())
            ),
            T.StructField(
                self.feature.past_features_column,
                T.MapType(T.StringType(), T.FloatType())
            ),
        ])
        sub_df = self.session.createDataFrame(
            [{
                self.feature.current_features_column: {
                    self.feature.feature_name: 2.,
                },
                self.feature.past_features_column: {
                    self.feature.feature_name: 1.,
                }
            }],
            schema=schema
        )
        result_df = self.feature.update(
            sub_df
        )
        result_df.show()
        value = result_df.select(
            self.feature.updated_feature_col_name
        ).collect()[0][self.feature.updated_feature_col_name]
        expected_value = 2.
        self.assertAlmostEqual(value, expected_value, places=2)
|
<filename>dissertation/fetch.py<gh_stars>1-10
# Fetch all elasticity, piezo and diel properties from Material Project
from pymatgen import MPRester
from pymatgen.io.cif import CifWriter
import csv
if __name__ == '__main__':
    MAPI_KEY = '<KEY>'  # You must change this to your Materials API key! (or set MAPI_KEY env variable)
    QUERY = 'mp-1180346'  # change this to the mp-id of your compound of interest
    mpr = MPRester(MAPI_KEY)  # object for connecting to MP Rest interface
    # All 89 elements in MP
    element_list = ['H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne', 'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar',
                    'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se', 'Br',
                    'Kr',
                    'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn', 'Sb', 'Te', 'I',
                    'Xe',
                    'Cs', 'Ba', 'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl', 'Pb', 'Bi',
                    'La', 'Ce', 'Pr', 'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu',
                    'Ac', 'Th', 'Pa', 'U', 'Np', 'Pu']
    # search_key in material project, including elasticity, piezo and diel
    search_key = 'elasticity'
    data = mpr.query(criteria={'elements': {'$in': element_list},
                               'has_bandstructure': True,
                               search_key: {'$exists': True},
                               },
                     properties=['material_id',
                                 'pretty_formula',
                                 'nelements',
                                 'nsites',
                                 'is_hubbard',
                                 'is_compatible',
                                 'volume',
                                 'density',
                                 'energy_per_atom',
                                 'formation_energy_per_atom',
                                 'structure',
                                 search_key])
    # 10666 with elasticity and 3948 with warnings label, 6718 without warnings label
    # 2791 with piezo
    # 5796 with diel
    new_file = open('./training/' + search_key + '/' + search_key + '.csv', 'w', encoding='utf-8')
    csv_writer = csv.writer(new_file)
    new_file_warnings = None
    csv_writer_warnings = None
    if search_key == 'elasticity':
        # Elasticity additionally gets a CSV that also includes entries
        # carrying a "warnings" label.
        new_file_warnings = open('./training/elasticity/elasticity_warnings.csv', 'w', encoding='utf-8')
        csv_writer_warnings = csv.writer(new_file_warnings)
    try:
        for i in data:
            material_id = i['material_id']
            # Common descriptor columns shared by all three search keys.
            row = [material_id,
                   i['pretty_formula'],
                   i['nelements'],
                   i['nsites'],
                   i['is_hubbard'],
                   i['is_compatible'],
                   i['volume'],
                   i['density'],
                   i['energy_per_atom'],
                   i['formation_energy_per_atom']]
            # save cif and csv files
            c = CifWriter(i['structure'])
            if search_key == 'elasticity':
                # add G_Voigt_Reuss_Hill, K_Voigt_Reuss_Hill, elastic_anisotropy, poisson_ratio
                elasticity = i['elasticity']
                row.append(elasticity['G_Voigt_Reuss_Hill'])
                row.append(elasticity['K_Voigt_Reuss_Hill'])
                row.append(elasticity['elastic_anisotropy'])
                row.append(elasticity['poisson_ratio'])
                c.write_file('./training/elasticity/data/' + material_id + '.cif')
                # Every material goes to the warnings CSV; only warning-free
                # materials also go to the main CSV.  The original fell
                # through to the generic writes below as well, so each
                # warning-free material was written twice (cif and csv row).
                csv_writer_warnings.writerow(row)
                if not elasticity['warnings']:
                    csv_writer.writerow(row)
                continue
            elif search_key == 'piezo':
                # add eij_max
                piezo = i['piezo']
                row.append(piezo['eij_max'])
            elif search_key == 'diel':
                # add n - dielectric constant, poly_electronic - refractive index, poly_total - ferroelectricity
                diel = i['diel']
                row.append(diel['n'])
                row.append(diel['poly_electronic'])
                row.append(diel['poly_total'])
            c.write_file('./training/' + search_key + '/data/' + material_id + '.cif')
            csv_writer.writerow(row)
    finally:
        # Close output files even if the query loop fails part-way through
        # (the original never closed them).
        new_file.close()
        if new_file_warnings is not None:
            new_file_warnings.close()
|
#!/usr/bin/env python
import datetime
import json
import os
import pathlib
import re
import sys
from typing import List, Optional, Tuple
from vaccine_feed_ingest_schema import location as schema
from vaccine_feed_ingest.utils.log import getLogger
from vaccine_feed_ingest.utils.validation import BOUNDING_BOX
logger = getLogger(__file__)
# CLI contract: argv[1] = output directory, argv[2] = input directory that
# holds the raw arcgis *.ndjson files fetched by an earlier stage.
output_dir = pathlib.Path(sys.argv[1])
input_dir = pathlib.Path(sys.argv[2])
json_filepaths = input_dir.glob("*.ndjson")
# One timestamp per run so every normalized record shares the same value.
parsed_at_timestamp = datetime.datetime.utcnow().isoformat()
def _get_id(site: dict) -> str:
data_id = site["attributes"]["globalid"]
# Could parse these from directory traversal, but do not for now to avoid
# accidental mutation.
site_name = "arcgis"
runner = "az"
# Could parse these from the input file name, but do not for now to avoid
# accidental mutation.
arcgis = "128ead309d754558ad81bccd99188dc9"
layer = 0
return f"{runner}_{site_name}:{arcgis}_{layer}_{data_id}"
def _get_contacts(site: dict) -> Optional[List[schema.Contact]]:
    """Extract phone and website contacts from a raw site record.

    Returns None when the phone field is present but unparseable, or when
    no contact information exists at all.
    """
    contacts = []
    phone_raw = site["attributes"]["prereg_phone"]
    if phone_raw:
        phone_matches = list(
            re.finditer(
                r"(?P<area_code>\d\d\d)\)?-? ?(?P<rest_of_number>\d\d\d-\d\d\d\d)",
                phone_raw,
            )
        )
        if not phone_matches:
            logger.warning("unparseable phone number: '%s'", phone_raw)
            return None
        for match in phone_matches:
            phone = f"({match.group('area_code')}) {match.group('rest_of_number')}"
            contacts.append(schema.Contact(contact_type="general", phone=phone))
    website = site["attributes"]["prereg_website"]
    if website:
        # this edge case...
        website = website.replace("htttp", "http")
        if "http" not in website:
            website = "https://" + website
        website = website.replace(" ", "")
        contacts.append(schema.Contact(contact_type="general", website=website))
    return contacts if contacts else None
def _get_languages(site: dict) -> Optional[List[str]]:
return {None: None, "Yes": ["en", "es"], "No": ["en"]}[
site["attributes"]["spanish_staff_y_n"]
]
def _get_opening_dates(site: dict) -> Optional[List[schema.OpenDate]]:
    """Convert begindate/enddate epoch-millisecond attributes to OpenDate."""

    def _to_iso_date(millis):
        # NOTE(review): fromtimestamp uses the local timezone -- presumably
        # acceptable at whole-day granularity; confirm if exact dates matter.
        return (
            datetime.datetime.fromtimestamp(millis // 1000).date().isoformat()
        )

    attrs = site["attributes"]
    opens = _to_iso_date(attrs["begindate"]) if attrs["begindate"] is not None else None
    closes = _to_iso_date(attrs["enddate"]) if attrs["enddate"] is not None else None
    if opens is None and closes is None:
        return None
    return [schema.OpenDate(opens=opens, closes=closes)]
# Matches one clock time such as "9", "9:30", "10:15 AM" or "5PM".
# Named groups: hour (required), minute (optional), am_pm (optional,
# accepts "AM"/"P.M." style spellings).
TIME_RANGE_RE = re.compile(
    r"(?P<hour>\d{1,2})(:(?P<minute>\d{1,2}))?\s*(?P<am_pm>[AP]\.?M\.?)?"
)
def _parse_time(human_readable_time: str) -> Tuple[int, int]:
    """Parse a human-readable clock time into an (hour, minute) pair.

    Hours 1-11 with a PM marker are shifted to 24-hour form; raises
    ValueError when no time is found in the string.
    """
    match = TIME_RANGE_RE.search(human_readable_time)
    if not match:
        raise ValueError(human_readable_time)
    hour = int(match.group("hour"))
    minute = int(match.group("minute") or "0")
    is_pm = (match.group("am_pm") or "").startswith("P")
    if is_pm and 1 <= hour <= 11:
        hour += 12
    return hour, minute
def _normalize_time(human_readable_time: str) -> datetime.time:
    """Parse *human_readable_time* and wrap the hour into a valid time."""
    hour, minute = _parse_time(human_readable_time)
    return datetime.time(hour=hour % 24, minute=minute)
def _normalize_hours(
    human_readable_hours: Optional[str], day: str
) -> List[schema.OpenHour]:
    """Parse a free-text hours string for *day* into OpenHour records.

    Handles compound strings by recursing on " AND "/";" separated parts;
    returns [] when the input is None or cannot be parsed.
    """
    processed_hours = human_readable_hours
    if processed_hours is None:
        return []
    processed_hours = processed_hours.upper()
    # One known malformed value in the feed, special-cased verbatim.
    if processed_hours == "8:00AM7:00PM":
        return [schema.OpenHour(day=day, opens="08:00", closes="19:00")]
    processed_hours = re.sub("^BY APPOINTMENT", "", processed_hours).strip()
    if " AND " in processed_hours:
        ranges = processed_hours.split(" AND ")
        return sum((_normalize_hours(hours_range, day) for hours_range in ranges), [])
    if ";" in processed_hours:
        ranges = processed_hours.split(";")
        return sum((_normalize_hours(hours_range, day) for hours_range in ranges), [])
    if " TO " in processed_hours:
        processed_hours = processed_hours.replace(" TO ", "-")
    # After normalization a parseable value has exactly one "-" separator.
    if processed_hours.count("-") != 1:
        logger.warning("unparseable hours: '%s'", human_readable_hours)
        return []
    open_time, close_time = [x.strip() for x in re.split(r"\s*-\s*", processed_hours)]
    opens = _normalize_time(open_time)
    closes = _normalize_time(close_time)
    if opens > closes:
        if not re.search(r"[AP]\.?M\.?$", close_time):
            # handle the "9-5" case, where the AM/PM is implied
            closes = closes.replace(hour=closes.hour + 12)
        elif len(re.findall(r"P\.?M\.?", processed_hours)) == 2:
            # handle the "10PM - 5PM" typo cases
            opens = opens.replace(hour=opens.hour - 12)
    try:
        return [
            schema.OpenHour(
                day=day,
                opens=opens.isoformat("minutes"),
                closes=closes.isoformat("minutes"),
            )
        ]
    except ValueError:
        # e.g. the hour arithmetic above produced an out-of-range time.
        logger.warning("unparseable hours: '%s'", human_readable_hours)
        return []
def _get_opening_hours(site: dict) -> Optional[List[schema.OpenHour]]:
    """Collect normalized per-day opening hours from a raw site record.

    A day contributes hours only when its ``*_open`` attribute is present
    and equals "Yes". Returns None (not []) when no day yields any hours,
    matching the schema convention for "unknown".
    """
    day_fields = [
        ("mon_open", "monday", "mon_hrs"),
        ("tues_open", "tuesday", "tues_hrs"),
        ("wed_open", "wednesday", "wed_hrs"),
        ("thurs_open", "thursday", "thurs_hrs"),
        ("fri_open", "friday", "fri_hrs"),
        ("sat_open", "saturday", "sat_hrs"),
        ("sun_open", "sunday", "sun_hrs"),
    ]
    attributes = site["attributes"]
    hours = []
    for open_key, day_name, hours_key in day_fields:
        # A missing key and a non-"Yes" value are treated the same way.
        if attributes.get(open_key) == "Yes":
            hours += _normalize_hours(attributes[hours_key], day_name)
    return hours if hours else None
def _get_inventory(site: dict) -> Optional[List[schema.Vaccine]]:
    """Extract the vaccine line-up from the free-text manufacturer field.

    Though the data source includes attributes for each possible vaccine,
    they do not appear to be used every time (rather this string is
    typically set). Returns None — with a warning — when no known vaccine
    name is found in the string.
    """
    inventory_str = site["attributes"]["vaccine_manufacturer"]
    known_vaccines = [
        ("pfizer", schema.VaccineType.PFIZER_BIONTECH),
        ("moderna", schema.VaccineType.MODERNA),
        ("janssen|johnson.*johnson|j&j|j_j", schema.VaccineType.JOHNSON_JOHNSON_JANSSEN),
    ]
    inventory = [
        schema.Vaccine(vaccine=vaccine_type)
        for pattern, vaccine_type in known_vaccines
        if re.search(pattern, inventory_str, re.IGNORECASE)
    ]
    if not inventory:
        logger.warning("No vaccines found in inventory: %s", inventory_str)
        return None
    return inventory
def _get_lat_lng(site: dict) -> Optional[schema.LatLng]:
    """Build a LatLng from the site geometry, or None when out of bounds.

    Some locations in the AZ data set have lat/lng near the south pole;
    anything outside BOUNDING_BOX is dropped.
    """
    geometry = site["geometry"]
    lat_lng = schema.LatLng(latitude=geometry["y"], longitude=geometry["x"])
    in_bounds = BOUNDING_BOX.latitude.contains(
        lat_lng.latitude
    ) and BOUNDING_BOX.longitude.contains(lat_lng.longitude)
    return lat_lng if in_bounds else None
def _get_normalized_location(site: dict, timestamp: str) -> schema.NormalizedLocation:
    """Assemble the full NormalizedLocation record for one raw site row."""
    attrs = site["attributes"]
    # Pre-compute the only conditional field so the constructor stays flat.
    notes = [attrs["prereg_comments"]] if attrs["prereg_comments"] else None
    return schema.NormalizedLocation(
        id=_get_id(site),
        name=attrs["loc_name"],
        address=schema.Address(
            street1=attrs["addr1"],
            street2=attrs["addr2"],
            city=attrs["city"],
            state="AZ",
            zip=attrs["zip"],
        ),
        location=_get_lat_lng(site),
        contact=_get_contacts(site),
        languages=_get_languages(site),
        opening_dates=_get_opening_dates(site),
        opening_hours=_get_opening_hours(site),
        availability=None,
        inventory=_get_inventory(site),
        access=None,
        parent_organization=None,
        links=None,
        notes=notes,
        active=None,
        source=schema.Source(
            source="az_arcgis",
            id=attrs["globalid"],
            fetched_from_uri="https://adhsgis.maps.arcgis.com/apps/opsdashboard/index.html#/5d636af4d5134a819833b1a3b906e1b6",  # noqa: E501
            fetched_at=timestamp,
            data=site,
        ),
    )
for in_filepath in json_filepaths:
    # Derive the output name from the input's base name so each source file
    # maps to a distinct output. (The previous f-string had no placeholder —
    # `filename` was computed but unused — so every input overwrote the same
    # literal output file.)
    filename, _ = os.path.splitext(in_filepath.name)
    out_filepath = output_dir / f"{filename}.normalized.ndjson"
    logger.info(
        "normalizing %s => %s",
        in_filepath,
        out_filepath,
    )
    with in_filepath.open() as fin:
        with out_filepath.open("w") as fout:
            # Input is NDJSON: one raw site record per line.
            for site_json in fin:
                parsed_site = json.loads(site_json)
                # Records without a street address are skipped entirely.
                if parsed_site["attributes"]["addr1"] is None:
                    continue
                normalized_site = _get_normalized_location(
                    parsed_site, parsed_at_timestamp
                )
                json.dump(normalized_site.dict(), fout)
                fout.write("\n")
|
<filename>Mathstein/mathroot.py
#!/usr/bin/env python
# coding: utf-8
# In[26]:
import math, cmath
import numpy as np
def graphgen(coeff):
    """
    Generates a graph for any given polynomial equation of the form
    a_n*x^n + ... + a_1*x + a_0 = rhs.

    :param coeff: list of all the coefficients of an equation; the last
        entry is the right-hand-side constant and is folded into the
        constant term before plotting (matching the solver conventions
        in this module).
    :return: None. The curve is plotted on a fresh matplotlib axes over
        x in [-10, 10].
    """
    # Imported locally: matplotlib was never imported at module level, so
    # the previous references to `plt` raised NameError at call time.
    import matplotlib.pyplot as plt

    x = np.linspace(-10, 10, 1000)
    # Fold the right-hand side into the constant term, then evaluate the
    # polynomial. np.polyval handles any degree, generalizing the former
    # hard-coded quadratic/cubic/biquadratic branches.
    poly = list(coeff[:-1])
    poly[-1] = poly[-1] - coeff[-1]
    y = np.polyval(poly, x)
    fig, ax = plt.subplots()
    ax.plot(x, y)
    return
def quadraticsolver(coeff):
    """
    Solves the quadratic equation a*x^2 + b*x + c = d.

    :param coeff: list [a, b, c, d] where d is the right-hand side.
    :return: a pair of real roots, a single repeated root, or a message
        string ("Invalid equation" when a == 0, "No real root possible"
        when the discriminant is negative).
    """
    a, b, c, d = coeff
    c = c - d  # fold the right-hand side into the constant term
    if a == 0:
        return "Invalid equation"
    discriminant = b * b - 4 * a * c
    if discriminant < 0:
        return "No real root possible"
    if discriminant == 0:
        return -b / (2 * a)
    root_term = math.sqrt(discriminant)
    return (-b + root_term) / (2 * a), (-b - root_term) / (2 * a)
def cubicsolver(coeff):
    """
    Solves the cubic equation A*x^3 + B*x^2 + C*x + D = E by binary search
    over the integers 0..100000. This only finds a root when the cubic is
    increasing on that range and the root is a non-negative integer.

    :param coeff: list [A, B, C, D, E] where E is the right-hand side.
    :return: the integer root, or "No real root possible" when the search
        does not hit E exactly.
    """
    A, B, C, D, E = coeff
    low, high = 0, 100000
    while low <= high:
        candidate = low + (high - low) // 2
        value = accessory_cubic(A, B, C, D, candidate)
        if value == E:
            return candidate
        if value < E:
            low = candidate + 1
        else:
            high = candidate - 1
    return "No real root possible"


def accessory_cubic(A, B, C, D, x):
    """Evaluate the cubic polynomial A*x^3 + B*x^2 + C*x + D at x."""
    return A * x * x * x + B * x * x + C * x + D
def accesory_biquadratic1(a0, b0, c0):
    """Return both (possibly complex) roots of a0*z^2 + b0*z + c0 = 0.

    Roots are always returned as complex numbers, computed from the monic
    form via the half-coefficient and the square root of the discriminant.
    """
    p, q = b0 / a0, c0 / a0
    half = -0.5 * p
    offset = cmath.sqrt(half * half - q)
    return half - offset, half + offset


def accesory_biquadratic2(a0, b0, c0, d0):
    """Return one real root of the cubic a0*z^3 + b0*z^2 + c0*z + d0 = 0.

    Depresses the cubic, then uses the trigonometric method when all three
    roots are real (h <= 0) and Cardano's formula otherwise; the triple
    root case is handled separately.
    """
    a, b, c = b0 / a0, c0 / a0, d0 / a0
    third = 1. / 3.
    a13 = a * third
    a2 = a13 * a13
    f = third * b - a2
    g = a13 * (2 * a2 - b) + c
    h = 0.25 * g * g + f * f * f

    def cubic_root(x):
        # Sign-preserving cube root; also valid for the complex values
        # produced in the Cardano branch.
        if x.real >= 0:
            return x ** third
        return -(-x) ** third

    if f == g == h == 0:
        # Triple root.
        return -cubic_root(c)
    if h <= 0:
        # Three real roots: trigonometric method, returning one of them.
        j = math.sqrt(-f)
        k = math.acos(-0.5 * g / (j * j * j))
        m = math.cos(third * k)
        return 2 * j * m - a13
    # One real root: Cardano's formula.
    sqrt_h = cmath.sqrt(h)
    s = cubic_root(-0.5 * g + sqrt_h)
    u = cubic_root(-0.5 * g - sqrt_h)
    return (s + u) - a13


def biquadraticsolver(coeff):
    """
    Solves the biquadratic (quartic) equation
    a0*x^4 + b0*x^3 + c0*x^2 + d0*x + e0 = f0
    via a resolvent cubic and two quadratic factors.

    :param coeff: list [a0, b0, c0, d0, e0, f0] where f0 is the
        right-hand side.
    :return: list of the real roots, or "No real root possible" when all
        four roots are complex.
    """
    a0, b0, c0, d0, e0, f0 = coeff
    e0 = e0 - f0  # fold the right-hand side into the constant term
    a, b, c, d = b0 / a0, c0 / a0, d0 / a0, e0 / a0
    shift = 0.25 * a
    shift2 = shift * shift
    # Coefficients of the resolvent cubic for the depressed quartic.
    p = 3 * shift2 - 0.5 * b
    q = a * shift2 - b * shift + 0.5 * c
    r = 3 * shift2 * shift2 - b * shift2 + c * shift - d
    z0 = accesory_biquadratic2(1, p, r, p * r - 0.5 * q * q)
    s = cmath.sqrt(2 * p + 2 * z0.real + 0j)
    t = z0 * z0 + r if s == 0 else -q / s
    # Split the quartic into two quadratics and undo the depression shift.
    quad_roots = accesory_biquadratic1(1, s, z0 + t) + accesory_biquadratic1(1, -s, z0 - t)
    candidates = [root - shift for root in quad_roots]
    real_roots = [z.real for z in candidates if not np.iscomplex(z)]
    return real_roots if real_roots else "No real root possible"
# In[ ]:
|
import torch
from torch import nn
from tvae.nn.modules import (MultiHeadAttention, PositionalEmbedding,
PositionWise)
class TransformerEncoderLayer(nn.Module):
    def __init__(self, dim_m, dim_q_k, dim_v, n_heads, dim_i, dropout):
        """Single transformer encoder layer: multi-head self-attention
        followed by a position-wise feed-forward sublayer.

        Args:
            dim_m (int): Dimension of model.
            dim_q_k (int): Dimension of `query` & `key` attention projections.
            dim_v (int): Dimension of `value` attention projection.
            n_heads (int): Number of attention heads.
            dim_i (int): Inner dimension of feed-forward position-wise sublayer.
            dropout (float): Dropout probability.

        Inputs:
            - **input** of shape `(batch, enc_seq_len, dim_m)`: a float
              tensor, where `batch` is batch size, `enc_seq_len` the encoder
              sequence length and `dim_m` the model hidden size.

        Outputs:
            - **output** of shape `(batch, enc_seq_len, dim_m)`: a float tensor.
        """
        super().__init__()
        self.attention = MultiHeadAttention(n_heads, dim_m, dim_q_k, dim_v, dropout)
        self.positionwise = PositionWise(dim_m, dim_i, dropout)

    def forward(self, input):
        # Self-attention (query = key = value = input), then the FFN.
        attended = self.attention(input, input, input)
        return self.positionwise(attended)
class TransformerDecoderLayer(nn.Module):
    def __init__(self, dim_m, dim_q_k, dim_v, n_heads, dim_i, dropout):
        """Single transformer decoder layer: masked self-attention over the
        decoder sequence, attention over the encoder output, then a
        position-wise feed-forward sublayer.

        Args:
            dim_m (int): Dimension of model.
            dim_q_k (int): Dimension of `query` & `key` attention projections.
            dim_v (int): Dimension of `value` attention projection.
            n_heads (int): Number of attention heads.
            dim_i (int): Inner dimension of feed-forward position-wise sublayer.
            dropout (float): Dropout probability.

        Inputs:
            - **input** of shape `(batch, dec_seq_len, dim_m)`: a float tensor.
            - **encoder_output** of shape `(batch, enc_seq_len, dim_m)`: a
              float tensor.
            - **mask** of shape `(batch, dec_seq_len, dec_seq_len)`: a byte
              tensor masking illegal attention connections, used to preserve
              the auto-regressive property.

        Outputs:
            - **output** of shape `(batch, dec_seq_len, dim_m)`: a float tensor.
        """
        super().__init__()
        self.masked_attention = MultiHeadAttention(n_heads, dim_m, dim_q_k,
                                                   dim_v, dropout)
        self.attention = MultiHeadAttention(n_heads, dim_m, dim_q_k, dim_v,
                                            dropout)
        self.positionwise = PositionWise(dim_m, dim_i, dropout)

    def forward(self, input, encoder_output, mask):
        # Masked self-attention first, then cross-attention on the encoder.
        self_attended = self.masked_attention(input, input, input, mask)
        cross_attended = self.attention(
            value=encoder_output, key=encoder_output, query=self_attended)
        return self.positionwise(cross_attended)
class Transformer(nn.Module):
    def __init__(self,
                 max_seq_len,
                 vocab_size,
                 emb_size=250,
                 embeddings=None,
                 n_layers=6,
                 dim_m=512,
                 dim_q_k=64,
                 dim_v=64,
                 n_heads=8,
                 dim_i=2048,
                 dropout=0.1):
        """Transformer model from the 'Attention Is All You Need' paper.

        Args:
            max_seq_len (int): Maximum sequence length.
            vocab_size (int): Vocabulary size.
            emb_size (int, optional): Embedding size; not needed when
                `embeddings` is supplied (the size is inherited from its shape).
            embeddings (torch.Tensor, optional): `(vocab_size, emb_size)`
                embedding tensor.
            n_layers (int, optional): Number of encoder and decoder layers.
            dim_m (int, optional): Model hidden size; must equal embedding size.
            dim_q_k (int, optional): Dimension of `query` & `key` projections.
            dim_v (int, optional): Dimension of `value` projection.
            n_heads (int, optional): Number of attention heads.
            dim_i (int, optional): Inner dimension of the position-wise sublayer.
            dropout (float, optional): Dropout probability.

        Inputs:
            - **enc_seq** of shape `(batch, enc_seq_len)`: long tensor,
              encoder input sequence.
            - **dec_seq** of shape `(batch, dec_seq_len)`: long tensor,
              decoder input sequence.

        Outputs:
            - **output** of shape `(batch, dec_seq_len, vocab_size)`: float
              tensor of vocabulary scores.

        Notes:
            The encoder state is computed once per batch and cached in
            `self.encoder_state`; after the auto-regressive process it must
            be cleared with :func:`Transformer.reset_encoder_state`.
        """
        super().__init__()
        self.positional_encoding = PositionalEmbedding(
            max_seq_len, dim_m, vocab_size, emb_size, embeddings)
        self.encoder_layers = nn.ModuleList([
            TransformerEncoderLayer(dim_m, dim_q_k, dim_v, n_heads, dim_i, dropout)
            for _ in range(n_layers)
        ])
        self.decoder_layers = nn.ModuleList([
            TransformerDecoderLayer(dim_m, dim_q_k, dim_v, n_heads, dim_i, dropout)
            for _ in range(n_layers)
        ])
        # Kept as a Sequential (instead of a bare Linear) so existing
        # checkpoints with "out.0.*" parameter names still load.
        self.out = nn.Sequential(nn.Linear(dim_m, vocab_size))
        self.softmax = nn.Softmax(-1)
        self.encoder_state = None

    def forward(self, enc_seq, dec_seq):
        # The encoder state is calculated once per batch and reused until
        # `reset_encoder_state` is called.
        if self.encoder_state is None:
            state = self.positional_encoding(enc_seq)
            for encoder_layer in self.encoder_layers:
                state = encoder_layer(state)
            self.encoder_state = state
        # Decoder block: positional encoding, auto-regressive mask, layers.
        dec_state = self.positional_encoding(dec_seq)
        mask = self.autoregressive_mask(dec_seq)
        for decoder_layer in self.decoder_layers:
            dec_state = decoder_layer(dec_state, self.encoder_state, mask)
        return self.out(dec_state)

    def reset_encoder_state(self):
        """Drop the cached encoder state; call before processing a new batch."""
        self.encoder_state = None

    @staticmethod
    def autoregressive_mask(tensor):
        """Generate the auto-regressive mask for a decoder input batch.

        Args:
            tensor (torch.Tensor): of shape `(batch, seq_len)`.

        Returns:
            torch.Tensor: byte tensor of shape `(batch, seq_len, seq_len)`
            with ones marking illegal (future-token) attention connections.
        """
        batch_size, seq_len = tensor.shape
        # Strictly upper-triangular ones == positions after the query token.
        future = torch.ones(seq_len, seq_len, device=tensor.device).triu(1)
        return future.repeat(batch_size, 1, 1).byte()
|
<reponame>deepset-ai/Haystack<filename>haystack/nodes/question_generator/question_generator.py
from typing import List, Union, Optional, Iterator
import itertools
from transformers import AutoModelForSeq2SeqLM
from transformers import AutoTokenizer
from haystack.errors import HaystackError
from haystack.schema import Document
from haystack.nodes.base import BaseComponent
from haystack.nodes.preprocessor import PreProcessor
from haystack.modeling.utils import initialize_device_settings
class QuestionGenerator(BaseComponent):
    """
    The Question Generator takes only a document as input and outputs questions that it thinks can be
    answered by this document. In our current implementation, input texts are split into chunks of 50 words
    with a 10 word overlap. This is because the default model `valhalla/t5-base-e2e-qg` seems to generate only
    about 3 questions per passage regardless of length. Our approach prioritizes the creation of more questions
    over processing efficiency (T5 is able to digest much more than 50 words at once). The returned questions
    generally come in an order dictated by the order of their answers i.e. early questions in the list generally
    come from earlier in the document.
    """

    # This node emits its results on a single edge ("output_1").
    outgoing_edges = 1

    def __init__(
        self,
        model_name_or_path="valhalla/t5-base-e2e-qg",
        model_version=None,
        num_beams=4,
        max_length=256,
        no_repeat_ngram_size=3,
        length_penalty=1.5,
        early_stopping=True,
        split_length=50,
        split_overlap=10,
        use_gpu=True,
        prompt="generate questions:",
        num_queries_per_doc=1,
        batch_size: Optional[int] = None,
    ):
        """
        Uses the valhalla/t5-base-e2e-qg model by default. This class supports any question generation model that is
        implemented as a Seq2SeqLM in HuggingFace Transformers. Note that this style of question generation (where the only input
        is a document) is sometimes referred to as end-to-end question generation. Answer-supervised question
        generation is not currently supported.

        :param model_name_or_path: Directory of a saved model or the name of a public model e.g. "valhalla/t5-base-e2e-qg".
                                   See https://huggingface.co/models for full list of available models.
        :param model_version: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash.
        :param use_gpu: Whether to use GPU or the CPU. Falls back on CPU if no GPU is available.
        :param batch_size: Number of documents to process at a time.
        """
        super().__init__()
        self.devices, _ = initialize_device_settings(use_cuda=use_gpu, multi_gpu=False)
        # NOTE(review): `model_version` is accepted but never forwarded to
        # `from_pretrained` (e.g. as `revision=`) — confirm whether this is intended.
        self.model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
        self.model.to(str(self.devices[0]))
        self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
        self.num_beams = num_beams
        self.max_length = max_length
        self.no_repeat_ngram_size = no_repeat_ngram_size
        self.length_penalty = length_penalty
        self.early_stopping = early_stopping
        self.split_length = split_length
        self.split_overlap = split_overlap
        self.preprocessor = PreProcessor()
        self.prompt = prompt
        self.num_queries_per_doc = num_queries_per_doc
        self.batch_size = batch_size

    def run(self, documents: List[Document]):  # type: ignore
        """Generate questions for each document and emit them on "output_1".

        Each result dict carries the document id, a 200-character content
        sample for context, and the list of generated questions.
        """
        generated_questions = []
        for d in documents:
            questions = self.generate(d.content)
            curr_dict = {"document_id": d.id, "document_sample": d.content[:200], "questions": questions}
            generated_questions.append(curr_dict)
        output = {"generated_questions": generated_questions, "documents": documents}
        return output, "output_1"

    def run_batch(self, documents: Union[List[Document], List[List[Document]]], batch_size: Optional[int] = None):  # type: ignore
        """Batched variant of `run`, accepting a flat or nested document list.

        A flat list is processed as one batch; a list of lists is flattened,
        with the per-list grouping preserved by `generate_batch`.
        """
        generated_questions = []
        if isinstance(documents[0], Document):
            questions = self.generate_batch(texts=[d.content for d in documents if isinstance(d, Document)])
            questions_iterator = questions  # type: ignore
            documents_iterator = documents
        else:
            questions = self.generate_batch(
                texts=[[d.content for d in doc_list] for doc_list in documents if isinstance(doc_list, list)]
            )
            # Flatten the nested question/document lists so they pair up 1:1.
            questions_iterator = itertools.chain.from_iterable(questions)  # type: ignore
            documents_iterator = itertools.chain.from_iterable(documents)  # type: ignore
        for cur_questions, doc in zip(questions_iterator, documents_iterator):
            if not isinstance(doc, Document):
                raise HaystackError(f"doc was of type {type(doc)}, but expected a Document.")
            curr_dict = {"document_id": doc.id, "document_sample": doc.content[:200], "questions": cur_questions}
            generated_questions.append(curr_dict)
        output = {"generated_questions": generated_questions, "documents": documents}
        return output, "output_1"

    def generate(self, text: str) -> List[str]:
        """Generate a deduplicated list of questions for a single text."""
        # Performing splitting because T5 has a max input length
        # Also currently, it seems that it only generates about 3 questions for the beginning section of text
        split_texts_docs = self.preprocessor.split(
            document={"content": text},
            split_by="word",
            split_respect_sentence_boundary=False,
            split_overlap=self.split_overlap,
            split_length=self.split_length,
        )
        # Prepend the task prompt unless the chunk already contains it.
        split_texts = [
            f"{self.prompt} {text.content}" if self.prompt not in text.content else text.content
            for text in split_texts_docs
        ]
        tokenized = self.tokenizer(split_texts, return_tensors="pt", padding=True)
        input_ids = tokenized["input_ids"].to(self.devices[0])
        # Necessary if padding is enabled so the model won't attend pad tokens
        attention_mask = tokenized["attention_mask"].to(self.devices[0])
        tokens_output = self.model.generate(
            input_ids=input_ids,
            attention_mask=attention_mask,
            num_beams=self.num_beams,
            max_length=self.max_length,
            no_repeat_ngram_size=self.no_repeat_ngram_size,
            length_penalty=self.length_penalty,
            early_stopping=self.early_stopping,
            num_return_sequences=self.num_queries_per_doc,
        )
        string_output = self.tokenizer.batch_decode(tokens_output)
        # Strip special tokens left in by batch_decode.
        string_output = [cur_output.replace("<pad>", "").replace("</s>", "") for cur_output in string_output]

        ret = []
        # The model emits multiple questions per chunk separated by "<sep>";
        # keep first occurrence of each unique question across all chunks.
        for split in string_output:
            for question in split.split("<sep>"):
                question = question.strip()
                if question and question not in ret:
                    ret.append(question)

        return ret

    def generate_batch(
        self, texts: Union[List[str], List[List[str]]], batch_size: Optional[int] = None
    ) -> Union[List[List[str]], List[List[List[str]]]]:
        """
        Generates questions for a list of strings or a list of lists of strings.

        :param texts: List of str or list of list of str.
        :param batch_size: Number of texts to process at a time.
        """
        if batch_size is None:
            batch_size = self.batch_size

        # Normalize both accepted shapes to a flat iterator of texts while
        # remembering how many docs each outer entry contributed.
        if isinstance(texts[0], str):
            single_doc_list = True
            number_of_docs = [1 for text_list in texts]
            text_iterator = texts
        else:
            single_doc_list = False
            number_of_docs = [len(text_list) for text_list in texts]
            text_iterator = itertools.chain.from_iterable(texts)  # type: ignore

        split_texts_docs = [
            self.preprocessor.split(
                document={"content": text},
                split_by="word",
                split_respect_sentence_boundary=False,
                split_overlap=self.split_overlap,
                split_length=self.split_length,
            )
            for text in text_iterator
        ]
        split_texts = [[doc.content for doc in split if isinstance(doc.content, str)] for split in split_texts_docs]
        # Per-doc chunk counts, needed later to regroup flat predictions.
        number_of_splits = [len(split) for split in split_texts]
        flat_split_texts = [
            f"{self.prompt} {text}" if self.prompt not in text else text
            for text in itertools.chain.from_iterable(split_texts)
        ]

        batches = self._get_batches(flat_split_texts, batch_size=batch_size)
        all_string_outputs = []
        for batch in batches:
            tokenized = self.tokenizer(batch, return_tensors="pt", padding=True)
            input_ids = tokenized["input_ids"].to(self.devices[0])
            # Necessary if padding is enabled so the model won't attend pad tokens
            attention_mask = tokenized["attention_mask"].to(self.devices[0])
            tokens_output = self.model.generate(
                input_ids=input_ids,
                attention_mask=attention_mask,
                num_beams=self.num_beams,
                max_length=self.max_length,
                no_repeat_ngram_size=self.no_repeat_ngram_size,
                length_penalty=self.length_penalty,
                early_stopping=self.early_stopping,
                num_return_sequences=self.num_queries_per_doc,
            )
            string_output = self.tokenizer.batch_decode(tokens_output)
            # Strip special tokens left in by batch_decode.
            string_output = [cur_output.replace("<pad>", "").replace("</s>", "") for cur_output in string_output]
            all_string_outputs.extend(string_output)

        # Group predictions together by split
        grouped_predictions_split = []
        left_idx = 0
        right_idx = 0
        for number in number_of_splits:
            right_idx = left_idx + number
            grouped_predictions_split.append(all_string_outputs[left_idx:right_idx])
            left_idx = right_idx
        # Group predictions together by doc list
        grouped_predictions_doc_list = []
        left_idx = 0
        right_idx = 0
        for number in number_of_docs:
            right_idx = left_idx + number
            grouped_predictions_doc_list.append(grouped_predictions_split[left_idx:right_idx])
            left_idx = right_idx

        # Split on "<sep>" and deduplicate questions within each doc,
        # then unwrap single-doc groups when the input was a flat list.
        results = []
        for group in grouped_predictions_doc_list:
            group_preds = []
            for doc in group:
                doc_preds = []
                for split in doc:
                    for question in split.split("<sep>"):
                        question = question.strip()
                        if question and question not in doc_preds:
                            doc_preds.append(question)
                group_preds.append(doc_preds)
            if single_doc_list:
                results.append(group_preds[0])
            else:
                results.append(group_preds)

        return results

    @staticmethod
    def _get_batches(texts: List[str], batch_size: Optional[int]) -> Iterator[List[str]]:
        """Yield `texts` in chunks of `batch_size` (everything at once if None)."""
        if batch_size is None:
            yield texts
            return
        else:
            for index in range(0, len(texts), batch_size):
                yield texts[index : index + batch_size]
|
# python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Retrieves table metadata from Spanner."""
import collections
from typing import Any, Dict, Optional, Type
from spanner_orm import condition
from spanner_orm import field
from spanner_orm import index
from spanner_orm import metadata
from spanner_orm import model
from spanner_orm.admin import column
from spanner_orm.admin import index as index_schema
from spanner_orm.admin import index_column
from spanner_orm.admin import table
class SpannerMetadata(object):
  """Gathers information about a table from Spanner."""

  @classmethod
  def _class_name_from_table(cls, table_name: Optional[str]) -> Optional[str]:
    """Derives a synthetic model class name for a table name, or None."""
    return 'table_{}_model'.format(table_name) if table_name else None

  @classmethod
  def models(cls) -> Dict[str, Type[model.Model]]:
    """Constructs model classes from Spanner table schema."""
    all_indexes = cls.indexes()
    result = {}
    for table_name, table_info in cls.tables().items():
      primary_key_columns = set(
          all_indexes[table_name][index.Index.PRIMARY_INDEX].columns)
      new_model = model.ModelMetaclass(
          cls._class_name_from_table(table_name), (model.Model,), {})
      # Flag the fields that participate in the table's primary key.
      for model_field in table_info['fields'].values():
        model_field._primary_key = model_field.name in primary_key_columns  # pylint: disable=protected-access
      new_model.meta = metadata.ModelMetadata(
          table=table_name,
          fields=table_info['fields'],
          interleaved=cls._class_name_from_table(table_info['parent_table']),
          indexes=all_indexes[table_name],
          model_class=new_model)
      new_model.meta.finalize()
      result[table_name] = new_model
    return result

  @classmethod
  def model(cls, table_name) -> Optional[Type[model.Model]]:
    """Returns the constructed model for one table, or None if unknown."""
    return cls.models().get(table_name)

  @classmethod
  def tables(cls) -> Dict[str, Dict[str, Any]]:
    """Compiles table information from column schema."""
    # First pass: collect every column as a Field, keyed by table.
    fields_by_table = collections.defaultdict(dict)
    column_rows = column.ColumnSchema.where(
        condition.equal_to('table_catalog', ''),
        condition.equal_to('table_schema', ''),
    )
    for row in column_rows:
      table_field = field.Field(row.field_type(), nullable=row.nullable())
      table_field.name = row.column_name
      table_field.position = row.ordinal_position
      fields_by_table[row.table_name][row.column_name] = table_field

    # Second pass: attach parent-table info from the table schema.
    table_data = collections.defaultdict(dict)
    table_rows = table.TableSchema.where(
        condition.equal_to('table_catalog', ''),
        condition.equal_to('table_schema', ''),
    )
    for table_row in table_rows:
      table_data[table_row.table_name] = {
          'parent_table': table_row.parent_table_name,
          'fields': fields_by_table[table_row.table_name],
      }
    return table_data

  @classmethod
  def indexes(cls) -> Dict[str, Dict[str, Any]]:
    """Compiles index information from index and index columns schemas."""
    # ordinal_position is the position of the column in the indicated index;
    # results are ordered by it so index columns are appended in the correct
    # order. A null ordinal_position marks a storing (non-key) column.
    index_column_rows = index_column.IndexColumnSchema.where(
        condition.equal_to('table_catalog', ''),
        condition.equal_to('table_schema', ''),
        condition.order_by(('ordinal_position', condition.OrderType.ASC)),
    )
    key_columns = collections.defaultdict(list)
    storing_columns = collections.defaultdict(list)
    for row in index_column_rows:
      table_and_index = (row.table_name, row.index_name)
      if row.ordinal_position is None:
        storing_columns[table_and_index].append(row.column_name)
      else:
        key_columns[table_and_index].append(row.column_name)

    index_rows = index_schema.IndexSchema.where(
        condition.equal_to('table_catalog', ''),
        condition.equal_to('table_schema', ''),
    )
    indexes = collections.defaultdict(dict)
    for row in index_rows:
      table_and_index = (row.table_name, row.index_name)
      new_index = index.Index(
          key_columns[table_and_index],
          parent=row.parent_table_name,
          null_filtered=row.is_null_filtered,
          unique=row.is_unique,
          storing_columns=storing_columns[table_and_index])
      new_index.name = row.index_name
      indexes[row.table_name][row.index_name] = new_index
    return indexes
|
<reponame>vika-sonne/sam-ba-loader
#
# Open Source SAM-BA Programmer
# Copyright (C) <NAME>, 2016.
#
# dean [at] fourwalledcubicle [dot] com
# www.fourwalledcubicle.com
#
#
# Released under a MIT license, see LICENCE.txt.
from .import ChipIdentifier
class CHIPID(ChipIdentifier.ChipIdentifierBase):
    """CHIPID chip identifier module, used to read out the chip identification
    registers of a SAM device that contains a CHIPID module, and extract out
    the various fields for later comparison against reference part values.
    """

    # Register offsets from the CHIPID base address: the primary chip
    # identifier word and the extended identifier word read in read().
    CIDR_OFFSET = 0x0000
    EXID_OFFSET = 0x0004

    # Lookup table mapping the 4-bit flash bank size codes (extracted from
    # chip_id bits 8-11 and 12-15 in read()) to human-readable sizes.
    FLASH_BANK_SIZE = {
        0 : 'NONE',
        1 : '8KB',
        2 : '16KB',
        3 : '32KB',
        5 : '64KB',
        7 : '128KB',
        9 : '256KB',
        10 : '512KB',
        12 : '1024KB',
        14 : '2048KB',
    }

    # Lookup table mapping the 4-bit SRAM size code (chip_id bits 16-19)
    # to human-readable sizes. Note: values are intentionally non-monotonic;
    # they follow the device's encoding, not numeric order.
    SRAM_SIZE = {
        0 : '48KB',
        1 : '1KB',
        2 : '2KB',
        3 : '6KB',
        4 : '24KB',
        5 : '4KB',
        6 : '80KB',
        7 : '160KB',
        8 : '8KB',
        9 : '16KB',
        10 : '32KB',
        11 : '64KB',
        12 : '128KB',
        13 : '256KB',
        14 : '96KB',
        15 : '512KB',
    }

    # Lookup table mapping the 3-bit processor code (chip_id bits 5-7) to
    # the CPU core name.
    PROCESSOR = {
        0 : 'Cortex-M7',
        1 : 'ARM946ES',
        2 : 'ARM7TDMI',
        3 : 'Cortex-M3',
        4 : 'ARM920T',
        5 : 'ARM926EJS',
        6 : 'Cortex-A5',
        7 : 'Cortex-M4',
    }

    # Lookup table mapping the 8-bit architecture code (chip_id bits 20-27)
    # to the device family/series name.
    ARCHITECTURE = {
        0x19 : 'AT91SAM9xx Series',
        0x29 : 'AT91SAM9XExx Series',
        0x34 : 'AT91x34 Series',
        0x37 : 'CAP7 Series',
        0x39 : 'CAP9 Series',
        0x3B : 'CAP11 Series',
        0x40 : 'AT91x40 Series',
        0x42 : 'AT91x42 Series',
        0x55 : 'AT91x55 Series',
        0x60 : 'AT91SAM7Axx Series',
        0x61 : 'AT91SAM7AQxx Series',
        0x63 : 'AT91x63 Series',
        0x70 : 'AT91SAM7Sxx Series',
        0x71 : 'AT91SAM7XCxx Series',
        0x72 : 'AT91SAM7SExx Series',
        0x73 : 'AT91SAM7Lxx Series',
        0x75 : 'AT91SAM7Xxx Series',
        0x76 : 'AT91SAM7SLxx Series',
        0x80 : 'SAM3UxC Series (100-pin version)',
        0x81 : 'SAM3UxE Series (144-pin version)',
        0x83 : 'SAM3AxC Series (100-pin version)',
        0x84 : 'SAM3XxC Series (100-pin version)',
        0x85 : 'SAM3XxE Series (144-pin version)',
        0x86 : 'SAM3XxG Series (217-pin version)',
        0x88 : 'SAM4SxA (48-pin version)',
        0x89 : 'SAM4SxB (64-pin version)',
        0x8A : 'SAM4SxC (100-pin version)',
        0x92 : 'AT91x92 Series',
        0x93 : 'SAM3NxA Series (48-pin version)',
        0x94 : 'SAM3NxB Series (64-pin version)',
        0x95 : 'SAM3NxC Series (100-pin version)',
        0x98 : 'SAM4SDxA Series (48-pin version)',
        0x99 : 'SAM4SDxB Series (64-pin version)',
        0x9A : 'SAM4SDxC Series (100-pin version)',
        0xA5 : 'SAM5A',
        0xF0 : 'AT75Cxx Series',
    }

    def __init__(self, base_address):
        """Initializes a CHIPID chip identifier instance at the specified base
        address in the attached device.

        Args:
            base_address -- Base address of the CHIPID module within the
                            internal address space of the attached device.
        """
        self.base_address = base_address

    def __str__(self):
        """Conversion method to serialize the parsed chip identifier values out
        as a string.

        Note: valid only after a successful `read()`, which populates
        chip_id, version, processor, architecture, flash, sram and
        extended_chip_id.

        Returns:
            Chip identifier values as a human readable string suitable for
            printing to a console.
        """
        ret = 'CHIPID @ 0x{:08X}: 0x{:08X}'.format(self.base_address, self.chip_id)
        ret += '\n\tVersion:\t' + str(self.version)
        ret += '\n\tProcessor:\t' + self._lookup(self.PROCESSOR, self.processor)
        ret += '\n\tArchitecture:\t' + self._lookup(self.ARCHITECTURE, self.architecture)
        ret += '\n\tFlash Bank 0:\t' + self._lookup(self.FLASH_BANK_SIZE, self.flash[0])
        ret += '\n\tFlash Bank 1:\t' + self._lookup(self.FLASH_BANK_SIZE, self.flash[1])
        ret += '\n\tSRAM:\t\t' + self._lookup(self.SRAM_SIZE, self.sram)
        ret += '\n\tExtended ID:\t' + str(self.extended_chip_id)
        return ret

    def _lookup(self, table, value):
        """Internal lookup helper function, searching a lookup table for the
        specified value, or returning the raw value and an unknown identifier
        warning.

        Args:
            table -- Lookup table to examine.
            value -- Value to search for in the table.

        Returns:
            String matching the value in the table if found, or the raw value
            (as a string) if not.
        """
        return table[value] if value in table else '{0} (0x{0:X}) (Unknown)'.format(value)

    def read(self, samba):
        """Reads and parses the chip identification values from the attached
        device. Parsed values are then stored in the class instance, and can
        be extracted later for matching against a specific device.

        Args:
            samba -- Core `SAMBA` instance bound to the device.

        Returns:
            False when the CIDR register reads back zero (no CHIPID module
            present at this address), True after a successful parse.
        """
        self.chip_id = samba.read_word(self.base_address + self.CIDR_OFFSET)
        if self.chip_id == 0:
            return False
        self.extended_chip_id = samba.read_word(self.base_address + self.EXID_OFFSET)
        # Decompose the CIDR word into its bit fields:
        #   bits 0-4   version, bits 5-7 processor, bits 8-15 two flash
        #   bank codes, bits 16-19 SRAM code, bits 20-27 architecture.
        self.version = (self.chip_id >> 0) & 0x00001F
        self.processor = (self.chip_id >> 5) & 0x000007
        self.flash = [(self.chip_id >> 8) & 0x00000F, (self.chip_id >> 12) & 0x00000F]
        self.sram = (self.chip_id >> 16) & 0x00000F
        self.architecture = (self.chip_id >> 20) & 0x0000FF
        return True
|
<filename>isi_sdk/apis/storagepool_api.py
# coding: utf-8
"""
StoragepoolApi.py
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class StoragepoolApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Bind this API facade to an `ApiClient`.

    :param api_client: explicit client to use; when omitted (or falsy) the
        shared client from the global `Configuration` is used, creating it
        on first access.
    """
    # Configuration() is constructed unconditionally, matching the
    # generator's original behavior (it may maintain shared state).
    config = Configuration()
    if not api_client:
        if not config.api_client:
            # Lazily create and cache the shared default client.
            config.api_client = ApiClient()
        api_client = config.api_client
    self.api_client = api_client
def create_compatibilities_class_active_item(self, compatibilities_class_active_item, **kwargs):
    """Create a new compatibility.

    Synchronous by default; pass a `callback` keyword argument taking the
    response to make the request asynchronous, in which case the request
    thread is returned instead.

    :param CompatibilitiesClassActiveItem compatibilities_class_active_item: (required)
    :param callback function: invoked with the response (optional)
    :return: CreateCompatibilitiesClassActiveItemResponse
    """
    # Only the optional `callback` keyword is accepted.
    for name in kwargs:
        if name != 'callback':
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_compatibilities_class_active_item" % name)
    if compatibilities_class_active_item is None:
        raise ValueError("Missing the required parameter `compatibilities_class_active_item` when calling `create_compatibilities_class_active_item`")

    header_params = {}
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])

    return self.api_client.call_api(
        '/platform/1/storagepool/compatibilities/class/active'.replace('{format}', 'json'), 'POST',
        {},  # path params
        {},  # query params
        header_params,
        body=compatibilities_class_active_item,
        post_params=[],
        files={},
        response_type='CreateCompatibilitiesClassActiveItemResponse',
        auth_settings=['basic_auth'],
        callback=kwargs.get('callback'))
def create_compatibilities_ssd_active_item(self, compatibilities_ssd_active_item, **kwargs):
    """Create a new ssd compatibility.

    Synchronous by default; pass a `callback` keyword argument taking the
    response to make the request asynchronous, in which case the request
    thread is returned instead.

    :param CompatibilitiesSsdActiveItem compatibilities_ssd_active_item: (required)
    :param callback function: invoked with the response (optional)
    :return: CreateCompatibilitiesClassActiveItemResponse
    """
    # Only the optional `callback` keyword is accepted.
    for name in kwargs:
        if name != 'callback':
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_compatibilities_ssd_active_item" % name)
    if compatibilities_ssd_active_item is None:
        raise ValueError("Missing the required parameter `compatibilities_ssd_active_item` when calling `create_compatibilities_ssd_active_item`")

    header_params = {}
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])

    # NOTE(review): response_type reuses the *Class* response model, exactly
    # as emitted by the swagger generator — confirm the spec really shares
    # that schema before changing it.
    return self.api_client.call_api(
        '/platform/3/storagepool/compatibilities/ssd/active'.replace('{format}', 'json'), 'POST',
        {},  # path params
        {},  # query params
        header_params,
        body=compatibilities_ssd_active_item,
        post_params=[],
        files={},
        response_type='CreateCompatibilitiesClassActiveItemResponse',
        auth_settings=['basic_auth'],
        callback=kwargs.get('callback'))
def create_storagepool_nodepool(self, storagepool_nodepool, **kwargs):
    """Create a new node pool.

    Synchronous by default; pass a `callback` keyword argument taking the
    response to make the request asynchronous, in which case the request
    thread is returned instead.

    :param StoragepoolNodepoolCreateParams storagepool_nodepool: (required)
    :param callback function: invoked with the response (optional)
    :return: CreateStoragepoolTierResponse
    """
    # Only the optional `callback` keyword is accepted.
    for name in kwargs:
        if name != 'callback':
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_storagepool_nodepool" % name)
    if storagepool_nodepool is None:
        raise ValueError("Missing the required parameter `storagepool_nodepool` when calling `create_storagepool_nodepool`")

    header_params = {}
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])

    # NOTE(review): response_type reuses the *Tier* response model, exactly
    # as emitted by the swagger generator — confirm the spec shares that
    # schema before changing it.
    return self.api_client.call_api(
        '/platform/3/storagepool/nodepools'.replace('{format}', 'json'), 'POST',
        {},  # path params
        {},  # query params
        header_params,
        body=storagepool_nodepool,
        post_params=[],
        files={},
        response_type='CreateStoragepoolTierResponse',
        auth_settings=['basic_auth'],
        callback=kwargs.get('callback'))
def create_storagepool_tier(self, storagepool_tier, **kwargs):
    """Create a new tier.

    Synchronous by default; pass a `callback` keyword argument taking the
    response to make the request asynchronous, in which case the request
    thread is returned instead.

    :param StoragepoolTierCreateParams storagepool_tier: (required)
    :param callback function: invoked with the response (optional)
    :return: CreateStoragepoolTierResponse
    """
    # Only the optional `callback` keyword is accepted.
    for name in kwargs:
        if name != 'callback':
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_storagepool_tier" % name)
    if storagepool_tier is None:
        raise ValueError("Missing the required parameter `storagepool_tier` when calling `create_storagepool_tier`")

    header_params = {}
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])

    return self.api_client.call_api(
        '/platform/1/storagepool/tiers'.replace('{format}', 'json'), 'POST',
        {},  # path params
        {},  # query params
        header_params,
        body=storagepool_tier,
        post_params=[],
        files={},
        response_type='CreateStoragepoolTierResponse',
        auth_settings=['basic_auth'],
        callback=kwargs.get('callback'))
def delete_compatibilities_class_active_by_id(self, compatibilities_class_active_id, **kwargs):
    """Delete an active compatibility by id.

    Synchronous by default; pass a `callback` keyword argument taking the
    response to make the request asynchronous, in which case the request
    thread is returned instead.

    :param str compatibilities_class_active_id: id of the compatibility (required)
    :param callback function: invoked with the response (optional)
    :return: None
    """
    # Only the optional `callback` keyword is accepted.
    for name in kwargs:
        if name != 'callback':
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_compatibilities_class_active_by_id" % name)
    if compatibilities_class_active_id is None:
        raise ValueError("Missing the required parameter `compatibilities_class_active_id` when calling `delete_compatibilities_class_active_by_id`")

    header_params = {}
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])

    return self.api_client.call_api(
        '/platform/1/storagepool/compatibilities/class/active/{CompatibilitiesClassActiveId}'.replace('{format}', 'json'), 'DELETE',
        {'CompatibilitiesClassActiveId': compatibilities_class_active_id},
        {},  # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['basic_auth'],
        callback=kwargs.get('callback'))
def delete_compatibilities_ssd_active_by_id(self, compatibilities_ssd_active_id, **kwargs):
    """Delete an active ssd compatibility by id.

    Synchronous by default; pass a `callback` keyword argument taking the
    response to make the request asynchronous, in which case the request
    thread is returned instead.

    :param str compatibilities_ssd_active_id: id of the ssd compatibility (required)
    :param callback function: invoked with the response (optional)
    :return: None
    """
    # Only the optional `callback` keyword is accepted.
    for name in kwargs:
        if name != 'callback':
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_compatibilities_ssd_active_by_id" % name)
    if compatibilities_ssd_active_id is None:
        raise ValueError("Missing the required parameter `compatibilities_ssd_active_id` when calling `delete_compatibilities_ssd_active_by_id`")

    header_params = {}
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])

    return self.api_client.call_api(
        '/platform/3/storagepool/compatibilities/ssd/active/{CompatibilitiesSsdActiveId}'.replace('{format}', 'json'), 'DELETE',
        {'CompatibilitiesSsdActiveId': compatibilities_ssd_active_id},
        {},  # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['basic_auth'],
        callback=kwargs.get('callback'))
def delete_storagepool_nodepool(self, storagepool_nodepool_id, **kwargs):
    """Delete node pool.

    Synchronous by default; pass a `callback` keyword argument taking the
    response to make the request asynchronous, in which case the request
    thread is returned instead.

    :param str storagepool_nodepool_id: id of the node pool (required)
    :param callback function: invoked with the response (optional)
    :return: None
    """
    # Only the optional `callback` keyword is accepted.
    for name in kwargs:
        if name != 'callback':
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_storagepool_nodepool" % name)
    if storagepool_nodepool_id is None:
        raise ValueError("Missing the required parameter `storagepool_nodepool_id` when calling `delete_storagepool_nodepool`")

    header_params = {}
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])

    return self.api_client.call_api(
        '/platform/3/storagepool/nodepools/{StoragepoolNodepoolId}'.replace('{format}', 'json'), 'DELETE',
        {'StoragepoolNodepoolId': storagepool_nodepool_id},
        {},  # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['basic_auth'],
        callback=kwargs.get('callback'))
def delete_storagepool_tier(self, storagepool_tier_id, **kwargs):
    """Delete tier.

    Synchronous by default; pass a `callback` keyword argument taking the
    response to make the request asynchronous, in which case the request
    thread is returned instead.

    :param str storagepool_tier_id: id of the tier (required)
    :param callback function: invoked with the response (optional)
    :return: None
    """
    # Only the optional `callback` keyword is accepted.
    for name in kwargs:
        if name != 'callback':
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_storagepool_tier" % name)
    if storagepool_tier_id is None:
        raise ValueError("Missing the required parameter `storagepool_tier_id` when calling `delete_storagepool_tier`")

    header_params = {}
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])

    return self.api_client.call_api(
        '/platform/1/storagepool/tiers/{StoragepoolTierId}'.replace('{format}', 'json'), 'DELETE',
        {'StoragepoolTierId': storagepool_tier_id},
        {},  # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['basic_auth'],
        callback=kwargs.get('callback'))
def delete_storagepool_tiers(self, **kwargs):
    """Delete all tiers.

    Synchronous by default; pass a `callback` keyword argument taking the
    response to make the request asynchronous, in which case the request
    thread is returned instead.

    :param callback function: invoked with the response (optional)
    :return: None
    """
    # Only the optional `callback` keyword is accepted.
    for name in kwargs:
        if name != 'callback':
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_storagepool_tiers" % name)

    header_params = {}
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])

    return self.api_client.call_api(
        '/platform/1/storagepool/tiers'.replace('{format}', 'json'), 'DELETE',
        {},  # path params
        {},  # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['basic_auth'],
        callback=kwargs.get('callback'))
def get_compatibilities_class_active_by_id(self, compatibilities_class_active_id, **kwargs):
    """Get an active compatibility by id.

    Synchronous by default; pass a `callback` keyword argument taking the
    response to make the request asynchronous, in which case the request
    thread is returned instead.

    :param str compatibilities_class_active_id: id of the compatibility (required)
    :param callback function: invoked with the response (optional)
    :return: CompatibilitiesClassActive
    """
    # Only the optional `callback` keyword is accepted.
    for name in kwargs:
        if name != 'callback':
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_compatibilities_class_active_by_id" % name)
    if compatibilities_class_active_id is None:
        raise ValueError("Missing the required parameter `compatibilities_class_active_id` when calling `get_compatibilities_class_active_by_id`")

    header_params = {}
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])

    return self.api_client.call_api(
        '/platform/1/storagepool/compatibilities/class/active/{CompatibilitiesClassActiveId}'.replace('{format}', 'json'), 'GET',
        {'CompatibilitiesClassActiveId': compatibilities_class_active_id},
        {},  # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='CompatibilitiesClassActive',
        auth_settings=['basic_auth'],
        callback=kwargs.get('callback'))
def get_compatibilities_class_available(self, **kwargs):
    """Get a list of available compatibilities.

    Synchronous by default; pass a `callback` keyword argument taking the
    response to make the request asynchronous, in which case the request
    thread is returned instead.

    :param callback function: invoked with the response (optional)
    :return: CompatibilitiesClassAvailable
    """
    # Only the optional `callback` keyword is accepted.
    for name in kwargs:
        if name != 'callback':
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_compatibilities_class_available" % name)

    header_params = {}
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])

    return self.api_client.call_api(
        '/platform/1/storagepool/compatibilities/class/available'.replace('{format}', 'json'), 'GET',
        {},  # path params
        {},  # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='CompatibilitiesClassAvailable',
        auth_settings=['basic_auth'],
        callback=kwargs.get('callback'))
def get_compatibilities_ssd_active_by_id(self, compatibilities_ssd_active_id, **kwargs):
    """Get an active ssd compatibility by id.

    Synchronous by default; pass a `callback` keyword argument taking the
    response to make the request asynchronous, in which case the request
    thread is returned instead.

    :param str compatibilities_ssd_active_id: id of the ssd compatibility (required)
    :param callback function: invoked with the response (optional)
    :return: CompatibilitiesSsdActive
    """
    # Only the optional `callback` keyword is accepted.
    for name in kwargs:
        if name != 'callback':
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_compatibilities_ssd_active_by_id" % name)
    if compatibilities_ssd_active_id is None:
        raise ValueError("Missing the required parameter `compatibilities_ssd_active_id` when calling `get_compatibilities_ssd_active_by_id`")

    header_params = {}
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])

    return self.api_client.call_api(
        '/platform/3/storagepool/compatibilities/ssd/active/{CompatibilitiesSsdActiveId}'.replace('{format}', 'json'), 'GET',
        {'CompatibilitiesSsdActiveId': compatibilities_ssd_active_id},
        {},  # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='CompatibilitiesSsdActive',
        auth_settings=['basic_auth'],
        callback=kwargs.get('callback'))
def get_compatibilities_ssd_available(self, **kwargs):
    """Get a list of available ssd compatibilities.

    Synchronous by default; pass a `callback` keyword argument taking the
    response to make the request asynchronous, in which case the request
    thread is returned instead.

    :param callback function: invoked with the response (optional)
    :return: CompatibilitiesSsdAvailable
    """
    # Only the optional `callback` keyword is accepted.
    for name in kwargs:
        if name != 'callback':
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_compatibilities_ssd_available" % name)

    header_params = {}
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])

    return self.api_client.call_api(
        '/platform/1/storagepool/compatibilities/ssd/available'.replace('{format}', 'json'), 'GET',
        {},  # path params
        {},  # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='CompatibilitiesSsdAvailable',
        auth_settings=['basic_auth'],
        callback=kwargs.get('callback'))
def get_storagepool_nodepool(self, storagepool_nodepool_id, **kwargs):
    """Retrieve node pool information.

    Synchronous by default; pass a `callback` keyword argument taking the
    response to make the request asynchronous, in which case the request
    thread is returned instead.

    :param str storagepool_nodepool_id: id of the node pool (required)
    :param callback function: invoked with the response (optional)
    :return: StoragepoolNodepools
    """
    # Only the optional `callback` keyword is accepted.
    for name in kwargs:
        if name != 'callback':
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_storagepool_nodepool" % name)
    if storagepool_nodepool_id is None:
        raise ValueError("Missing the required parameter `storagepool_nodepool_id` when calling `get_storagepool_nodepool`")

    header_params = {}
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])

    return self.api_client.call_api(
        '/platform/3/storagepool/nodepools/{StoragepoolNodepoolId}'.replace('{format}', 'json'), 'GET',
        {'StoragepoolNodepoolId': storagepool_nodepool_id},
        {},  # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='StoragepoolNodepools',
        auth_settings=['basic_auth'],
        callback=kwargs.get('callback'))
def get_storagepool_settings(self, **kwargs):
    """List all settings.

    Synchronous by default; pass a `callback` keyword argument taking the
    response to make the request asynchronous, in which case the request
    thread is returned instead.

    :param callback function: invoked with the response (optional)
    :return: StoragepoolSettings
    """
    # Only the optional `callback` keyword is accepted.
    for name in kwargs:
        if name != 'callback':
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_storagepool_settings" % name)

    header_params = {}
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])

    return self.api_client.call_api(
        '/platform/1/storagepool/settings'.replace('{format}', 'json'), 'GET',
        {},  # path params
        {},  # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='StoragepoolSettings',
        auth_settings=['basic_auth'],
        callback=kwargs.get('callback'))
def get_storagepool_status(self, **kwargs):
"""
List any health conditions detected.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_storagepool_status(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: StoragepoolStatus
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_storagepool_status" % key
)
params[key] = val
del params['kwargs']
resource_path = '/platform/1/storagepool/status'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StoragepoolStatus',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_storagepool_storagepools(self, **kwargs):
"""
List all storage pools.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_storagepool_storagepools(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: StoragepoolStoragepools
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_storagepool_storagepools" % key
)
params[key] = val
del params['kwargs']
resource_path = '/platform/3/storagepool/storagepools'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StoragepoolStoragepools',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_storagepool_suggested_protection_nid(self, storagepool_suggested_protection_nid, **kwargs):
"""
Retrieve the suggested protection policy.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_storagepool_suggested_protection_nid(storagepool_suggested_protection_nid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str storagepool_suggested_protection_nid: Retrieve the suggested protection policy. (required)
:return: StoragepoolSuggestedProtection
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['storagepool_suggested_protection_nid']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_storagepool_suggested_protection_nid" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'storagepool_suggested_protection_nid' is set
if ('storagepool_suggested_protection_nid' not in params) or (params['storagepool_suggested_protection_nid'] is None):
raise ValueError("Missing the required parameter `storagepool_suggested_protection_nid` when calling `get_storagepool_suggested_protection_nid`")
resource_path = '/platform/3/storagepool/suggested-protection/{StoragepoolSuggestedProtectionNid}'.replace('{format}', 'json')
path_params = {}
if 'storagepool_suggested_protection_nid' in params:
path_params['StoragepoolSuggestedProtectionNid'] = params['storagepool_suggested_protection_nid']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StoragepoolSuggestedProtection',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_storagepool_tier(self, storagepool_tier_id, **kwargs):
"""
Retrieve tier information.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_storagepool_tier(storagepool_tier_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str storagepool_tier_id: Retrieve tier information. (required)
:return: StoragepoolTiers
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['storagepool_tier_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_storagepool_tier" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'storagepool_tier_id' is set
if ('storagepool_tier_id' not in params) or (params['storagepool_tier_id'] is None):
raise ValueError("Missing the required parameter `storagepool_tier_id` when calling `get_storagepool_tier`")
resource_path = '/platform/1/storagepool/tiers/{StoragepoolTierId}'.replace('{format}', 'json')
path_params = {}
if 'storagepool_tier_id' in params:
path_params['StoragepoolTierId'] = params['storagepool_tier_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StoragepoolTiers',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_storagepool_unprovisioned(self, **kwargs):
"""
Get the unprovisioned nodes and drives
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_storagepool_unprovisioned(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: StoragepoolUnprovisioned
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_storagepool_unprovisioned" % key
)
params[key] = val
del params['kwargs']
resource_path = '/platform/1/storagepool/unprovisioned'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StoragepoolUnprovisioned',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def list_compatibilities_class_active(self, **kwargs):
"""
Get a list of active compatibilities
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_compatibilities_class_active(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: CompatibilitiesClassActiveExtended
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_compatibilities_class_active" % key
)
params[key] = val
del params['kwargs']
resource_path = '/platform/1/storagepool/compatibilities/class/active'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CompatibilitiesClassActiveExtended',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def list_compatibilities_ssd_active(self, **kwargs):
"""
Get a list of active ssd compatibilities
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_compatibilities_ssd_active(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: CompatibilitiesSsdActiveExtended
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_compatibilities_ssd_active" % key
)
params[key] = val
del params['kwargs']
resource_path = '/platform/3/storagepool/compatibilities/ssd/active'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CompatibilitiesSsdActiveExtended',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def list_storagepool_nodepools(self, **kwargs):
"""
List all node pools.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_storagepool_nodepools(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: StoragepoolNodepoolsExtended
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_storagepool_nodepools" % key
)
params[key] = val
del params['kwargs']
resource_path = '/platform/3/storagepool/nodepools'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StoragepoolNodepoolsExtended',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def list_storagepool_tiers(self, **kwargs):
"""
List all tiers.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_storagepool_tiers(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: StoragepoolTiersExtended
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_storagepool_tiers" % key
)
params[key] = val
del params['kwargs']
resource_path = '/platform/1/storagepool/tiers'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StoragepoolTiersExtended',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def update_compatibilities_ssd_active_by_id(self, compatibilities_ssd_active_id_params, compatibilities_ssd_active_id, **kwargs):
"""
Modify an ssd compatibility by id
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_compatibilities_ssd_active_by_id(compatibilities_ssd_active_id_params, compatibilities_ssd_active_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param CompatibilitiesSsdActiveIdParams compatibilities_ssd_active_id_params: (required)
:param str compatibilities_ssd_active_id: Modify an ssd compatibility by id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['compatibilities_ssd_active_id_params', 'compatibilities_ssd_active_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_compatibilities_ssd_active_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'compatibilities_ssd_active_id_params' is set
if ('compatibilities_ssd_active_id_params' not in params) or (params['compatibilities_ssd_active_id_params'] is None):
raise ValueError("Missing the required parameter `compatibilities_ssd_active_id_params` when calling `update_compatibilities_ssd_active_by_id`")
# verify the required parameter 'compatibilities_ssd_active_id' is set
if ('compatibilities_ssd_active_id' not in params) or (params['compatibilities_ssd_active_id'] is None):
raise ValueError("Missing the required parameter `compatibilities_ssd_active_id` when calling `update_compatibilities_ssd_active_by_id`")
resource_path = '/platform/3/storagepool/compatibilities/ssd/active/{CompatibilitiesSsdActiveId}'.replace('{format}', 'json')
path_params = {}
if 'compatibilities_ssd_active_id' in params:
path_params['CompatibilitiesSsdActiveId'] = params['compatibilities_ssd_active_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'compatibilities_ssd_active_id_params' in params:
body_params = params['compatibilities_ssd_active_id_params']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def update_storagepool_nodepool(self, storagepool_nodepool, storagepool_nodepool_id, **kwargs):
"""
Modify node pool. All input fields are optional, but one or more must be supplied.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_storagepool_nodepool(storagepool_nodepool, storagepool_nodepool_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param StoragepoolNodepool storagepool_nodepool: (required)
:param str storagepool_nodepool_id: Modify node pool. All input fields are optional, but one or more must be supplied. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['storagepool_nodepool', 'storagepool_nodepool_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_storagepool_nodepool" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'storagepool_nodepool' is set
if ('storagepool_nodepool' not in params) or (params['storagepool_nodepool'] is None):
raise ValueError("Missing the required parameter `storagepool_nodepool` when calling `update_storagepool_nodepool`")
# verify the required parameter 'storagepool_nodepool_id' is set
if ('storagepool_nodepool_id' not in params) or (params['storagepool_nodepool_id'] is None):
raise ValueError("Missing the required parameter `storagepool_nodepool_id` when calling `update_storagepool_nodepool`")
resource_path = '/platform/3/storagepool/nodepools/{StoragepoolNodepoolId}'.replace('{format}', 'json')
path_params = {}
if 'storagepool_nodepool_id' in params:
path_params['StoragepoolNodepoolId'] = params['storagepool_nodepool_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'storagepool_nodepool' in params:
body_params = params['storagepool_nodepool']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def update_storagepool_settings(self, storagepool_settings, **kwargs):
"""
Modify one or more settings.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_storagepool_settings(storagepool_settings, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param StoragepoolSettingsExtended storagepool_settings: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['storagepool_settings']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_storagepool_settings" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'storagepool_settings' is set
if ('storagepool_settings' not in params) or (params['storagepool_settings'] is None):
raise ValueError("Missing the required parameter `storagepool_settings` when calling `update_storagepool_settings`")
resource_path = '/platform/1/storagepool/settings'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'storagepool_settings' in params:
body_params = params['storagepool_settings']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def update_storagepool_tier(self, storagepool_tier, storagepool_tier_id, **kwargs):
"""
Modify tier. All input fields are optional, but one or more must be supplied.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_storagepool_tier(storagepool_tier, storagepool_tier_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param StoragepoolTier storagepool_tier: (required)
:param str storagepool_tier_id: Modify tier. All input fields are optional, but one or more must be supplied. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['storagepool_tier', 'storagepool_tier_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_storagepool_tier" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'storagepool_tier' is set
if ('storagepool_tier' not in params) or (params['storagepool_tier'] is None):
raise ValueError("Missing the required parameter `storagepool_tier` when calling `update_storagepool_tier`")
# verify the required parameter 'storagepool_tier_id' is set
if ('storagepool_tier_id' not in params) or (params['storagepool_tier_id'] is None):
raise ValueError("Missing the required parameter `storagepool_tier_id` when calling `update_storagepool_tier`")
resource_path = '/platform/1/storagepool/tiers/{StoragepoolTierId}'.replace('{format}', 'json')
path_params = {}
if 'storagepool_tier_id' in params:
path_params['StoragepoolTierId'] = params['storagepool_tier_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'storagepool_tier' in params:
body_params = params['storagepool_tier']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
|
# coding=utf-8
import numpy as np
import scipy.interpolate as intpl
import scipy.sparse as sprs
def to_sparse(D, format="csc"):
    """
    Convert a dense matrix into the requested scipy.sparse storage format.

    Supported formats: bsr, coo, csc, csr, dia, dok, lil. Any other value
    falls back to a dense representation via ``to_dense``.

    :param D: Dense matrix
    :param format: how to save the sparse matrix
    :return: sparse version
    """
    constructors = {
        "bsr": sprs.bsr_matrix,   # Block Sparse Row matrix
        "coo": sprs.coo_matrix,   # COOrdinate format
        "csc": sprs.csc_matrix,   # Compressed Sparse Column matrix
        "csr": sprs.csr_matrix,   # Compressed Sparse Row matrix
        "dia": sprs.dia_matrix,   # DIAgonal storage
        "dok": sprs.dok_matrix,   # Dictionary Of Keys
        "lil": sprs.lil_matrix,   # Row-based linked list
    }
    try:
        make = constructors[format]
    except KeyError:
        # unknown format: mirror the original fallback to dense
        return to_dense(D)
    return make(D)
def to_dense(D):
    """
    Return a dense numpy array for D.

    :param D: sparse matrix, ndarray, or any array-like
    :return: dense numpy array (the same object if D is already an ndarray)
    """
    if sprs.issparse(D):
        return D.toarray()
    if isinstance(D, np.ndarray):
        return D
    # The original fell through here and returned None implicitly;
    # coerce generic array-likes (lists, tuples, scalars) instead.
    return np.asarray(D)
def next_neighbors_periodic(p, ps, k, T=None):
    """
    For a value p, find the k nodes of ps that are closest to it when the
    node vector is continued periodically with period T.

    :param p: value
    :param ps: ndarray, vector where to find the next neighbors
    :param k: integer, number of neighbours
    :param T: period; if None it is estimated from the node spacing
              (assumes a uniform grid — TODO confirm with callers)
    :return: (indices, values) — lists with the k neighbor indices and the
             (possibly period-shifted) node positions near p
    """
    if T is None:
        T = ps[-1] - 2 * ps[0] + ps[1]
    # map p into the base period
    p_bar = p - np.floor(p / T) * T
    ps = ps - ps[0]
    # signed distance of each node to p, picking whichever periodic image
    # (shifted by +T, 0 or -T) is nearest; ties resolve in that order,
    # matching the original if/elif chain
    distance_to_p = []
    for tk in ps:
        images = (tk + T - p_bar, tk - p_bar, tk - T - p_bar)
        distance_to_p.append(min(images, key=abs))
    value_index = [(d, i) for i, d in enumerate(distance_to_p)]
    # sort by absolute distance; the original used the Python-2-only
    # `sorted(cmp=...)` keyword and `cmp` builtin, which fail on Python 3
    by_abs = sorted(value_index, key=lambda s: abs(s[0]))
    if k % 2 == 1:
        # odd k: consider one extra candidate, then keep the k left-most
        chosen = sorted(by_abs[0:k + 1], key=lambda s: s[0])[:k]
    else:
        chosen = sorted(by_abs[0:k], key=lambda s: s[0])
    # return concrete lists (the original returned py2 `map` results)
    return [s[1] for s in chosen], [s[0] + p for s in chosen]
def next_neighbors(p, ps, k):
    """
    Return the indices of the k nodes of ps closest to the value p,
    in ascending index order.

    :param p: value
    :param ps: ndarray, vector where to find the next neighbors
    :param k: integer, number of neighbours
    :return: sorted list of the k nearest indices
    """
    distances = np.abs(ps - p)
    # stable sort by distance keeps lower indices first on ties, matching
    # the original (distance, index)-tuple sort
    nearest_first = sorted(range(distances.size), key=lambda i: distances[i])
    return sorted(nearest_first[:k])
def continue_periodic_array(arr, nn, T):
    """
    Gather arr[nn], shifting entries after a wrap-around by -T so the
    result is a monotone continuation of the periodic array.

    :param arr: ndarray of node values with period T
    :param nn: index sequence, contiguous except possibly one wrap point
    :param T: period length
    :return: ndarray of the (possibly shifted) values
    """
    nn = np.asarray(nn)
    gaps = nn[1:] - nn[:-1]
    if np.all(gaps == np.ones(nn.shape[0] - 1)):
        # contiguous indices: plain fancy indexing suffices
        return arr[nn]
    out = [arr[nn[0]]]
    offset = 0.
    for idx, gap in zip(nn[1:], gaps):
        if gap != 1:
            # wrap detected: everything from here on lives one period lower
            offset = -T
        out.append(arr[idx] + offset)
    return np.asarray(out)
def restriction_matrix_1d(fine_grid, coarse_grid, k=2, return_type="csc", periodic=False, T=1.0):
    """
    Construct the restriction matrix between two 1d grids, using Lagrange
    interpolation.

    :param fine_grid: a one dimensional 1d array containing the nodes of the fine grid
    :param coarse_grid: a one dimensional 1d array containing the nodes of the coarse grid
    :param k: order of the restriction (number of interpolation neighbors)
    :param return_type: sparse format handed to ``to_sparse``
    :param periodic: treat the grids as periodic with period T
    :param T: period length (only used when periodic=True)
    :return: a restriction matrix
    """
    M = np.zeros((coarse_grid.size, fine_grid.size))
    # rolling this unit vector through all k positions yields the
    # cardinal Lagrange basis (hoisted out of the loop — it is invariant)
    unit = np.asarray([1.0] + [0.0] * (k - 1))
    for i, p in enumerate(coarse_grid):
        if periodic:
            nn, cont_arr = next_neighbors_periodic(p, fine_grid, k, T)
            # materialize: on Python 3 the helper may yield lazy iterators,
            # and np.asarray(map(...)) would give a useless 0-d object array
            nn = list(nn)
            support = np.asarray(list(cont_arr))
        else:
            nn = next_neighbors(p, fine_grid, k)
            support = fine_grid[nn]
        # evaluate each cardinal polynomial at p; list comprehension keeps
        # this Python-3-safe (the original used np.asarray(map(...)))
        weights = [intpl.lagrange(support, np.roll(unit, l))(p) for l in range(k)]
        M[i, nn] = np.asarray(weights)
    return to_sparse(M, return_type)
def interpolation_matrix_1d(fine_grid, coarse_grid, k=2, return_type="csc", periodic=False, T=1.0):
    """
    We construct the interpolation matrix between two 1d grids, using lagrange interpolation.

    :param fine_grid: a one dimensional 1d array containing the nodes of the fine grid
    :param coarse_grid: a one dimensional 1d array containing the nodes of the coarse grid
    :param k: order of the interpolation (number of coarse neighbors per fine node)
    :param return_type: sparse format handed to ``to_sparse`` (default "csc")
    :param periodic: treat the grids as periodic with period ``T``
    :param T: period length, only used when ``periodic`` is True
    :return: a interpolation matrix (sparse, format ``return_type``)
    """
    M = np.zeros((fine_grid.size, coarse_grid.size))
    n_f = fine_grid.size
    for i, p in zip(range(n_f), fine_grid):
        # Find the k coarse-grid nodes surrounding p; in the periodic case the
        # neighbor values come back already unwrapped (cont_arr).
        if periodic:
            nn, support = next_neighbors_periodic(p, coarse_grid, k, T)
        else:
            nn = next_neighbors(p, coarse_grid, k)
            support = coarse_grid[nn]
        # construct the lagrange polynomials for the k neighbors
        circulating_one = np.asarray([1.0] + [0.0] * (k - 1))
        lag_pol = [intpl.lagrange(support, np.roll(circulating_one, l)) for l in range(k)]
        # BUG FIX: the original used np.asarray(map(...)), which under Python 3
        # wraps the un-evaluated map iterator in a 0-d object array instead of
        # evaluating the polynomials; evaluate eagerly with a comprehension.
        M[i, nn] = np.asarray([pol(p) for pol in lag_pol])
    return to_sparse(M, return_type)
def kron_on_list(matrix_list):
    """Kronecker product of all matrices in ``matrix_list``, folded right-to-left.

    :param matrix_list: a list of sparse matrices
    :return: a matrix (A_0 kron (A_1 kron (... A_{n-1})))
    """
    if len(matrix_list) == 1:
        # Single matrix: nothing to combine.
        return matrix_list[0]
    if len(matrix_list) == 2:
        return sprs.kron(matrix_list[0], matrix_list[1])
    # More than two: recurse on the tail first (right fold).
    return sprs.kron(matrix_list[0], kron_on_list(matrix_list[1:]))
def matrixN(tau, rows=-1, last_value=1.0):
    """Build the matrix N whose column i is -l_i(last_value) in every row,
    where l_i are the Lagrange basis polynomials on the nodes ``tau``.

    :param tau: 1d array of collocation nodes
    :param rows: number of rows (defaults to len(tau) when -1)
    :param last_value: evaluation point of the basis polynomials
    :return: (rows, len(tau)) ndarray
    """
    n = tau.shape[0]
    if rows == -1:
        rows = n
    N = np.zeros((rows, n))
    # Lagrange basis: a unit vector rolled so that l_i(tau[i]) == 1.
    unit = np.asarray([1.0] + [0.0] * (n - 1))
    for i in range(n):
        basis_poly = intpl.lagrange(tau, np.roll(unit, i))
        N[:, i] = -np.ones(rows) * basis_poly(last_value)
    return N
def interpolate_to_t_end(nodes_on_unit, values):
    """
    Assume GaussLegendre nodes: we want the value at the end of the interval,
    but only know the values at interior nodes.  The end value is obtained by
    Lagrange interpolation evaluated at 1.0.

    :param nodes_on_unit: nodes transformed to the unit interval
    :param values: values on those nodes
    :return: interpolation to the end of the interval
    """
    n = nodes_on_unit.shape[0]
    unit = np.asarray([1.0] + [0.0] * (n - 1))
    result = np.zeros(values[0].shape)
    for i in range(n):
        # Weight of node i is its Lagrange basis polynomial evaluated at t=1.
        weight = intpl.lagrange(nodes_on_unit, np.roll(unit, i))(1.0)
        result += values[i] * weight
    return result
|
from bs4 import BeautifulSoup as bs
from splinter import Browser
import pandas as pd
import time
# Initialize browser
def init_browser():
    """Create a headful Chrome ``splinter.Browser`` driven by the local chromedriver."""
    driver_path = {'executable_path': '/usr/local/bin/chromedriver'}
    return Browser('chrome', **driver_path, headless=False)
def scrape():
    """Scrape Mars news, the JPL featured image, the facts table and the
    hemisphere images into one dictionary.

    Returns:
        dict with keys 'title', 'text', 'featured_image_url', 'mars_table'
        and 'hemisphere_image_urls'.
    """
    # BUG FIX: the original called init_browser() once per section, opening
    # four webdriver processes and never closing any of them.  Reuse a single
    # session and always release it in the finally block.
    browser = init_browser()
    try:
        # Create a dictionary for storing all scraped data
        mars_data = {}

        # # NASA Mars News
        url = 'https://mars.nasa.gov/news/'
        browser.visit(url)
        # 5 second wait for loading data
        time.sleep(5)
        soup = bs(browser.html, 'html.parser')
        # Retrieve the latest news title and paragraph text
        news = soup.find('div', class_='list_text')
        mars_data['title'] = news.find('div', class_='content_title').text
        mars_data['text'] = news.find('div', class_='article_teaser_body').text

        # # JPL Mars Space Images - Featured Image
        url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
        browser.visit(url)
        # Click on "Full Image" button to reveal the full-size image link
        browser.find_by_id('full_image').click()
        soup = bs(browser.html, 'html.parser')
        # Relative path of the full image, joined onto the site base URL
        image_url = soup.find("a", class_="button fancybox")['data-fancybox-href']
        base_url = 'https://www.jpl.nasa.gov'
        mars_data['featured_image_url'] = base_url + image_url

        # # Mars Facts
        url = 'http://space-facts.com/mars/'
        browser.visit(url)
        # Pandas scrapes the facts table directly from the URL
        table = pd.read_html(url)
        mars_facts_DF = table[0]
        mars_facts_DF.columns = ['Description', 'Values']
        mars_facts_DF = mars_facts_DF.set_index('Description')
        # Store the table as a single-line HTML string
        mars_data['mars_table'] = mars_facts_DF.to_html().replace('\n', ' ')

        # # Mars Hemispheres
        url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
        browser.visit(url)
        hemisphere_image_urls = []
        # Loop through all four hemisphere products
        for x in range(4):
            # Re-query the product headings each pass (the DOM is reloaded
            # after browser.back()) and open the x-th product page.
            item = browser.find_by_tag('h3')
            item[x].click()
            soup = bs(browser.html, 'html.parser')
            img_title = soup.find('h2', class_='title').text
            img_url = soup.find('a', target='_blank')['href']
            hemisphere_image_urls.append({"title": img_title, "img_url": img_url})
            # Return to the results page for the next product
            browser.back()
        mars_data['hemisphere_image_urls'] = hemisphere_image_urls
    finally:
        # Always release the webdriver, even if a selector fails mid-scrape.
        browser.quit()
    return mars_data
import os
import shutil
import zipfile
import urllib.parse
import urllib.request
import torch
import torch.utils.data
from dataset import *
import pickle
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import numpy as np
import csv
from collections import Counter, defaultdict
def maybe_download_and_unzip_file(file_url, file_name=None):
    """Download ``file_url`` to ``file_name`` unless it already exists locally.

    The target name defaults to the URL's basename.  Freshly downloaded
    ``.zip`` archives are extracted into the current directory.
    """
    if file_name is None:
        file_name = os.path.basename(file_url)
    if os.path.exists(file_name):
        # Nothing to do: the file was downloaded on a previous run.
        print(f'Exists: {file_name}')
        return
    print(f'Downloading: {file_name}')
    with urllib.request.urlopen(file_url) as response, open(file_name, 'wb') as target_file:
        shutil.copyfileobj(response, target_file)
    print(f'Downloaded: {file_name}')
    if os.path.splitext(file_name)[1] == '.zip':
        print(f'Extracting zip: {file_name}')
        with zipfile.ZipFile(file_name, 'r') as zip_file:
            zip_file.extractall('.')
def load_model(model_class, filename):
    """Instantiate ``model_class`` from a checkpoint written by ``save_model``.

    The checkpoint must hold 'model_params' (constructor kwargs) and
    'model_state' (a state dict).
    """
    def _to_cpu(storage, loc):
        return storage

    # Checkpoints saved on GPU machines are remapped onto the CPU when no
    # CUDA device is available here.
    remap = _to_cpu if not torch.cuda.is_available() else None
    state = torch.load(str(filename), map_location=remap)
    model = model_class(**state['model_params'])
    model.load_state_dict(state['model_state'])
    return model
def save_model(model, filename, model_params=None):
    """Persist ``model`` together with the kwargs needed to re-instantiate it.

    DataParallel wrappers are unwrapped first so the stored state dict has
    plain (non-'module.'-prefixed) keys.
    """
    if isinstance(model, torch.nn.DataParallel):
        model = model.module
    checkpoint = {
        'model_params': model_params or {},
        'model_state': model.state_dict(),
    }
    torch.save(checkpoint, str(filename))
def cal_deviation(hidden_val, golds_treatment, logits_treatment, normalized=False):
    """Quantify covariate imbalance between treated and control samples.

    Computes the per-dimension absolute standardized mean difference (SMD) of
    ``hidden_val`` between the treated group (golds_treatment == 1) and the
    control group (golds_treatment == 0), both unweighted and after inverse
    propensity weighting, and saves t-SNE scatter plots of both versions
    ('original.png' / 'weighted.png') via the module-level plot() helper.

    :param hidden_val: array-like covariates/representations; assumes shape
        (n_samples, n_features) — TODO confirm against callers
    :param golds_treatment: binary (0/1) treatment indicator per sample
    :param logits_treatment: propensity scores; raw logits unless ``normalized``
    :param normalized: if False, a sigmoid is applied to ``logits_treatment``
    :return: (max unweighted SMD, per-dim unweighted SMD,
              max weighted SMD, per-dim weighted SMD)
    """
    ones_idx, zeros_idx = np.where(golds_treatment == 1), np.where(golds_treatment == 0)
    hidden_val = np.asarray(hidden_val)
    hidden_treated, hidden_controlled = hidden_val[ones_idx], hidden_val[zeros_idx]
    if not normalized:
        # Convert raw logits to propensities with the sigmoid.
        logits_treatment = 1 / (1 + np.exp(-logits_treatment))
    # Stabilized IPW weights: p(T)/e(x) for treated, (1-p(T))/(1-e(x)) for control.
    p_T = len(ones_idx[0])/(len(ones_idx[0])+len(zeros_idx[0]))
    treated_w, controlled_w = p_T/logits_treatment[ones_idx], (1-p_T)/(1.-logits_treatment[zeros_idx])
    # Clip weights at their 1st/99th percentiles to curb extreme propensities.
    treated_w = np.clip(treated_w, a_min=np.quantile(treated_w, 0.01),
                        a_max=np.quantile(treated_w, 0.99))
    controlled_w = np.clip(controlled_w, a_min=np.quantile(controlled_w, 0.01),
                           a_max=np.quantile(controlled_w, 0.99))
    # Reshape to column vectors so the weights broadcast over feature columns.
    treated_w, controlled_w = np.reshape(treated_w, (len(treated_w),1)), np.reshape(controlled_w, (len(controlled_w),1))
    hidden_treated_w, hidden_controlled_w = np.multiply(hidden_treated, treated_w), np.multiply(hidden_controlled, controlled_w)
    # Unweighted SMD per hidden dimension: |mu_t - mu_c| / pooled std.
    hidden_treated_mu, hidden_treated_var = np.mean(hidden_treated, axis=0), np.var(hidden_treated, axis=0)
    hidden_controlled_mu, hidden_controlled_var = np.mean(hidden_controlled, axis=0), np.var(hidden_controlled, axis=0)
    VAR = np.sqrt((hidden_treated_var + hidden_controlled_var) / 2)
    hidden_deviation = np.abs(hidden_treated_mu - hidden_controlled_mu) / VAR
    # Zero-variance dimensions produce 0/0 = NaN; count them as balanced.
    hidden_deviation[np.isnan(hidden_deviation)] = 0
    max_unbalanced_original = np.max(hidden_deviation)
    # Same SMD computation on the IPW-weighted representations.
    hidden_treated_w_mu, hidden_treated_w_var = np.mean(hidden_treated_w, axis=0), np.var(hidden_treated_w, axis=0)
    hidden_controlled_w_mu, hidden_controlled_w_var = np.mean(hidden_controlled_w, axis=0), np.var(hidden_controlled_w, axis=0)
    VAR = np.sqrt((hidden_treated_w_var + hidden_controlled_w_var) / 2)
    hidden_deviation_w = np.abs(hidden_treated_w_mu - hidden_controlled_w_mu) / VAR
    hidden_deviation_w[np.isnan(hidden_deviation_w)] = 0
    max_unbalanced_weighted = np.max(hidden_deviation_w)
    # Side effect: write t-SNE visualizations of both group pairs to disk.
    plot(hidden_treated, hidden_controlled, 'original.png')
    plot(hidden_treated_w, hidden_controlled_w, 'weighted.png')
    return max_unbalanced_original, hidden_deviation, max_unbalanced_weighted, hidden_deviation_w
def plot(hidden_treated, hidden_controlled, save_file):
    """Project both groups to 2-D with t-SNE and save a labelled scatter plot.

    Note: the two groups are embedded by separate fit_transform calls, so
    their coordinate systems are not directly comparable.
    """
    reducer = TSNE(n_components=2)
    emb_t = reducer.fit_transform(hidden_treated)
    emb_c = reducer.fit_transform(hidden_controlled)
    plt.figure()
    plt.scatter(emb_t[:, 0], emb_t[:, 1], alpha=0.8, c='red', edgecolors='none', s=30, label='treated')
    plt.scatter(emb_c[:, 0], emb_c[:, 1], alpha=0.8, c='blue', edgecolors='none', s=30, label='controlled')
    plt.legend()
    plt.savefig(save_file)
def cal_ATE(golds_treatment, logits_treatment, golds_outcome, normalized=False):
    """Estimate the average treatment effect, unadjusted and IPW-adjusted.

    :param golds_treatment: binary (0/1) treatment indicator per sample
    :param logits_treatment: propensity scores; raw logits unless ``normalized``
    :param golds_outcome: observed outcomes per sample
    :param normalized: if False, a sigmoid is applied to ``logits_treatment``
    :return: two triples ``(E[Y1], E[Y0], ATE)`` — first the uncorrected
        (naive) estimates, then the inverse-propensity-weighted estimates.
    """
    treated = np.where(golds_treatment == 1)
    control = np.where(golds_treatment == 0)
    if not normalized:
        # Treat raw logits as propensity scores via the sigmoid.
        logits_treatment = 1 / (1 + np.exp(-logits_treatment))
    n_treated = len(treated[0])
    p_T = n_treated / (n_treated + len(control[0]))
    # Stabilized IPW weights: p(T)/e(x) for treated, (1-p(T))/(1-e(x)) for control.
    w_t = p_T / logits_treatment[treated]
    w_c = (1 - p_T) / (1. - logits_treatment[control])
    # Winsorize the weights at the 1st/99th percentiles to tame outliers.
    w_t = np.clip(w_t, a_min=np.quantile(w_t, 0.01), a_max=np.quantile(w_t, 0.99))
    w_c = np.clip(w_c, a_min=np.quantile(w_c, 0.01), a_max=np.quantile(w_c, 0.99))
    w_t = np.reshape(w_t, (len(w_t), 1))
    w_c = np.reshape(w_c, (len(w_c), 1))
    y_t, y_c = golds_outcome[treated], golds_outcome[control]
    y_t_w, y_c_w = y_t * w_t, y_c * w_c
    # Naive (uncorrected) estimates and their difference.
    ey1, ey0 = np.mean(y_t), np.mean(y_c)
    # IPW estimates and their difference.
    ey1_w, ey0_w = np.mean(y_t_w), np.mean(y_c_w)
    return (ey1, ey0, ey1 - ey0), (ey1_w, ey0_w, ey1_w - ey0_w)
def get_cohort_size(data_dir):
    """Record the length of every cohort pickle under ``data_dir``.

    The mapping {file name -> cohort length} is dumped to
    './pickles/cohorts_size.pkl'.
    """
    sizes = {}
    for cohort in os.listdir(data_dir):
        with open(data_dir + cohort, 'rb') as fh:
            sizes[cohort] = len(pickle.load(fh))
    with open('./pickles/cohorts_size.pkl', 'wb') as out:
        pickle.dump(sizes, out)
def load_cohort_size():
    """Load the {cohort file name -> size} mapping written by get_cohort_size."""
    with open('mymodel/pickles/cohorts_size.pkl', 'rb') as fh:
        return pickle.load(fh)
def get_medi_CAD_indication():
    """Extract unique (RxNorm id, drug name) pairs indicated for CAD from MEDI.

    Reads 'data/MEDI_01212013.csv' (header skipped) and writes every unique
    pair whose ICD-9 code starts with 410-414 (ischemic heart disease /
    coronary artery disease) to 'data/MEDI_CAD_INDI.csv'.
    """
    seen = set()
    with open('data/MEDI_01212013.csv', 'r') as f, \
            open('data/MEDI_CAD_INDI.csv', 'w') as outputs:
        next(f)  # skip header row
        for row in f:
            row = row.split(',')
            if len(row) < 4:
                # Malformed/short line: skip it.
                continue
            rx_id, drugname, icd = row[0], row[1], row[2]
            # BUG FIX: the original tested membership in the single string
            # '410,411,412,413,414' (parenthesized string, not a tuple), so
            # any substring such as '41' or '1' matched.  Compare against a
            # tuple of the five ICD-9 prefixes instead.
            if icd[:3] in ('410', '411', '412', '413', '414'):
                if (rx_id, drugname) not in seen:
                    outputs.write('{},{}\n'.format(rx_id, drugname))
                    seen.add((rx_id, drugname))
def plot_deviation(unbalanced_deviation, balanced_deviation, labels, save_plt):
    """Love-plot of per-covariate standardized mean differences.

    Draws, for each covariate i, the unweighted SMD (sandybrown) and the
    LR-IPW weighted SMD (skyblue) on row i, and saves the figure to
    ``save_plt``.

    :param unbalanced_deviation: per-covariate SMD before weighting
    :param balanced_deviation: per-covariate SMD after IPW weighting
    :param labels: covariate names used as y-tick labels
    :param save_plt: path of the output image
    """
    plt.figure(figsize=(12, 6), dpi=100)
    for i in range(len(unbalanced_deviation)):
        if i == 0:
            # Attach legend labels only once, on the first pair of markers.
            plt.scatter(unbalanced_deviation[i], i, color='sandybrown', s=50, label='unweighted')
            plt.scatter(balanced_deviation[i], i, color='skyblue', s=50, label='LR_IPW')
        else:
            plt.scatter(unbalanced_deviation[i], i, color='sandybrown', s=50)
            plt.scatter(balanced_deviation[i], i, color='skyblue', s=50)
            # NOTE(review): the connector line is drawn only for i > 0 here,
            # so the first covariate has no before/after link — confirm intent.
            plt.plot([unbalanced_deviation[i], balanced_deviation[i]], [i, i], color='dimgray')
    plt.yticks(range(len(unbalanced_deviation)), labels)
    plt.legend()
    plt.xlabel('Absolute Standard Mean Difference')
    plt.ylabel('Covariates')
    plt.tight_layout()
    plt.savefig(save_plt)
# get_medi_CAD_indication()
# path_to_css10 = os.path.join(os.getcwd(), '../data/CCS/icd10.csv')
# load_ccs10_2name = np.loadtxt(path_to_css10, delimiter=',', usecols=(1, 3), dtype=str, skiprows=1)
# ccs10_2name_mapping = {(ccs.replace('\'', '')).strip(): name.replace('\"', '') for [ccs, name] in
# load_ccs10_2name}
# pickle.dump(ccs10_2name_mapping, open('../pickles/ccs10_2name_mapping.pkl', 'wb'))
#
# path_to_css9 = os.path.join(os.getcwd(), '../data/CCS/icd9.csv')
# load_ccs9_2name = np.loadtxt(path_to_css9, delimiter=',', usecols=(1, 2), dtype=str, skiprows=1)
# ccs9_2name_mapping = {(ccs.replace('\'', '')).strip(): name.replace('\'', '') for [ccs, name] in
# load_ccs9_2name}
# pickle.dump(ccs9_2name_mapping, open('../pickles/ccs9_2name_mapping.pkl', 'wb'))
#
# path_to_NDC = os.path.join(os.getcwd(), '../data/NDC_complete_mapping.csv')
# load_ccs2name = np.loadtxt(path_to_NDC, delimiter=',', usecols=(0, 1), dtype=str)
# rx2name_mapping = {id: name for [name, id] in load_ccs2name}
# pickle.dump(rx2name_mapping, open('../pickles/rx2name_mapping.pkl', 'wb'))
# drug_atc = {}
# atc_drug = defaultdict(list)
# with open('data/DRUG.csv') as csvfile:
# readCSV = csv.reader(csvfile, delimiter=',')
# next(readCSV)
# for row in readCSV:
# drug_atc[row[1].lower()] = row[19][:3]
#
# mydrug = set()
# with open('res/11.6/LR.csv') as csvfile:
# readCSV = csv.reader(csvfile, delimiter=',')
# next(readCSV)
# for row in readCSV:
# mydrug.add(row[1].lower())
#
# out = open('data/ATC.csv', 'w')
# for drug in mydrug:
# if drug in drug_atc:
# print('{}: {}'.format(drug, drug_atc.get(drug)))
# atc_drug[drug_atc.get(drug)].append(drug)
# out.write('{},{}\n'.format(drug, drug_atc.get(drug)))
# else:
# print('non found: {}'.format(drug))
#
# print(atc_drug)
# drug_id = {}
# with open('res/11.6/LR.csv') as csvfile:
# readCSV = csv.reader(csvfile, delimiter=',')
# next(readCSV)
# for row in readCSV:
# drug_id[row[1].lower()] = row[0]
#
# out = open('data/ATC_final.csv', 'w')
# with open('data/ATC_manu.csv') as csvfile:
# readCSV = csv.reader(csvfile, delimiter=',')
# for row in readCSV:
# drug,atc = row[0],row[1]
# out.write('{},{},{}\n'.format(drug_id.get(drug),drug,atc))
# drug_atc,atc_drug=defaultdict(list), defaultdict(list)
# with open('data/ATC_final.csv') as csvfile:
# readCSV = csv.reader(csvfile, delimiter=',')
# for row in readCSV:
# drug,atc=row[0],row[2]
# drug_atc[drug].append(atc)
# atc_drug[atc].append(drug)
#
# cohort_size = load_cohort_size()
#
# for atc, drugs in atc_drug.items():
# print(atc)
# for drug in drugs:
# n_patient_in_atc = cohort_size.get(drug+'.pkl')
# print(n_patient_in_atc)
#
# pickle.dump(drug_atc, open('mymodel/pickles/DRUG2ATC.pkl','wb'))
# pickle.dump(atc_drug, open('mymodel/pickles/ATC2DRUG.pkl','wb'))
# print(drug_atc)
# print(atc_drug)
# print()
# print(np.random.rand(5))
# np.random.seed(1)
# print(np.random.rand(4))
# import seaborn as sns
# import pandas as pd
# sns.set_style("white")
# # Import data
# df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/diamonds.csv')
# x1 = df.loc[df.cut=='Ideal', 'depth']
# x2 = df.loc[df.cut=='Fair', 'depth']
# # Plot
# kwargs = dict(hist_kws={'alpha':.6}, kde_kws={'linewidth':2})
# plt.figure(figsize=(10,7), dpi= 80)
# sns.distplot(x1.values, color="dodgerblue", label="Compact", **kwargs)
# sns.distplot(x2.values, color="orange", label="SUV", **kwargs)
# plt.xlim(50,75)
# plt.legend()
# plt.show()
# files = os.listdir('user_cohort/10.16')
# patients = set()
# patients_with_stroke = set()
# for f in files:
# load = pickle.load(open('user_cohort/10.16/'+f, 'rb'))
# for patient in load:
# pid, stroke = patient[0], patient[2]
# patients.add(pid)
# if stroke == 1:
# patients_with_stroke.add(pid)
#
# print(len(patients_with_stroke), len(patients))
# print(len(patients_with_stroke)/len(patients))
# np.random.seed(1)
# a = [1,2,3,4,5]
# print(a)
# print(np.random.rand(3))
# np.random.shuffle(a)
# print(a) |
<filename>custom_envs/spurious_predator_prey_env.py
import curses
import gym
import numpy as np
from gym import spaces
class SpuriousPredatorPreyEnv(gym.Env):
def __init__(self,):
self.__version__ = "0.0.1"
self.vision = 0
self.OBS_CLASS = 3
self.OUTSIDE_CLASS = 2
self.PREY_CLASS = 1
self.PREDATOR_CLASS = 0
self.TIMESTEP_PENALTY = -0.05
self.PREY_REWARD = 0
self.POS_PREY_REWARD = 0.05
self.ON_OBS_REWARD = -0.01
self.episode_over = False
self.stdscr = None
self.timestep = None
self.prob_obs = 0.1
self.stuck_on_obs = True
self.do_corrupt_pred = True
self.do_corrupt_prey = True
def init_curses(self):
self.stdscr = curses.initscr()
curses.start_color()
curses.use_default_colors()
curses.init_pair(1, curses.COLOR_RED, -1)
curses.init_pair(2, curses.COLOR_YELLOW, -1)
curses.init_pair(3, curses.COLOR_CYAN, -1)
curses.init_pair(4, curses.COLOR_GREEN, -1)
def init_args(self, parser):
env = parser.add_argument_group('Prey Predator task')
env.add_argument('--nenemies', type=int, default=1,
help="Total number of preys in play")
env.add_argument('--dim', type=int, default=5,
help="Dimension of box")
env.add_argument('--vision', type=int, default=2,
help="Vision of predator")
env.add_argument('--moving_prey', action="store_true", default=False,
help="Whether prey is fixed or moving")
env.add_argument('--no_stay', action="store_true", default=False,
help="Whether predators have an action to stay in place")
parser.add_argument('--mode', default='mixed', type=str,
help='cooperative|competitive|mixed (default: mixed)')
env.add_argument('--enemy_comm', action="store_true", default=False,
help="Whether prey can communicate.")
def multi_agent_init(self, args):
# General variables defining the environment : CONFIG
params = ['dim', 'vision', 'moving_prey', 'mode', 'enemy_comm']
for key in params:
setattr(self, key, getattr(args, key))
self.nprey = args.nenemies
self.npredator = args.nfriendly
self.dims = (self.dim, self.dim)
self.stay = not args.no_stay
if args.moving_prey:
raise NotImplementedError
# (0: UP, 1: RIGHT, 2: DOWN, 3: LEFT, 4: STAY)
# Define what an agent can do -
self.naction = 4
if self.stay:
self.naction += 1
self.action_space = spaces.MultiDiscrete([self.naction])
self.num_padded_grid_cells = (self.dims[0] + 2 * self.vision) * (self.dims[1] + 2 * self.vision)
self.num_grid_cells = (self.dims[0] * self.dims[1])
self.OBS_CLASS += self.num_padded_grid_cells
self.OUTSIDE_CLASS += self.num_padded_grid_cells
self.PREY_CLASS += self.num_padded_grid_cells
self.PREDATOR_CLASS += self.num_padded_grid_cells
# Setting max vocab size for 1-hot encoding. We define vocab_size as the number of possible unique states!?!
# The state space is defined by, for each visible location, what's there.
# There are (self.dims[0] + 2 * self.vision)**2 visible locations because you can see off the grid.
# At each location, a location can be:
# 1) Off the grid
# 2) Have an integer number of predators there
# 3) Have an integer number of prey there.
# 4) Have an obstacle there
# So, the state space is num locations x 3 x max_num_predators.
# Observations are the observations for each visible location, which includes the unique id of the location plus
# what's there.
self.observation_dim = self.num_padded_grid_cells + 4
# Observation for each agent will be, for each visible cell, the location of that cell and what's in it.
self.observation_space = spaces.Box(low=0, high=self.npredator, shape=(2 * self.vision + 1,
2 * self.vision + 1,
self.observation_dim), dtype=int)
if args.seed != -1:
np.random.seed(args.seed)
def step(self, action):
"""
The agents take a step in the environment.
Parameters
----------
action : list/ndarray of length m, containing the indexes of what lever each 'm' chosen agents pulled.
Returns
-------
obs, reward, episode_over, info : tuple
obs (object) :
reward (float) : Ratio of Number of discrete levers pulled to total number of levers.
episode_over (bool) : Will be true as episode length is 1
info (dict) : diagnostic information useful for debugging.
"""
if self.episode_over:
raise RuntimeError("Episode is done")
action = np.array(action).squeeze()
action = np.atleast_1d(action)
for i, a in enumerate(action):
self._take_action(i, a)
assert np.all(action <= self.naction), "Actions should be in the range [0,naction)."
self.episode_over = False
self.obs = self.get_obs()
# print(self.obs)
debug = {'predator_locs': self.predator_loc, 'prey_locs': self.prey_loc}
self.timestep += 1
return self.obs, self._get_reward(), self.episode_over, debug
def reset(self):
"""
Reset the state of the environment and returns an initial observation.
Returns
-------
observation (object): the initial observation of the space.
"""
self.episode_over = False
self.timestep = 0
self.reached_prey = np.zeros(self.npredator)
self.on_obs = np.zeros(self.npredator)
# Locations
locs = self._get_coordinates()
self.predator_loc, self.prey_loc = locs[:self.npredator], locs[self.npredator:]
if self.do_corrupt_pred or self.do_corrupt_prey:
self.prey_loc = np.asarray([[0, self.dims[1] - 1]])
self.predator_loc[0] = np.asarray([[self.dims[0] - 1, 0]])
if self.predator_loc.shape[0] > 1:
self.predator_loc[1] = self.prey_loc
# self.predator_loc[1] = np.asarray([[1, self.dims[1] - 1]])
# Reset the grid
self.grid = np.zeros(self.num_grid_cells).reshape(self.dims)
# Padding for vision
self.grid = np.pad(self.grid, self.vision, 'constant', constant_values = self.OUTSIDE_CLASS)
self.empty_bool_base_grid = self._onehot_initialization()
for obs_loc in self.obs_locations:
obs2d = self.__global_to_idxs__(obs_loc)
self.grid[obs2d[0], obs2d[1]] = self.OBS_CLASS
# stat - like success ratio
self.stat = dict()
self.obs = self.get_obs()
return self.obs
def seed(self):
return
def _get_coordinates(self):
idx = np.random.choice(np.prod(self.dims), (self.npredator + self.nprey), replace=False)
return np.vstack(np.unravel_index(idx, self.dims)).T
def __corrupt_state__(self, true_state):
copied_state = np.copy(true_state)
# noisy_prey_locs = [np.random.randint(0, self.dims[0], self.prey_loc.size)]
noisy_prey_locs = [np.zeros((2,)).astype(int)]
# Now overwrite the location in true state of the prey
for p, noisy_p in zip(self.prey_loc, noisy_prey_locs):
# Remove old prey location info
copied_state[self.__global_idxs_to_global__(p[0] + self.vision, p[1] + self.vision), self.PREY_CLASS] = 0
copied_state[self.__global_idxs_to_global__(noisy_p[0] + self.vision, noisy_p[1] + self.vision), self.PREY_CLASS] = 1
return copied_state, noisy_prey_locs
def __corrupt_no_obs_state__(self, true_state):
copied_state = np.copy(true_state)
# And remove obstacles from map
copied_state[:, self.OBS_CLASS] = 0
return copied_state
def get_obs(self):
bool_base_grid = self.get_true_state()
corrupted_base_grid, corrupted_prey_locs = self.__corrupt_state__(bool_base_grid)
corrupted_pred_grid = self.__corrupt_no_obs_state__(bool_base_grid)
# Agents only observe parts of the state.
obs = []
for p in self.predator_loc:
p_obs = []
for visible_x in range(p[0] - self.vision, p[0] + self.vision + 1):
row_obs = []
for visible_y in range(p[1] - self.vision, p[1] + self.vision + 1):
if not self.do_corrupt_pred:
single_obs = bool_base_grid[self.__global_idxs_to_global__(visible_x + self.vision, visible_y + self.vision)]
else:
single_obs = corrupted_pred_grid[self.__global_idxs_to_global__(visible_x + self.vision, visible_y + self.vision)]
# single_obs = bool_base_grid[self.__global_idxs_to_global__(visible_x + self.vision, visible_y + self.vision)]
row_obs.append(single_obs)
p_obs.append(np.stack(row_obs))
obs.append(np.stack(p_obs))
if self.enemy_comm:
prey_locs = self.prey_loc if not self.do_corrupt_prey else corrupted_prey_locs
for p in prey_locs:
p_obs = []
for visible_x in range(p[0] - self.vision, p[0] + self.vision + 1):
row_obs = []
for visible_y in range(p[1] - self.vision, p[1] + self.vision + 1):
if not self.do_corrupt_prey:
single_obs = bool_base_grid[self.__global_idxs_to_global__(visible_x + self.vision, visible_y + self.vision)]
else:
single_obs = corrupted_base_grid[self.__global_idxs_to_global__(visible_x + self.vision, visible_y + self.vision)]
if self.timestep > 0:
single_obs = np.zeros_like(single_obs)
row_obs.append(single_obs)
p_obs.append(np.stack(row_obs))
obs.append(np.stack(p_obs))
obs = np.stack(obs)
return obs
def get_true_state(self):
"""Returns the true state of the world rather than observations of it."""
# Populate a grid with the true locations of everything.
bool_base_grid = self.empty_bool_base_grid.copy()
for i, p in enumerate(self.predator_loc):
bool_base_grid[self.__global_idxs_to_global__(p[0] + self.vision, p[1] + self.vision), self.PREDATOR_CLASS] += 1
for i, p in enumerate(self.prey_loc):
bool_base_grid[self.__global_idxs_to_global__(p[0] + self.vision, p[1] + self.vision), self.PREY_CLASS] += 1
# Then just return that grid.
return bool_base_grid
def _can_drive_into(self, loc):
return loc != self.OUTSIDE_CLASS # and loc != self.OBS_CLASS
def _take_action(self, idx, act):
# prey action
if idx >= self.npredator:
# fixed prey
if not self.moving_prey:
return
else:
raise NotImplementedError
# The prey is an absorbing state, so predators don't keep moving.
# if self.reached_prey[idx] == 1:
# return
if self.stuck_on_obs and self.on_obs[idx] == 1:
return
# STAY action
if act == 5:
return
# UP
if act == 0 and self._can_drive_into(self.grid[max(self.vision,
self.predator_loc[idx][0] + self.vision - 1),
self.predator_loc[idx][1] + self.vision]):
self.predator_loc[idx][0] = max(0, self.predator_loc[idx][0]-1)
# RIGHT
elif act == 1 and self._can_drive_into(self.grid[self.predator_loc[idx][0] + self.vision,
min(self.dims[1] + self.vision - 1,
self.predator_loc[idx][1] + self.vision + 1)]):
self.predator_loc[idx][1] = min(self.dims[1]-1,
self.predator_loc[idx][1]+1)
# DOWN
elif act == 2 and self._can_drive_into(self.grid[min(self.dims[0] + self.vision -1,
self.predator_loc[idx][0] + self.vision + 1),
self.predator_loc[idx][1] + self.vision]):
self.predator_loc[idx][0] = min(self.dims[0]-1,
self.predator_loc[idx][0]+1)
# LEFT
elif act == 3 and self._can_drive_into(self.grid[self.predator_loc[idx][0] + self.vision,
max(self.vision,
self.predator_loc[idx][1] + self.vision - 1)]):
self.predator_loc[idx][1] = max(0, self.predator_loc[idx][1]-1)
def _check_no_collisions(self):
# Sanity check: are any agents in obstacles?
num_collisions = 0
preds_on_obstacles = []
for obs in self.obs_locations:
for p_id, p in enumerate(self.predator_loc):
if p[0] == obs[0] - self.vision and p[1] == obs[1] - self.vision:
# print("On an obstacle! at", obs)
num_collisions += 1
preds_on_obstacles.append(p_id)
if num_collisions == len(self.predator_loc):
return num_collisions, preds_on_obstacles # All are colliding
break # Regardless, this predator can't collide with more than one obstacle.
return num_collisions, preds_on_obstacles
def _get_reward(self):
n = self.npredator if not self.enemy_comm else self.npredator + self.nprey
reward = np.full(n, self.TIMESTEP_PENALTY)
on_prey = np.where(np.all(self.predator_loc == self.prey_loc, axis=1))[0]
nb_predator_on_prey = on_prey.size
nb_predator_on_obs, colliding_preds = self._check_no_collisions()
if self.mode == 'cooperative':
reward[on_prey] = self.POS_PREY_REWARD * nb_predator_on_prey
reward[colliding_preds] += self.ON_OBS_REWARD * nb_predator_on_obs
elif self.mode == 'competitive':
if nb_predator_on_prey:
reward[on_prey] = self.POS_PREY_REWARD / nb_predator_on_prey
elif self.mode == 'mixed':
reward[on_prey] = self.PREY_REWARD
else:
raise RuntimeError("Incorrect mode, Available modes: [cooperative|competitive|mixed]")
self.reached_prey[on_prey] = 1
self.on_obs[colliding_preds] = 1
# if np.all(self.reached_prey == 1) and self.mode == 'mixed':
# self.episode_over = True
# Prey reward
if nb_predator_on_prey == 0:
reward[self.npredator:] = -1 * self.TIMESTEP_PENALTY
else:
reward[self.npredator:] = 0
# Success ratio
if self.mode != 'competitive':
if nb_predator_on_prey == self.npredator:
self.stat['success'] = 1
else:
self.stat['success'] = 0
self.stat['collisions'] = nb_predator_on_obs
return reward
def reward_terminal(self):
return np.zeros_like(self._get_reward())
def _onehot_initialization(self):
# Each row has a unique id of the location, plus extra slots denoting:
# 1) How many predators are there
# 2) How many prey are there
# 3) Whether the cell is outside or not.
# 4) If there's an obstacle there
one_hot_array = np.zeros((self.num_padded_grid_cells, self.observation_dim))
global_idx = 0
self.obs_locations = []
for row_idx, row in enumerate(self.grid):
for col_idx in range(row.shape[0]):
one_hot_array[global_idx][global_idx] = 1
if row_idx < self.vision or row_idx >= self.dims[0] + self.vision or\
col_idx < self.vision or col_idx >= self.dims[1] + self.vision:
one_hot_array[global_idx][self.OUTSIDE_CLASS] = 1
elif np.random.random() < self.prob_obs: # Don't create an obstacle where an agent already is or outside.
conflict = False
for pred in self.predator_loc:
if self.__global_idxs_to_global__(pred[0] + self.vision, pred[1] + self.vision) == global_idx:
conflict = True
break
for prey in self.prey_loc:
if self.__global_idxs_to_global__(prey[0] + self.vision, prey[1] + self.vision) == global_idx:
conflict = True
break
if not conflict:
one_hot_array[global_idx][self.OBS_CLASS] = 1
self.obs_locations.append(global_idx) # TODO: do obstacles.
global_idx += 1
obs_grid_locs = [self.__global_to_idxs__(loc) for loc in self.obs_locations]
self.obs_locations = np.asarray(obs_grid_locs).reshape((-1, 2))
return one_hot_array
def __global_idxs_to_global__(self, row, col):
"""Helper function maps a row and column to the global id; used for indexing into state."""
return (self.dims[0] + self.vision * 2) * row + col
def __global_to_idxs__(self, global_idx):
row = global_idx // (self.dims[0] + self.vision * 2)
col = global_idx - row * (self.dims[0] + self.vision * 2)
return row, col
def render(self, mode='human', close=False):
grid = np.zeros(self.num_grid_cells, dtype=object).reshape(self.dims)
self.stdscr.clear()
for p in self.predator_loc:
if grid[p[0]][p[1]] != 0:
grid[p[0]][p[1]] = str(grid[p[0]][p[1]]) + 'X'
else:
grid[p[0]][p[1]] = 'X'
for p in self.prey_loc:
if grid[p[0]][p[1]] != 0:
grid[p[0]][p[1]] = str(grid[p[0]][p[1]]) + 'P'
else:
grid[p[0]][p[1]] = 'P'
for obs in self.obs_locations:
r, c = obs
r -= self.vision
c -= self.vision
if 0 <= r < len(grid) and 0 <= c < len(grid[0]):
if grid[r][c] != 0:
grid[r][c] = str(grid[r][c]) + 'O'
else:
grid[r][c] = 'O'
for row_num, row in enumerate(grid):
for idx, item in enumerate(row):
if item != 0:
if 'X' in item and 'P' in item:
self.stdscr.addstr(row_num, idx * 4, item.center(3), curses.color_pair(3))
elif 'X' in item:
self.stdscr.addstr(row_num, idx * 4, item.center(3), curses.color_pair(1))
else:
self.stdscr.addstr(row_num, idx * 4, item.center(3), curses.color_pair(2))
else:
self.stdscr.addstr(row_num, idx * 4, '0'.center(3), curses.color_pair(4))
self.stdscr.addstr(len(grid), 0, '\n')
self.stdscr.refresh()
def exit_render(self):
    """Restore the terminal to its normal state after curses rendering."""
    curses.endwin()
|
<filename>python/code/training.py
# The MIT License (MIT)
# =====================
#
# Copyright © 2020 Azavea
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the “Software”), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
def numpy_replace(np_arr,
                  replacement_dict,
                  label_nd):
    """Map the values of *np_arr* through *replacement_dict*.

    Entries whose value appears as a key of the mapping are replaced by
    the mapped value; every other entry is set to the label nodata value.
    The input array is left untouched.

    Arguments:
        np_arr {np.ndarray} -- The numpy array to alter
        replacement_dict {INT2INT} -- The replacement mapping
        label_nd {SCALER} -- The label nodata

    Returns:
        np.ndarray -- The array with replacement performed
    """
    out = np.copy(np_arr)
    known_keys = list(replacement_dict.keys())
    # Everything not covered by the mapping becomes nodata.
    out[~np.isin(np_arr, known_keys)] = label_nd
    for old_value, new_value in replacement_dict.items():
        out[np_arr == old_value] = new_value
    return out
def get_batch(libchips,
              args,
              batch_multiplier=1):
    """Read a batch of imagery and labels

    Arguments:
        libchips {ctypes.CDLL} -- A shared library handle used for reading data
        args {argparse.Namespace} -- The arguments dictionary

    Keyword Arguments:
        batch_multiplier {int} -- How many base batches to fetch at once

    Returns:
        Tuple[torch.Tensor, torch.Tensor] -- The raster data and label data as PyTorch tensors in a tuple
    """
    assert(args.label_nd is not None)

    shape_imagery = (len(args.bands), args.window_size_imagery,
                     args.window_size_imagery)
    shape_labels = (args.window_size_labels, args.window_size_labels)
    # Scratch buffers that libchips fills in-place through raw pointers.
    temp1 = np.zeros(shape_imagery, dtype=np.float32)
    temp1_ptr = temp1.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
    temp2 = np.zeros(shape_labels, dtype=np.int32)
    temp2_ptr = temp2.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))

    rasters = []
    labels = []
    for _ in range(args.batch_size * batch_multiplier):
        # Re-sample windows until the forbidden/desired value constraints
        # are satisfied.
        while True:
            again = False
            libchips.get_next(temp1_ptr, temp2_ptr)
            if args.forbidden_imagery_value is not None:
                again = again or np.any(
                    temp1 == args.forbidden_imagery_value)
            if args.forbidden_label_value is not None:
                again = again or np.any(
                    temp2 == args.forbidden_label_value)
            if args.desired_label_value is not None:
                if not np.any(temp2 == args.desired_label_value):
                    # Probabilistically reject windows that lack the
                    # desired label value.
                    again = again or (args.reroll > random.random())
            if not again:
                break
        rasters.append(temp1.copy())
        labels.append(temp2.copy())

    raster_batch = []
    label_batch = []
    for raster, label in zip(rasters, labels):
        # NODATA from labels
        # BUGFIX: np.long was removed in NumPy 1.24; np.int64 is the
        # portable equivalent (torch also expects int64 label tensors).
        label = np.array(label, dtype=np.int64)
        label = numpy_replace(label, args.label_map, args.label_nd)
        label_nds = (label == args.label_nd)

        # NODATA from rasters
        image_nds = np.zeros(raster[0].shape)
        if args.image_nd is not None:
            image_nds += (raster == args.image_nd).sum(axis=0)

        # NODATA from NaNs in rasters
        image_nds += np.isnan(raster).sum(axis=0)

        # Set label NODATA, remove NaNs from rasters
        if args.window_size_imagery == args.window_size_labels:
            nodata1 = nodata2 = ((image_nds + label_nds) > 0)
        else:
            # Imagery and label windows differ in size: resample each
            # nodata mask to the other's resolution (nearest neighbour).
            ratio = float(args.window_size_labels) / args.window_size_imagery
            image_nds2 = scipy.ndimage.zoom(
                image_nds, ratio, order=0, prefilter=False)
            label_nds2 = scipy.ndimage.zoom(
                label_nds, 1/ratio, order=0, prefilter=False)
            nodata1 = ((image_nds2 + label_nds) > 0)
            nodata2 = ((image_nds + label_nds2) > 0)
        label[nodata1] = args.label_nd
        for i in range(len(raster)):
            raster[i][nodata2] = 0.0
        raster_batch.append(raster)
        label_batch.append(label)

    raster_batch_tensor = torch.from_numpy(np.stack(raster_batch, axis=0))
    label_batch_tensor = torch.from_numpy(np.stack(label_batch, axis=0))
    return (raster_batch_tensor, label_batch_tensor)
def train(model,
          opt,
          sched,
          obj,
          epochs,
          libchips,
          device,
          args,
          arg_hash,
          no_checkpoints=True,
          starting_epoch=0):
    """Train the model according the supplied data and (implicit and explicit) hyperparameters

    Arguments:
        model {torch.nn.Module} -- The model to train
        opt {OPT} -- The optimizer to use
        sched {SCHED} -- Learning-rate scheduler stepped after each batch (may be None)
        obj {OBJ} -- The objective function to use (a dict of named loss functions)
        epochs {int} -- The number of "epochs"
        libchips {ctypes.CDLL} -- A shared library handle through which data can be read
        device {torch.device} -- The device to use
        args {argparse.Namespace} -- The arguments dictionary
        arg_hash {str} -- The arguments hash

    Keyword Arguments:
        no_checkpoints {bool} -- Whether to not write checkpoint files (default: {True})
        starting_epoch {int} -- The starting epoch (default: {0})
    """
    current_time = time.time()
    model.train()
    for i in range(starting_epoch, epochs):
        avg_loss = 0.0
        for _ in range(args.max_epoch_size):
            batch = get_batch(libchips, args)
            opt.zero_grad()
            pred = model(batch[0].to(device))
            loss = None
            if isinstance(pred, dict):
                # Multi-head models return a dict; 'seg'/'out' is the main
                # segmentation head, the others are optional extras.
                pred_seg = pred.get('seg', pred.get('out', None))
                pred_aux = pred.get('aux', None)
                pred_2seg = pred.get('2seg', None)
                pred_reg = pred.get('reg', None)
            else:
                pred_seg = pred
                pred_aux = pred_2seg = pred_reg = None
            # Scale predictions to labels if needed
            if args.window_size_labels != args.window_size_imagery:
                if pred_seg is not None:
                    pred_seg = torch.nn.functional.interpolate(
                        pred_seg, args.window_size_labels, mode='bilinear', align_corners=False)
                if pred_aux is not None:
                    pred_aux = torch.nn.functional.interpolate(
                        pred_aux, args.window_size_labels, mode='bilinear', align_corners=False)
                if pred_2seg is not None:
                    pred_2seg = torch.nn.functional.interpolate(
                        pred_2seg, args.window_size_labels, mode='bilinear', align_corners=False)
            # Various kinds of segmentation
            if pred_seg is not None and pred_aux is None:
                # segmentation only
                labels = batch[1].to(device)
                loss = obj.get('seg')(pred_seg, labels)
            elif pred_seg is not None and pred_aux is not None:
                # segmentation with auxiliary output (aux weighted 0.4)
                labels = batch[1].to(device)
                loss = obj.get('seg')(pred_seg, labels) + \
                    0.4 * obj.get('seg')(pred_aux, labels)
            elif pred_2seg is not None:
                # binary segmentation only
                labels = (batch[1] == 1).to(device, dtype=torch.float)
                # XXX the above assumes that background and target are 0 and 1, respectively
                pred_2seg = pred_2seg[:, 0, :, :]
                loss = obj.get('2seg')(pred_2seg, labels)
            if pred_reg is not None:
                # Regression head: target is the fraction of target pixels.
                pcts = []
                for label in batch[1].cpu().numpy():
                    # XXX assumes that background and target are 0 and 1, respectively
                    ones = float((label == 1).sum())
                    zeros = float((label == 0).sum())
                    pcts.append([(ones/(ones + zeros + 1e-8))])
                pcts = torch.FloatTensor(pcts).to(device)
                loss += obj.get('l1')(pred_reg, pcts) + obj.get('l2')(pred_reg, pcts)
            loss.backward()
            # Clip gradients to guard against exploding updates.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1000)
            opt.step()
            if sched is not None:
                sched.step()
            avg_loss = avg_loss + loss.item()
        avg_loss = avg_loss / args.max_epoch_size
        libchips.recenter(0)
        last_time = current_time
        current_time = time.time()
        print('\t\t epoch={}/{} time={} avg_loss={}'.format(
            i+1, epochs, current_time - last_time, avg_loss))
        # Tell the watchdog (defined elsewhere in this file) that training
        # is still making progress.
        with WATCHDOG_MUTEX:
            global WATCHDOG_TIME
            WATCHDOG_TIME = time.time()
        # Checkpoint on the last epoch or every 13th epoch when an S3
        # destination is configured.
        if ((i == epochs - 1) or ((i > 0) and (i % 13 == 0) and args.s3_bucket and args.s3_prefix)) and not no_checkpoints:
            if not args.no_upload:
                torch.save(model.state_dict(), 'weights.pth')
                s3 = boto3.client('s3')
                checkpoint_name = '{}/{}/weights_checkpoint_{}.pth'.format(
                    args.s3_prefix, arg_hash, i)
                print('\t\t checkpoint_name={}'.format(checkpoint_name))
                s3.upload_file(
                    'weights.pth', args.s3_bucket, checkpoint_name)
                del s3
|
#!/usr/bin/python
import json
import numpy as np
import math
import sys
import argparse
from SetupLUT import FP2FIX, SetupTwiddlesLUT, SetupSwapTable, SetupSwapTableR4, SetupDCTTable, SetupLiftCoeff, SetupTwiddlesRFFT, MFCC_COEFF_DYN
def array_to_def_c_file(arr, name, data_type, size, elem_in_rows=2):
    """Render *arr* as a C array definition placed in PI_L2 memory.

    Produces ``PI_L2 <data_type> <name>[<size>] = { ... };`` text with
    *elem_in_rows* elements per line.
    """
    pieces = ["PI_L2 {} {}[{}] = {{\n\t".format(data_type, name, size)]
    for idx, value in enumerate(arr):
        pieces.append(str(value) + ", ")
        if (idx + 1) % elem_in_rows == 0:
            pieces.append("\n\t")
    pieces.append("\n}; \n")
    return "".join(pieces)
def create_parser():
    """Build the command line argument parser for the MFCC LUT generator."""
    p = argparse.ArgumentParser(prog='mfcc_lut_gen')
    # Individual options are mandatory only when no JSON parameter file
    # appears on the command line.
    needed = "--params_json" not in sys.argv
    p.add_argument('--params_json', default=None,
                   help="Path to .json file where to get parameters")
    p.add_argument('--fft_lut_file', required=needed,
                   help="path to fft lut file")
    p.add_argument('--mfcc_bf_lut_file', required=needed,
                   help="path to fft lut file")
    p.add_argument('--sample_rate', required=needed, type=int)
    p.add_argument('--frame_size', required=needed, type=int,
                   help="size in number of samples of one frame")
    p.add_argument('--frame_step', required=needed, type=int,
                   help="step in number of samples between two consecutive frames")
    p.add_argument('--win_func', default="hanning", type=str,
                   help="numpy window function (e.g. hanning)")
    p.add_argument('--n_fft', default=None, type=int,
                   help="number of fft bins")
    p.add_argument('--fmin', default=20.0, type=float,
                   help="mel spectrogram frequency min")
    p.add_argument('--fmax', default=4000.0, type=float,
                   help="mel spectrogram frequency max")
    p.add_argument('--librosa_mel_norm', default="slaney",
                   help="mel spectrogram norm function if librosa")
    p.add_argument('--mfcc_bank_cnt', default=128 if "--use_librosa" in sys.argv else 40, type=int,
                   help="number of mel filterbanks, must be <= n_dct if n_dct!=0")
    p.add_argument('--use_tf_mfcc', action='store_true',
                   help="use tensorflow filterbank")
    p.add_argument('--use_librosa', action='store_true',
                   help="use librosa filterbank")
    p.add_argument('--n_dct', default=0, type=int,
                   help="number of dct bins, if 0 will not be generated any lut")
    p.add_argument('--dct_type', default=2, type=int,
                   help="DCT algortihm type")
    p.add_argument('--lifter_coeff', default=0, type=int,
                   help="Lifter coefficient (default: 0)")
    p.add_argument('--save_params_header', default=None,
                   help="Writes all this parameters in a .h file")
    p.add_argument('--dtype', default="fix16")
    return p
def main():
    """Generate the C lookup tables used by the MFCC pipeline.

    Emits the window, FFT swap tables and twiddles, RFFT twiddles, mel
    filter bank and optional DCT/lifter coefficients to C source files.
    Values given in a JSON parameter file (--params_json) override the
    command line flags.
    """
    parser = create_parser()
    args = parser.parse_args()
    if args.params_json:
        with open(args.params_json, "r") as f:
            models_params = json.load(f)
    else:
        models_params = {}
    print(models_params)

    # JSON parameters take precedence over the CLI values.
    fft_lut_file = args.fft_lut_file if not "fft_lut_file" in models_params else models_params["fft_lut_file"]
    mfcc_bf_lut_file = args.mfcc_bf_lut_file if not "mfcc_bf_lut_file" in models_params else models_params["mfcc_bf_lut_file"]
    use_tf_mfcc = args.use_tf_mfcc if not "use_tf_mfcc" in models_params else models_params["use_tf_mfcc"]
    use_librosa = args.use_librosa if not "use_librosa" in models_params else models_params["use_librosa"]
    sample_rate = args.sample_rate if not "sample_rate" in models_params else models_params["sample_rate"]
    frame_size = args.frame_size if not "frame_size" in models_params else models_params["frame_size"]
    frame_step = args.frame_step if not "frame_step" in models_params else models_params["frame_step"]
    window_fn = args.win_func if not "win_func" in models_params else models_params["win_func"]
    n_fft = args.n_fft if not "n_fft" in models_params else models_params["n_fft"]
    fmax = args.fmax if not "fmax" in models_params else models_params["fmax"]
    fmin = args.fmin if not "fmin" in models_params else models_params["fmin"]
    librosa_mel_norm = args.librosa_mel_norm if not "librosa_mel_norm" in models_params else models_params["librosa_mel_norm"]
    mfcc_bank_cnt = args.mfcc_bank_cnt if not "mfcc_bank_cnt" in models_params else models_params["mfcc_bank_cnt"]
    n_dct = args.n_dct if not "n_dct" in models_params else models_params["n_dct"]
    dct_type = args.dct_type if not "dct_type" in models_params else models_params["dct_type"]
    lifter_coeff = args.lifter_coeff if not "lifter_coeff" in models_params else models_params["lifter_coeff"]
    dtype = args.dtype if not "dtype" in models_params else models_params["dtype"]

    lut_dtype = "int" if dtype == "fix16" or dtype == "fix32_scal" else dtype
    n_fft_int = n_fft if dtype == "fix32_scal" else n_fft // 2
    if lut_dtype == "int":
        data_type = "short int"
    elif lut_dtype == "float16":
        data_type = "f16"
    elif lut_dtype == "float32":
        data_type = "float"
    else:
        # BUGFIX: NotImplemented is not an exception class -- raising it
        # produced a TypeError.  NotImplementedError is the correct type.
        raise NotImplementedError(f"lut_dtype = {lut_dtype} not implemeted, available 'int' 'float32' 'float16'")
    print(lut_dtype)

    win_func = getattr(np, window_fn)
    if lut_dtype == "int":
        Window = (win_func(frame_size) * 2**(15)).astype(np.int16)
    else:
        # NOTE(review): the window is stored as float16 even when dtype is
        # "float32" -- confirm whether full float32 precision is wanted here.
        Window = win_func(frame_size).astype(np.float16)

    Twiddles_cos, Twiddles_sin = SetupTwiddlesLUT(n_fft_int, dtype=lut_dtype)
    # Radix-4 tables only exist when the FFT length is a power of 4.
    if round(math.log(n_fft_int, 4)) == math.log(n_fft_int, 4):
        SwapTableR4 = SetupSwapTableR4(n_fft_int)
        Twiddles_cosR4, Twiddles_sinR4 = Twiddles_cos[:int(3/4*n_fft_int)], Twiddles_sin[:int(3/4*n_fft_int)]
        print("Setting up twiddles for radix 4 ", len(Twiddles_cosR4))
    SwapTableR2 = SetupSwapTable(n_fft_int)
    Twiddles_cosR2, Twiddles_sinR2 = Twiddles_cos[:int(n_fft_int//2)], Twiddles_sin[:int(n_fft_int//2)]
    print("Setting up twiddles for radix 2 ", len(Twiddles_cosR2))
    RFFTTwiddles_real, RFFTTwiddles_imag = SetupTwiddlesRFFT(n_fft, dtype=lut_dtype)

    if n_dct > 0:
        DCT_Coeff = SetupDCTTable(n_dct, dct_type, lut_dtype)
        if lifter_coeff > 0:
            Lift_Coeff = SetupLiftCoeff(lifter_coeff, n_dct)

    ################################ WRITE TO FILE #######################################
    Out_str = array_to_def_c_file(Window, "WindowLUT", data_type, frame_size, elem_in_rows=12)
    Out_str += array_to_def_c_file(SwapTableR2.astype(np.int16), "SwapTableR2", "short int", n_fft_int, elem_in_rows=2)
    if round(math.log(n_fft_int, 4)) == math.log(n_fft_int, 4):
        Out_str += array_to_def_c_file(SwapTableR4.astype(np.int16), "SwapTableR4", "short int", n_fft_int, elem_in_rows=2)

    # FFT twiddles are emitted as interleaved (cos, sin) pairs.
    Out_str += "PI_L2 {} TwiddlesLUTR2[{}] = {{\n".format(data_type, 2*len(Twiddles_cosR2))
    for i in range(len(Twiddles_cosR2)):
        Out_str += "\t {}, {}, \n".format(Twiddles_cosR2[i], Twiddles_sinR2[i])
    Out_str += "\n};\n\n"

    if not dtype == "fix32_scal":
        if round(math.log(n_fft_int, 4)) == math.log(n_fft_int, 4):
            # BUGFIX: the declared size must match the number of radix-4
            # twiddles actually emitted (was 2*len(Twiddles_cosR2)).
            Out_str += "PI_L2 {} TwiddlesLUTR4[{}] = {{\n".format(data_type, 2*len(Twiddles_cosR4))
            for i in range(len(Twiddles_cosR4)):
                Out_str += "\t {}, {}, \n".format(Twiddles_cosR4[i], Twiddles_sinR4[i])
            Out_str += "\n};\n\n"
        Out_str += "PI_L2 {} RFFTTwiddlesLUT[{}] = {{\n".format(data_type, 2*len(RFFTTwiddles_real))
        for i in range(len(RFFTTwiddles_real)):
            Out_str += "\t {}, {}, \n".format(RFFTTwiddles_real[i], RFFTTwiddles_imag[i])
        Out_str += "\n};\n\n"

    # DCT
    if n_dct > 0:
        Out_str += array_to_def_c_file(DCT_Coeff.flatten(), "DCT_Coeff", data_type, n_dct*n_dct, elem_in_rows=n_dct)
        if lifter_coeff > 0:
            Out_str += array_to_def_c_file(Lift_Coeff.flatten(), "Lift_Coeff", data_type, n_dct, elem_in_rows=6)

    with open(fft_lut_file, 'w') as f:
        f.write(Out_str)

    #####################################################################################

    # MFCC filter bank: select the generator matching the requested flavor.
    if use_tf_mfcc:
        from SetupLUT import GenMFCC_FB_tf
        filters, MFCC_Coeff, HeadCoeff = GenMFCC_FB_tf(n_fft, mfcc_bank_cnt, Fmin=fmin, Fmax=fmax, sample_rate=sample_rate, dtype=lut_dtype)
    elif use_librosa:
        from SetupLUT import GenMFCC_FB_librosa
        filters, MFCC_Coeff, HeadCoeff = GenMFCC_FB_librosa(n_fft, mfcc_bank_cnt, Fmin=fmin, Fmax=fmax, sample_rate=sample_rate, norm=librosa_mel_norm, dtype=lut_dtype)
    else:
        from SetupLUT import GenMFCC_FB
        filters, MFCC_Coeff, HeadCoeff = GenMFCC_FB(n_fft, mfcc_bank_cnt, Fmin=fmin, Fmax=fmax, sample_rate=sample_rate, dtype=lut_dtype)

    Out_str = "#define MFCC_COEFF_CNT\t{}\n\n".format(HeadCoeff+1)
    Out_str += "/* Filter Bank bands:\n\n"
    Out_str += "\tMinimum Frequency: {} Hz\n".format(fmin)
    Out_str += "\tMaximum Frequency: {} Hz*/\n\n".format(fmax)

    # Each filter is stored as (Start, Items, Base) into the coefficient array.
    Out_str += "PI_L2 fbank_type_t MFCC_FilterBank[{}] = {{\n".format(mfcc_bank_cnt)
    HeadCoeff = 0
    for i, filt in enumerate(filters):
        if np.all(filt == 0):
            Start = 0
            Stop = 0
            Base = HeadCoeff
            Items = 0
        else:
            Start = np.argmax(filt!=0)
            Stop = len(filt) - np.argmax(filt[::-1]!=0) - 1
            Base = HeadCoeff
            Items = Stop - Start + 1
        Out_str += "\t{{{:>4},{:>4},{:>4}}},\n".format(Start, Items, Base)
        HeadCoeff += Items
    Out_str += "};\n\n"

    Out_str += "PI_L2 {} MFCC_Coeffs[{}] = {{\n\t".format(data_type, HeadCoeff+1)
    for i, coeff in enumerate(MFCC_Coeff):
        Out_str += "{:>5}".format(str(coeff)) + ", "
        if (i+1) % 15 == 0:
            Out_str += "\n\t"
    # Add a last 0 coeff
    Out_str += "{:>5}\n}};\n".format(0)

    with open(mfcc_bf_lut_file, "w") as f:
        f.write(Out_str)

    if args.save_params_header:
        with open(args.save_params_header, "w") as f:
            f.write("#define\t{:21}{:>10}\n".format("SAMPLERATE", sample_rate))
            f.write("#define\t{:21}{:>10}\n".format("FRAME_SIZE", frame_size))
            f.write("#define\t{:21}{:>10}\n".format("FRAME_STEP", frame_step))
            f.write("#define\t{:21}{:>10}\n".format("N_FFT", n_fft))
            f.write("#define\t{:21}{:>10}\n".format("DATA_TYPE", 2 if dtype=="float16" else (3 if dtype=="float32" else (1 if dtype=="fix32_scal" else 0))))
            f.write("#define\t{:21}{:>10}\n".format("MFCC_BANK_CNT", mfcc_bank_cnt))
            f.write("#define\t{:21}{:>10}\n".format("FMIN", fmin))
            f.write("#define\t{:21}{:>10}\n".format("FMAX", fmax))
            f.write("#define\t{:21}{:>10}\n".format("MFCC_COEFF_CNT", HeadCoeff+1))
            f.write("#define\t{:21}{:>10}\n".format("N_DCT", n_dct))
if __name__ == "__main__":
    # Script entry point: generate the LUT files from CLI/JSON parameters.
    main()
|
<filename>fractalis/data/etl.py
"""This module provides the ETL class"""
import abc
import json
import logging
import os
from Cryptodome.Cipher import AES
# noinspection PyProtectedMember
from celery import Task
from pandas import DataFrame
from fractalis import app, redis
from fractalis.data.check import IntegrityCheck
from fractalis.utils import get_cache_encrypt_key
logger = logging.getLogger(__name__)
class ETL(Task, metaclass=abc.ABCMeta):
    """This is an abstract class that implements a celery Task and provides a
    factory method to create instances of implementations of itself. Its main
    purpose is to manage extraction of the data from the target server. ETL
    stands for (E)xtract (T)ransform (L)oad and not by coincidence similar
    named methods can be found in this class.
    """

    @property
    @abc.abstractmethod
    def name(self) -> str:
        """Used by celery to identify this task by name."""
        pass

    @property
    @abc.abstractmethod
    def produces(self) -> str:
        """This specifies the fractalis internal format that this ETL
        produces. Can be one of: ['categorical', 'numerical', 'numerical_array']
        """
        pass

    @staticmethod
    @abc.abstractmethod
    def can_handle(handler: str, descriptor: dict) -> bool:
        """Check if the current implementation of ETL can handle given handler
        and data type.

        WARNING: You should never raise an Exception here and expect it to be
        propagated further up. It will be caught and assumed that the
        current ETL cannot handle the given arguments.

        :param handler: Describes the handler. E.g.: transmart, ada
        :param descriptor: Describes the data that we want to download.
        :return: True if implementation can handle given parameters.
        """
        pass

    @staticmethod
    def factory(handler: str, descriptor: dict) -> 'ETL':
        """Return an instance of the implementation ETL that can handle the
        given parameters.

        :param handler: Describes the handler. E.g.: transmart, ada
        :param descriptor: Describes the data that we want to download.
        :return: An instance of an implementation of ETL that returns True for
        can_handle()
        """
        from . import ETL_REGISTRY
        for ETLTask in ETL_REGISTRY:
            # noinspection PyBroadException
            try:
                if ETLTask.can_handle(handler, descriptor):
                    return ETLTask()
            except Exception as e:
                # Deliberately broad: a misbehaving can_handle() must not
                # prevent the remaining ETLs from being considered.
                logger.warning("Caught exception and assumed that ETL '{}' "
                               "cannot handle handler '{}' "
                               "and descriptor: '{}'. Exception:'{}'".format(
                                   type(ETLTask).__name__,
                                   handler,
                                   str(descriptor), e))
                continue
        raise NotImplementedError(
            "No ETL implementation found for handler '{}' "
            "and descriptor '{}'".format(handler, descriptor))

    @abc.abstractmethod
    def extract(self, server: str, token: str, descriptor: dict) -> object:
        """Extract the data via HTTP requests.

        :param server: The server from which to extract from.
        :param token: The token used for authentication.
        :param descriptor: The descriptor containing all necessary information
        to download the data.
        """
        pass

    @abc.abstractmethod
    def transform(self, raw_data: object, descriptor: dict) -> DataFrame:
        """Transform the data into a pandas.DataFrame with a naming according to
        the Fractalis standard format.

        :param raw_data: The return value of extract().
        :param descriptor: The data descriptor, sometimes needed
        for transformation
        """
        pass

    def sanity_check(self) -> None:
        """Check whether ETL is still sane and should be continued. E.g. if
        redis has been cleared it does not make sense to proceed. Raise an
        exception if not sane."""
        # self.request.id is the celery task id, which doubles as the
        # redis key for this ETL's state entry.
        check_1 = redis.exists('data:{}'.format(self.request.id))
        if not check_1:
            error = "ETL failed! The associated entry in " \
                    "Redis has been removed while the ETL was running."
            logger.error(error)
            raise RuntimeError(error)

    def update_redis(self, data_frame: DataFrame) -> None:
        """Set several meta information that can be used to filter the data
        before the analysis.

        :param data_frame: The extracted and transformed data.
        """
        value = redis.get(name='data:{}'.format(self.request.id))
        assert value is not None
        data_state = json.loads(value)
        if 'feature' in data_frame.columns:
            features = data_frame['feature'].unique().tolist()
        else:
            features = []
        data_state['meta']['features'] = features
        # setex refreshes the entry's TTL alongside the new value.
        redis.setex(name='data:{}'.format(self.request.id),
                    value=json.dumps(data_state),
                    time=app.config['FRACTALIS_DATA_LIFETIME'])

    @staticmethod
    def secure_load(data_frame: DataFrame, file_path: str) -> None:
        """Save data to the file system in encrypted form using AES and the
        web service secret key. This can be useful to comply with certain
        security standards.

        :param data_frame: DataFrame to write.
        :param file_path: File to write to.
        """
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        data = data_frame.to_json()
        data = data.encode('utf-8')
        key = get_cache_encrypt_key(app.config['SECRET_KEY'])
        cipher = AES.new(key, AES.MODE_EAX)
        ciphertext, tag = cipher.encrypt_and_digest(data)
        # File layout: nonce, then authentication tag, then ciphertext --
        # the reader must consume them in the same order.
        with open(file_path, 'wb') as f:
            [f.write(x) for x in (cipher.nonce, tag, ciphertext)]

    @staticmethod
    def load(data_frame: DataFrame, file_path: str) -> None:
        """Load (save) the data to the file system.

        :param data_frame: DataFrame to write.
        :param file_path: File to write to.
        """
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        data_frame.to_pickle(file_path, compression='gzip')

    def run(self, server: str, token: str,
            descriptor: dict, file_path: str,
            encrypt: bool) -> None:
        """Run extract, transform and load. This is called by the celery worker.

        :param server: The server on which the data are located.
        :param token: The token used for authentication.
        :param descriptor: Contains all necessary information to download data
        :param file_path: The location where the data will be stored
        :param encrypt: Whether or not the data should be encrypted.
        :return: The data id. Used to access the associated redis entry later
        """
        logger.info("Starting ETL process ...")
        logger.info("(E)xtracting data from server '{}'.".format(server))
        try:
            self.sanity_check()
            raw_data = self.extract(server, token, descriptor)
        except Exception as e:
            logger.exception(e)
            raise RuntimeError("Data extraction failed. {}".format(e))
        logger.info("(T)ransforming data to Fractalis format.")
        try:
            self.sanity_check()
            data_frame = self.transform(raw_data, descriptor)
            # Validate that the produced frame matches the declared format.
            checker = IntegrityCheck.factory(self.produces)
            checker.check(data_frame)
        except Exception as e:
            logger.exception(e)
            raise RuntimeError("Data transformation failed. {}".format(e))
        if not isinstance(data_frame, DataFrame):
            error = "transform() must return 'pandas.DataFrame', " \
                    "but returned '{}' instead.".format(type(data_frame))
            logging.error(error, exc_info=1)
            raise TypeError(error)
        try:
            self.sanity_check()
            if encrypt:
                self.secure_load(data_frame, file_path)
            else:
                self.load(data_frame, file_path)
            self.update_redis(data_frame)
        except Exception as e:
            logger.exception(e)
            raise RuntimeError("Data loading failed. {}".format(e))
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 4 11:33:01 2017
@author: gualandi
"""
import numpy as np
from gurobipy import Model, GRB, quicksum, tuplelist
def ComputeDistanceMatrix(n, p=2):
    """Return the ground distance (p-norm of pixel offsets) of an n*n image.

    The result is a dict keyed by source pixel (i, j); each entry is a dict
    keyed by target pixel (v, w) holding the distance between the two.
    """
    dist = {}
    for i in range(n):
        for j in range(n):
            row = {}
            for v in range(n):
                for w in range(n):
                    row[v, w] = (abs(i - v)**p + abs(j - w)**p)**(1/p)
            dist[i, j] = row
    return dist
def WassersteinDualCutting(h1, h2, M):
    """Find the Wasserstein distance using a cutting plane on the dual.

    h1, h2 -- the two histograms (n*n arrays) to compare.
    M -- ground distance dict as built by ComputeDistanceMatrix.
    Returns the optimal objective value of the dual LP.
    """
    n = len(h1)
    P = []
    for i in range(n):
        for j in range(n):
            P.append((i,j))

    # Build model
    m = Model()
    m.setParam(GRB.Param.TimeLimit, 300)
    #m.setParam(GRB.Param.Presolve, 0)
    #m.setParam(GRB.Param.Threads, 1)
    # Options are:
    # -1=automatic, 0=primal simplex, 1=dual simplex, 2=barrier,
    # 3=concurrent, 4=deterministic concurrent.
    m.setParam(GRB.Param.Method, 1)
    # Set a maximization problem
    m.setAttr(GRB.Attr.ModelSense, -1)

    print('1. Start building model')
    # Create variables
    V = {}
    U = {}
    for i,j in P:
        # First set of dual variables
        V[i,j] = m.addVar(lb=-GRB.INFINITY, ub=0, obj=h1[i,j])
        # Second set of dual variables
        u_ub = sum([M[v,w][i,j] for v,w in P])
        U[i,j] = m.addVar(lb=0, ub=u_ub, obj=h2[i,j])
    m.update()

    print('2. Add initial constraint sets')
    for i,j in P:
        for v,w in P:
            if M[i,j][v,w] <= 16.001: # Threshold for first set of constraints
                m.addConstr(V[i,j] + U[v,w], GRB.LESS_EQUAL, M[i,j][v,w])

    print('3. Start Cutting planes')
    # Solve the model
    it = 0
    stime = 0
    while True:
        it += 1
        m.optimize()
        # BUGFIX: an unconditional `break` directly after m.optimize()
        # terminated the loop on its first iteration, so no cutting planes
        # were ever separated and all of the code below was dead.
        stime += m.RunTime
        flag = True
        max_depth = 0
        for i,j in P:
            # Find the most violated constraint involving V[i,j].
            depth = -1
            a,b,c,d = -1,-1,-1,-1
            for v,w in P:
                if V[i,j].X + U[v,w].X - M[i,j][v,w] > depth:
                    a,b,c,d = i,j,v,w
                    depth = V[i,j].X + U[v,w].X - M[i,j][v,w]
            # NOTE(review): a cut is also added when depth == max_depth == 0;
            # verify this cannot loop forever on a degenerate instance.
            if (max_depth == 0 and depth > 0.001) or (depth >= max_depth):
                max_depth = max(max_depth, depth)
                flag = False
                m.addConstr(V[a,b] + U[c,d], GRB.LESS_EQUAL, M[a,b][c,d])
        print('ITERATION:', it,' MAX DEPTH:',round(max_depth,3),' Time:',round(stime,3))
        if flag:
            # No violated constraint left: the LP solution is optimal.
            break

    return m.getAttr(GRB.Attr.ObjVal)
#------------------------------------------
# MAIN ENTRY POINT
#------------------------------------------
if __name__ == "__main__":
    # BUGFIX: the Windows paths contained invalid escape sequences such as
    # "\D" and "\d" in ordinary string literals (DeprecationWarning today,
    # a syntax error in future Python versions); raw strings preserve the
    # exact same characters safely.
    filename1 = r'D:\Ricerca\DOTA\data\DOTmark_1.0\Data\ClassicImages\data32_1001.csv'
    M1 = np.loadtxt(open(filename1, "rb"), delimiter=",")
    filename2 = r'D:\Ricerca\DOTA\data\DOTmark_1.0\Data\ClassicImages\data32_1003.csv'
    M2 = np.loadtxt(open(filename2, "rb"), delimiter=",")

    C = ComputeDistanceMatrix(len(M1), p=1)
    print(WassersteinDualCutting(M1, M2, C))
# Copyright 2014 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
from keystoneclient.auth import identity
from keystoneclient import httpclient
aodhclient = importutils.try_import('aodhclient.v2.client')
barbicanclient = importutils.try_import('barbicanclient.client')
ceilometerclient = importutils.try_import('ceilometerclient.v2.client')
cinderclient = importutils.try_import('cinderclient.v2.client')
designateclient = importutils.try_import('designateclient.v1')
glanceclient = importutils.try_import('glanceclient.v2.client')
gnocchiclient = importutils.try_import('gnocchiclient.v1.client')
heatclient = importutils.try_import('heatclient.v1.client')
ironic_inspector_client = importutils.try_import('ironic_inspector_client.v1')
ironicclient = importutils.try_import('ironicclient.v1.client')
keystoneclient = importutils.try_import('keystoneclient.v3.client')
magnumclient = importutils.try_import('magnumclient.v1.client')
mistralclient = importutils.try_import('mistralclient.api.v2.client')
muranoclient = importutils.try_import('muranoclient.v1.client')
neutronclient = importutils.try_import('neutronclient.v2_0.client')
novaclient = importutils.try_import('novaclient.client')
senlinclient = importutils.try_import('senlinclient.v1.client')
swift_client = importutils.try_import('swiftclient.client')
tackerclient = importutils.try_import('tackerclient.v1_0.client')
troveclient = importutils.try_import('troveclient.v1.client')
zaqarclient = importutils.try_import('zaqarclient.queues.v2.client')
from mistral.actions.openstack import base
from mistral import context
from mistral.utils import inspect_utils
from mistral.utils.openstack import keystone as keystone_utils
LOG = log.getLogger(__name__)
CONF = cfg.CONF
IRONIC_API_VERSION = '1.22'
"""The default microversion to pass to Ironic API.
1.22 corresponds to Newton final.
"""
class NovaAction(base.OpenStackAction):
    """Mistral action that talks to the Nova compute service."""

    def _create_client(self):
        # Build a Nova client authenticated with the token carried by the
        # current security context.
        ctx = context.ctx()

        LOG.debug("Nova action security context: %s" % ctx)

        keystone_endpoint = keystone_utils.get_keystone_endpoint_v2()
        nova_endpoint = keystone_utils.get_endpoint_for_project('nova')

        client = novaclient.Client(
            2,
            username=None,
            api_key=None,
            endpoint_type=CONF.os_actions_endpoint_type,
            service_type='compute',
            auth_token=ctx.auth_token,
            tenant_id=ctx.project_id,
            region_name=keystone_endpoint.region,
            auth_url=keystone_endpoint.url,
            insecure=ctx.insecure
        )

        # Point the client at the project-scoped compute endpoint; the
        # endpoint URL template contains the tenant id placeholder.
        client.client.management_url = keystone_utils.format_url(
            nova_endpoint.url,
            {'tenant_id': ctx.project_id}
        )

        return client

    @classmethod
    def _get_fake_client(cls):
        # Unauthenticated client used only to introspect available methods.
        return novaclient.Client(2)
class GlanceAction(base.OpenStackAction):
    """Mistral action wrapper around the Glance image service client."""

    @classmethod
    def _get_client_class(cls):
        return glanceclient.Client

    def _create_client(self):
        ctx = context.ctx()

        LOG.debug("Glance action security context: %s" % ctx)

        endpoint = keystone_utils.get_endpoint_for_project('glance')
        client_cls = self._get_client_class()
        return client_cls(
            endpoint.url,
            region_name=endpoint.region,
            token=ctx.auth_token,
            insecure=ctx.insecure
        )

    @classmethod
    def _get_fake_client(cls):
        return cls._get_client_class()("fake_endpoint")
class KeystoneAction(base.OpenStackAction):
    """Mistral action wrapper around the Keystone identity client."""

    @classmethod
    def _get_client_class(cls):
        return keystoneclient.Client

    def _create_client(self):
        ctx = context.ctx()

        LOG.debug("Keystone action security context: %s" % ctx)

        # TODO(akovi) cacert is deprecated in favor of session
        # TODO(akovi) this piece of code should be refactored
        # TODO(akovi) to follow the new guide lines
        kwargs = {
            'token': ctx.auth_token,
            'auth_url': ctx.auth_uri,
            'project_id': ctx.project_id,
            'cacert': ctx.auth_cacert,
            'insecure': ctx.insecure
        }

        # In case of trust-scoped token explicitly pass endpoint parameter.
        if (ctx.is_trust_scoped
                or keystone_utils.is_token_trust_scoped(ctx.auth_token)):
            kwargs['endpoint'] = ctx.auth_uri

        client = self._get_client_class()(**kwargs)

        client.management_url = ctx.auth_uri

        return client

    @classmethod
    def _get_fake_client(cls):
        # Here we need to replace httpclient authenticate method temporarily
        # (monkey-patch) so instantiating the client does not hit the network;
        # the original method is restored immediately afterwards.
        authenticate = httpclient.HTTPClient.authenticate

        httpclient.HTTPClient.authenticate = lambda x: True
        fake_client = cls._get_client_class()()

        # Once we get fake client, return back authenticate method
        httpclient.HTTPClient.authenticate = authenticate

        return fake_client
class CeilometerAction(base.OpenStackAction):
    """OpenStack action wrapper around the Ceilometer (telemetry) client."""

    @classmethod
    def _get_client_class(cls):
        return ceilometerclient.Client

    def _create_client(self):
        security_ctx = context.ctx()

        LOG.debug("Ceilometer action security context: %s" % security_ctx)

        endpoint = keystone_utils.get_endpoint_for_project(
            'ceilometer'
        )

        # Substitute the caller's project id into the endpoint template.
        url = keystone_utils.format_url(
            endpoint.url,
            {'tenant_id': security_ctx.project_id}
        )

        return self._get_client_class()(
            url,
            region_name=endpoint.region,
            token=security_ctx.auth_token,
            username=security_ctx.user_name,
            insecure=security_ctx.insecure
        )

    @classmethod
    def _get_fake_client(cls):
        # An empty endpoint is sufficient for introspection.
        return cls._get_client_class()("")
class HeatAction(base.OpenStackAction):
    """OpenStack action wrapper around the Heat (orchestration) client."""

    @classmethod
    def _get_client_class(cls):
        return heatclient.Client

    def _create_client(self):
        security_ctx = context.ctx()

        LOG.debug("Heat action security context: %s" % security_ctx)

        endpoint = keystone_utils.get_endpoint_for_project('heat')

        # The endpoint template may use either placeholder spelling.
        replacements = {
            'tenant_id': security_ctx.project_id,
            'project_id': security_ctx.project_id
        }

        url = keystone_utils.format_url(endpoint.url, replacements)

        return self._get_client_class()(
            url,
            region_name=endpoint.region,
            token=security_ctx.auth_token,
            username=security_ctx.user_name,
            insecure=security_ctx.insecure
        )

    @classmethod
    def _get_fake_client(cls):
        return cls._get_client_class()("")
class NeutronAction(base.OpenStackAction):
    """OpenStack action wrapper around the Neutron (networking) client."""

    @classmethod
    def _get_client_class(cls):
        return neutronclient.Client

    def _create_client(self):
        security_ctx = context.ctx()

        LOG.debug("Neutron action security context: %s" % security_ctx)

        endpoint = keystone_utils.get_endpoint_for_project('neutron')

        client_kwargs = {
            'endpoint_url': endpoint.url,
            'region_name': endpoint.region,
            'token': security_ctx.auth_token,
            'auth_url': security_ctx.auth_uri,
            'insecure': security_ctx.insecure,
        }

        return self._get_client_class()(**client_kwargs)
class CinderAction(base.OpenStackAction):
    """OpenStack action wrapper around the Cinder (block storage) client."""

    @classmethod
    def _get_client_class(cls):
        return cinderclient.Client

    def _create_client(self):
        security_ctx = context.ctx()

        LOG.debug("Cinder action security context: %s" % security_ctx)

        endpoint = keystone_utils.get_endpoint_for_project(
            service_type='volumev2'
        )

        # The endpoint template may use either placeholder spelling.
        url = keystone_utils.format_url(
            endpoint.url,
            {
                'tenant_id': security_ctx.project_id,
                'project_id': security_ctx.project_id
            }
        )

        volume_client = self._get_client_class()(
            security_ctx.user_name,
            security_ctx.auth_token,
            project_id=security_ctx.project_id,
            auth_url=url,
            region_name=endpoint.region,
            insecure=security_ctx.insecure
        )

        # Reuse the caller's token instead of re-authenticating.
        volume_client.client.auth_token = security_ctx.auth_token
        volume_client.client.management_url = url

        return volume_client

    @classmethod
    def _get_fake_client(cls):
        return cls._get_client_class()()
class MistralAction(base.OpenStackAction):
    """OpenStack action wrapper around the Mistral (workflow) client."""

    @classmethod
    def _get_client_class(cls):
        return mistralclient.Client

    def _create_client(self):
        security_ctx = context.ctx()

        LOG.debug("Mistral action security context: %s" % security_ctx)

        # Check for trust scope token. This may occur if the action is
        # called from a workflow triggered by a Mistral cron trigger.
        mistral_url = None
        auth_url = None

        if security_ctx.is_trust_scoped:
            # Trust-scoped token: talk to Mistral directly, skip auth.
            mistral_url = keystone_utils.get_endpoint_for_project(
                'mistral'
            ).url
        else:
            auth_url = keystone_utils.get_keystone_endpoint_v2().url

        return self._get_client_class()(
            mistral_url=mistral_url,
            auth_token=security_ctx.auth_token,
            project_id=security_ctx.project_id,
            user_id=security_ctx.user_id,
            auth_url=auth_url,
            insecure=security_ctx.insecure
        )

    @classmethod
    def _get_fake_client(cls):
        return cls._get_client_class()()
class TroveAction(base.OpenStackAction):
    """OpenStack action wrapper around the Trove (database) client."""

    @classmethod
    def _get_client_class(cls):
        return troveclient.Client

    def _create_client(self):
        security_ctx = context.ctx()

        LOG.debug("Trove action security context: %s" % security_ctx)

        endpoint = keystone_utils.get_endpoint_for_project(
            service_type='database'
        )

        url = keystone_utils.format_url(
            endpoint.url,
            {'tenant_id': security_ctx.project_id}
        )

        db_client = self._get_client_class()(
            security_ctx.user_name,
            security_ctx.auth_token,
            project_id=security_ctx.project_id,
            auth_url=url,
            region_name=endpoint.region,
            insecure=security_ctx.insecure
        )

        # Reuse the caller's token instead of re-authenticating.
        db_client.client.auth_token = security_ctx.auth_token
        db_client.client.management_url = url

        return db_client

    @classmethod
    def _get_fake_client(cls):
        return cls._get_client_class()("fake_user", "fake_passwd")
class IronicAction(base.OpenStackAction):
    """OpenStack action wrapper around the Ironic (bare metal) client."""

    @classmethod
    def _get_client_class(cls):
        return ironicclient.Client

    def _create_client(self):
        security_ctx = context.ctx()

        LOG.debug("Ironic action security context: %s" % security_ctx)

        endpoint = keystone_utils.get_endpoint_for_project('ironic')

        return self._get_client_class()(
            endpoint.url,
            token=security_ctx.auth_token,
            region_name=endpoint.region,
            os_ironic_api_version=IRONIC_API_VERSION,
            insecure=security_ctx.insecure
        )

    @classmethod
    def _get_fake_client(cls):
        # A local URL is enough for introspection purposes.
        return cls._get_client_class()("http://127.0.0.1:6385/")
class BaremetalIntrospectionAction(base.OpenStackAction):
    """OpenStack action wrapper around the ironic-inspector client."""

    @classmethod
    def _get_client_class(cls):
        return ironic_inspector_client.ClientV1

    @classmethod
    def _get_fake_client(cls):
        try:
            # ironic-inspector client tries to get and validate it's own
            # version when created. This might require checking the keystone
            # catalog if the ironic-inspector server is not listening on the
            # localhost IP address. Thus, we get a session for this case.
            admin_session = keystone_utils.get_admin_session()

            return cls._get_client_class()(session=admin_session)
        except Exception as exc:
            LOG.warning("There was an error trying to create the "
                        "ironic-inspector client using a session: %s" % str(exc))

        # If it's not possible to establish a keystone session, attempt to
        # create a client without it. This should fall back to where the
        # ironic-inspector client tries to get it's own version on the
        # default IP address.
        LOG.debug("Attempting to create the ironic-inspector client "
                  "without a session.")

        return cls._get_client_class()()

    def _create_client(self):
        security_ctx = context.ctx()

        LOG.debug(
            "Baremetal introspection action security context: %s" % security_ctx)

        inspector_endpoint = keystone_utils.get_endpoint_for_project(
            service_type='baremetal-introspection'
        )

        return self._get_client_class()(
            api_version=1,
            inspector_url=inspector_endpoint.url,
            auth_token=security_ctx.auth_token,
        )
class SwiftAction(base.OpenStackAction):
    """OpenStack action wrapper around the Swift (object store) client."""

    @classmethod
    def _get_client_class(cls):
        return swift_client.Connection

    def _create_client(self):
        security_ctx = context.ctx()

        LOG.debug("Swift action security context: %s" % security_ctx)

        endpoint = keystone_utils.get_endpoint_for_project('swift')

        # The endpoint template contains a %(tenant_id)s placeholder.
        preauth_url = endpoint.url % {'tenant_id': security_ctx.project_id}

        return self._get_client_class()(
            preauthurl=preauth_url,
            preauthtoken=security_ctx.auth_token,
            insecure=security_ctx.insecure
        )
class ZaqarAction(base.OpenStackAction):
    """OpenStack action wrapper around the Zaqar (messaging) client."""

    @classmethod
    def _get_client_class(cls):
        return zaqarclient.Client

    def _create_client(self):
        security_ctx = context.ctx()

        LOG.debug("Zaqar action security context: %s" % security_ctx)

        zaqar_endpoint = keystone_utils.get_endpoint_for_project(
            service_type='messaging')
        keystone_endpoint = keystone_utils.get_keystone_endpoint_v2()

        auth_options = {
            'os_auth_token': security_ctx.auth_token,
            'os_auth_url': keystone_endpoint.url,
            'os_project_id': security_ctx.project_id,
            'insecure': security_ctx.insecure,
        }

        conf = {
            'auth_opts': {
                'backend': 'keystone',
                'options': auth_options,
            }
        }

        return self._get_client_class()(zaqar_endpoint.url, conf=conf)

    @classmethod
    def _get_fake_client(cls):
        return cls._get_client_class()("")

    @classmethod
    def _get_client_method(cls, client):
        method = getattr(cls, cls.client_method_name)

        # We can't use partial as it's not supported by getargspec
        @functools.wraps(method)
        def wrapper(*args, **kwargs):
            return method(client, *args, **kwargs)

        arg_list = inspect_utils.get_arg_list_as_str(method)

        # Strip the leading 'client' argument from the advertised signature.
        wrapper.__arguments__ = arg_list.split(', ', 1)[1]

        return wrapper

    @staticmethod
    def queue_messages(client, queue_name, **params):
        """Gets a list of messages from the queue.

        :param queue_name: Name of the target queue.
        :type queue_name: `six.string_type`

        :param params: Filters to use for getting messages.
        :type params: **kwargs dict

        :returns: List of messages.
        :rtype: `list`
        """
        return client.queue(queue_name).messages(**params)

    @staticmethod
    def queue_post(client, queue_name, messages):
        """Posts one or more messages to a queue.

        :param queue_name: Name of the target queue.
        :type queue_name: `six.string_type`

        :param messages: One or more messages to post.
        :type messages: `list` or `dict`

        :returns: A dict with the result of this operation.
        :rtype: `dict`
        """
        return client.queue(queue_name).post(messages)

    @staticmethod
    def queue_pop(client, queue_name, count=1):
        """Pop `count` messages from the queue.

        :param queue_name: Name of the target queue.
        :type queue_name: `six.string_type`

        :param count: Number of messages to pop.
        :type count: int

        :returns: List of messages.
        :rtype: `list`
        """
        return client.queue(queue_name).pop(count)
class BarbicanAction(base.OpenStackAction):
    """OpenStack action wrapper around the Barbican (key manager) client."""

    @classmethod
    def _get_client_class(cls):
        return barbicanclient.Client

    def _create_client(self):
        security_ctx = context.ctx()

        LOG.debug("Barbican action security context: %s" % security_ctx)

        barbican_endpoint = keystone_utils.get_endpoint_for_project('barbican')
        keystone_endpoint = keystone_utils.get_keystone_endpoint_v2()

        token_auth = identity.v2.Token(
            auth_url=keystone_endpoint.url,
            tenant_name=security_ctx.user_name,
            token=security_ctx.auth_token,
            tenant_id=security_ctx.project_id
        )

        return self._get_client_class()(
            project_id=security_ctx.project_id,
            endpoint=barbican_endpoint.url,
            auth=token_auth,
            insecure=security_ctx.insecure
        )

    @classmethod
    def _get_fake_client(cls):
        return cls._get_client_class()(
            project_id="1",
            endpoint="http://127.0.0.1:9311"
        )

    @classmethod
    def _get_client_method(cls, client):
        # Only secrets_store needs custom wrapping; delegate the rest.
        if cls.client_method_name != "secrets_store":
            return super(BarbicanAction, cls)._get_client_method(client)

        method = getattr(cls, cls.client_method_name)

        @functools.wraps(method)
        def wrapper(*args, **kwargs):
            return method(client, *args, **kwargs)

        arg_list = inspect_utils.get_arg_list_as_str(method)

        # Strip the leading 'client' argument from the advertised signature.
        wrapper.__arguments__ = arg_list.split(', ', 1)[1]

        return wrapper

    @staticmethod
    def secrets_store(client,
                      name=None,
                      payload=None,
                      algorithm=None,
                      bit_length=None,
                      secret_type=None,
                      mode=None, expiration=None):
        """Create and Store a secret in Barbican.

        :param name: A friendly name for the Secret
        :type name: string

        :param payload: The unencrypted secret data
        :type payload: string

        :param algorithm: The algorithm associated with this secret key
        :type algorithm: string

        :param bit_length: The bit length of this secret key
        :type bit_length: int

        :param secret_type: The secret type for this secret key
        :type secret_type: string

        :param mode: The algorithm mode used with this secret key
        :type mode: string

        :param expiration: The expiration time of the secret in ISO 8601 format
        :type expiration: string

        :returns: A new Secret object
        :rtype: class:`barbicanclient.secrets.Secret'
        """
        secret = client.secrets.create(
            name,
            payload,
            algorithm,
            bit_length,
            secret_type,
            mode,
            expiration
        )

        secret.store()

        return secret._get_formatted_entity()
class DesignateAction(base.OpenStackAction):
    """OpenStack action wrapper around the Designate (DNS) client."""

    @classmethod
    def _get_client_class(cls):
        return designateclient.Client

    def _create_client(self):
        security_ctx = context.ctx()

        LOG.debug("Designate action security context: %s" % security_ctx)

        endpoint = keystone_utils.get_endpoint_for_project(
            service_type='dns'
        )

        url = keystone_utils.format_url(
            endpoint.url,
            {'tenant_id': security_ctx.project_id}
        )

        dns_client = self._get_client_class()(
            endpoint=url,
            tenant_id=security_ctx.project_id,
            auth_url=security_ctx.auth_uri,
            region_name=endpoint.region,
            service_type='dns',
            insecure=security_ctx.insecure
        )

        # Reuse the caller's token instead of re-authenticating.
        dns_client.client.auth_token = security_ctx.auth_token
        dns_client.client.management_url = url

        return dns_client

    @classmethod
    def _get_fake_client(cls):
        return cls._get_client_class()()
class MagnumAction(base.OpenStackAction):
    """OpenStack action wrapper around the Magnum (containers) client."""

    @classmethod
    def _get_client_class(cls):
        return magnumclient.Client

    def _create_client(self):
        security_ctx = context.ctx()

        LOG.debug("Magnum action security context: %s" % security_ctx)

        auth_endpoint = keystone_utils.get_keystone_endpoint_v2()
        magnum_endpoint = keystone_utils.get_endpoint_for_project('magnum')

        return self._get_client_class()(
            magnum_url=magnum_endpoint.url,
            auth_token=security_ctx.auth_token,
            project_id=security_ctx.project_id,
            user_id=security_ctx.user_id,
            auth_url=auth_endpoint.url,
            insecure=security_ctx.insecure
        )

    @classmethod
    def _get_fake_client(cls):
        # Placeholder URLs are sufficient for introspection.
        return cls._get_client_class()(auth_url='X', magnum_url='X')
class MuranoAction(base.OpenStackAction):
    """OpenStack action wrapper around the Murano (app catalog) client."""

    @classmethod
    def _get_client_class(cls):
        return muranoclient.Client

    def _create_client(self):
        security_ctx = context.ctx()

        LOG.debug("Murano action security context: %s" % security_ctx)

        auth_endpoint = keystone_utils.get_keystone_endpoint_v2()
        murano_endpoint = keystone_utils.get_endpoint_for_project('murano')

        return self._get_client_class()(
            endpoint=murano_endpoint.url,
            token=security_ctx.auth_token,
            tenant=security_ctx.project_id,
            region_name=murano_endpoint.region,
            auth_url=auth_endpoint.url,
            insecure=security_ctx.insecure
        )

    @classmethod
    def _get_fake_client(cls):
        return cls._get_client_class()("http://127.0.0.1:8082/")
class TackerAction(base.OpenStackAction):
    """OpenStack action wrapper around the Tacker (NFV) client."""

    @classmethod
    def _get_client_class(cls):
        return tackerclient.Client

    def _create_client(self):
        security_ctx = context.ctx()

        LOG.debug("Tacker action security context: %s" % security_ctx)

        auth_endpoint = keystone_utils.get_keystone_endpoint_v2()
        tacker_endpoint = keystone_utils.get_endpoint_for_project('tacker')

        client_kwargs = {
            'endpoint_url': tacker_endpoint.url,
            'token': security_ctx.auth_token,
            'tenant_id': security_ctx.project_id,
            'region_name': tacker_endpoint.region,
            'auth_url': auth_endpoint.url,
            'insecure': security_ctx.insecure,
        }

        return self._get_client_class()(**client_kwargs)

    @classmethod
    def _get_fake_client(cls):
        return cls._get_client_class()()
class SenlinAction(base.OpenStackAction):
    """OpenStack action wrapper around the Senlin (clustering) client."""

    @classmethod
    def _get_client_class(cls):
        return senlinclient.Client

    def _create_client(self):
        security_ctx = context.ctx()

        LOG.debug("Senlin action security context: %s" % security_ctx)

        auth_endpoint = keystone_utils.get_keystone_endpoint_v2()
        senlin_endpoint = keystone_utils.get_endpoint_for_project('senlin')

        client_kwargs = {
            'endpoint_url': senlin_endpoint.url,
            'token': security_ctx.auth_token,
            'tenant_id': security_ctx.project_id,
            'region_name': senlin_endpoint.region,
            'auth_url': auth_endpoint.url,
            'insecure': security_ctx.insecure,
        }

        return self._get_client_class()(**client_kwargs)

    @classmethod
    def _get_fake_client(cls):
        return cls._get_client_class()("http://127.0.0.1:8778")
class AodhAction(base.OpenStackAction):
    """OpenStack action wrapper around the Aodh (alarming) client."""

    @classmethod
    def _get_client_class(cls):
        return aodhclient.Client

    def _create_client(self):
        security_ctx = context.ctx()

        LOG.debug("Aodh action security context: %s" % security_ctx)

        endpoint = keystone_utils.get_endpoint_for_project(
            'aodh'
        )

        # Substitute the caller's project id into the endpoint template.
        url = keystone_utils.format_url(
            endpoint.url,
            {'tenant_id': security_ctx.project_id}
        )

        return self._get_client_class()(
            url,
            region_name=endpoint.region,
            token=security_ctx.auth_token,
            username=security_ctx.user_name,
            insecure=security_ctx.insecure
        )

    @classmethod
    def _get_fake_client(cls):
        return cls._get_client_class()()
class GnocchiAction(base.OpenStackAction):
    """OpenStack action wrapper around the Gnocchi (metrics) client."""
    @classmethod
    def _get_client_class(cls):
        return gnocchiclient.Client
    def _create_client(self):
        # Build a Gnocchi client authenticated with the caller's token.
        ctx = context.ctx()
        LOG.debug("Gnocchi action security context: %s" % ctx)
        gnocchi_endpoint = keystone_utils.get_endpoint_for_project(
            'gnocchi'
        )
        # Substitute the caller's project id into the endpoint template.
        endpoint_url = keystone_utils.format_url(
            gnocchi_endpoint.url,
            {'tenant_id': ctx.project_id}
        )
        # NOTE(review): unlike the sibling actions (e.g. AodhAction), no
        # insecure=ctx.insecure is passed here -- confirm whether the
        # gnocchi client accepts that keyword and add it for consistency.
        return self._get_client_class()(
            endpoint_url,
            region_name=gnocchi_endpoint.region,
            token=ctx.auth_token,
            username=ctx.user_name
        )
    @classmethod
    def _get_fake_client(cls):
        # A no-argument client is sufficient for API introspection.
        return cls._get_client_class()()
|
<filename>kaspersky/src/kaspersky/master_yara/importer.py
"""Kaspersky Master YARA importer module."""
from typing import Any, Mapping, Optional
from pycti import OpenCTIConnectorHelper # type: ignore
from stix2 import Bundle, Identity, MarkingDefinition # type: ignore
from stix2.exceptions import STIXError # type: ignore
from kaspersky.client import KasperskyClient
from kaspersky.importer import BaseImporter
from kaspersky.master_yara.builder import YaraRuleBundleBuilder
from kaspersky.models import Yara, YaraRule
from kaspersky.utils import (
YaraRuleUpdater,
convert_yara_rules_to_yara_model,
datetime_to_timestamp,
datetime_utc_now,
is_current_weekday_before_datetime,
timestamp_to_datetime,
)
class MasterYaraImporter(BaseImporter):
    """Kaspersky Master YARA importer.

    Periodically fetches the consolidated ("master") YARA ruleset for the
    APT report group, converts each new rule into a STIX bundle and sends
    it to OpenCTI.
    """

    # State key under which the timestamp of the last successful run is kept.
    _LATEST_MASTER_YARA_TIMESTAMP = "latest_master_yara_timestamp"

    def __init__(
        self,
        helper: OpenCTIConnectorHelper,
        client: KasperskyClient,
        author: Identity,
        tlp_marking: MarkingDefinition,
        update_existing_data: bool,
        master_yara_fetch_weekday: Optional[int],
        master_yara_report_type: str,
        master_yara_report_status: int,
    ) -> None:
        """Initialize Kaspersky Master YARA importer."""
        super().__init__(helper, client, author, tlp_marking, update_existing_data)

        self.master_yara_fetch_weekday = master_yara_fetch_weekday
        self.master_yara_report_type = master_yara_report_type
        self.master_yara_report_status = master_yara_report_status

        self.yara_rule_updater = YaraRuleUpdater(self.helper)

    def run(self, state: Mapping[str, Any]) -> Mapping[str, Any]:
        """Fetch the Master YARA, import new rules and return updated state.

        :param state: Connector state from the previous run.
        :return: New state containing the timestamp of this run, or the
            unchanged input state when the configured fetch weekday says
            it is not time to fetch yet.
        """
        self._info(
            "Running Kaspersky Master YARA importer (update data: {0})...",
            self.update_existing_data,
        )

        latest_master_yara_timestamp = state.get(self._LATEST_MASTER_YARA_TIMESTAMP)
        if latest_master_yara_timestamp is None:
            latest_master_yara_datetime = None
        else:
            latest_master_yara_datetime = timestamp_to_datetime(
                latest_master_yara_timestamp
            )

        master_yara_fetch_weekday = self.master_yara_fetch_weekday
        if master_yara_fetch_weekday is not None:
            if not is_current_weekday_before_datetime(
                master_yara_fetch_weekday, latest_master_yara_datetime
            ):
                self._info("It is not time to fetch the Master YARA yet.")
                return state

        yara = self._fetch_master_yara()

        yara_rules = yara.rules
        yara_rule_count = len(yara_rules)

        self._info(
            "Master YARA with {0} rules...",
            yara_rule_count,
        )

        # Pass the already-bound list instead of re-reading yara.rules.
        new_yara_rules = self.yara_rule_updater.update_existing(yara_rules)
        new_yara_rule_count = len(new_yara_rules)

        self._info(
            "{0} new YARA rules...",
            new_yara_rule_count,
        )

        # Count the rules that failed to convert into a STIX bundle.
        failed_count = sum(
            1
            for yara_rule in new_yara_rules
            if not self._process_yara_rule(yara_rule)
        )

        success_count = new_yara_rule_count - failed_count

        self._info(
            "Kaspersky Master YARA importer completed (imported: {0}, total: {1})",
            success_count,
            new_yara_rule_count,
        )

        return {
            self._LATEST_MASTER_YARA_TIMESTAMP: datetime_to_timestamp(
                datetime_utc_now()
            )
        }

    def _fetch_master_yara(self) -> Yara:
        """Download the Master YARA for the APT report group and parse it."""
        report_group = "apt"

        master_yara = self.client.get_master_yara(report_group)

        return convert_yara_rules_to_yara_model(master_yara, imports_at_top=True)

    def _process_yara_rule(self, yara_rule: YaraRule) -> bool:
        """Build and send a bundle for one rule; return True on success."""
        self._info("Processing YARA rule {0}...", yara_rule.name)

        yara_rule_bundle = self._create_yara_rule_bundle(yara_rule)
        if yara_rule_bundle is None:
            return False

        self._send_bundle(yara_rule_bundle)

        return True

    def _create_yara_rule_bundle(self, yara_rule: YaraRule) -> Optional[Bundle]:
        """Convert a YARA rule into a STIX bundle, or None on STIX errors."""
        bundle_builder = YaraRuleBundleBuilder(
            yara_rule,
            self.author,
            [self.tlp_marking],
            self._source_name(),
            self._confidence_level(),
            self.master_yara_report_type,
            self.master_yara_report_status,
        )

        try:
            return bundle_builder.build()
        except STIXError as e:
            self._error(
                "Failed to build YARA rule bundle for '{0}': {1}",
                yara_rule.name,
                e,
            )
            return None
|
<reponame>D-ICE/chrono<filename>src/demos/python/chrono-tensorflow/envs/chtrain_pendulum.py<gh_stars>1-10
import pychrono as chrono
from pychrono import irrlicht as chronoirr
#from pychrono import postprocess
import numpy as np
class Model(object):
    """Inverted pendulum on a sliding table, driven by a horizontal force.

    Exposes a minimal gym-like interface (reset/step) on top of a
    PyChrono multibody simulation, optionally rendered with Irrlicht.
    """

    def __init__(self, render):
        """Build the Chrono system and, if requested, the Irrlicht app.

        :param render: if True, create an Irrlicht window and advance the
                       simulation through it; otherwise run headless.
        """
        self.render = render

        # Observation: [slider pos, slider vel, pendulum angle, angular vel].
        self.observation_space = np.empty([4, 1])
        # Action: horizontal force applied to the table.
        self.action_space = np.empty([1, 1])
        self.info = {}
        # Fixed integration step, shared by the Irrlicht app and by the
        # headless DoStepDynamics branch. (Bug fix: the original never
        # defined self.timestep, so step() crashed when render=False.)
        self.timestep = 0.01

        # ---------------------------------------------------------------------
        # Create the simulation system and add items.
        self.rev_pend_sys = chrono.ChSystemNSC()

        # Set the default outward/inward shape margins for collision detection,
        # this is especially important for very large or very small objects.
        chrono.ChCollisionModel.SetDefaultSuggestedEnvelope(0.001)
        chrono.ChCollisionModel.SetDefaultSuggestedMargin(0.001)

        # rev_pend_sys.SetSolverType(chrono.ChSolver.Type_BARZILAIBORWEIN)  # precise, slower
        self.rev_pend_sys.SetMaxItersSolverSpeed(70)

        # Contact material (surface property) shared between all objects.
        # No contacts are expected in this scene, but bodies still need one.
        self.rod_material = chrono.ChMaterialSurfaceNSC()
        self.rod_material.SetFriction(0.5)
        self.rod_material.SetDampingF(0.2)
        self.rod_material.SetCompliance(0.0000001)
        self.rod_material.SetComplianceT(0.0000001)
        # rod_material.SetRollingFriction(rollfrict_param)
        # rod_material.SetSpinningFriction(0)
        # rod_material.SetComplianceRolling(0.0000001)
        # rod_material.SetComplianceSpinning(0.0000001)

        # Pendulum rod parameters (cylinder along the Y axis).
        self.size_rod_y = 2.0
        self.radius_rod = 0.05
        self.density_rod = 50  # kg/m^3

        self.mass_rod = (self.density_rod * self.size_rod_y *
                         chrono.CH_C_PI * (self.radius_rod ** 2))
        self.inertia_rod_y = (self.radius_rod ** 2) * self.mass_rod / 2
        self.inertia_rod_x = ((self.mass_rod / 12) *
                              ((self.size_rod_y ** 2) + 3 * (self.radius_rod ** 2)))

        # Sliding table dimensions.
        self.size_table_x = 0.3
        self.size_table_y = 0.3
        self.size_table_z = 0.3

        if self.render:
            self.myapplication = chronoirr.ChIrrApp(self.rev_pend_sys)
            self.myapplication.AddShadowAll()
            self.myapplication.SetStepManage(True)
            self.myapplication.SetTimestep(self.timestep)
            self.myapplication.SetTryRealtime(True)

            self.myapplication.AddTypicalSky('../data/skybox/')
            self.myapplication.AddTypicalCamera(chronoirr.vector3df(0.5, 0.5, 1.0))
            self.myapplication.AddLightWithShadow(
                chronoirr.vector3df(2, 4, 2),  # point
                chronoirr.vector3df(0, 0, 0),  # aimpoint
                9,                             # radius (power)
                1, 9,                          # near, far
                30)                            # angle of FOV

    def reset(self):
        """Rebuild all bodies and joints; return the initial observation."""
        self.isdone = False
        self.rev_pend_sys.Clear()

        # --- Pendulum rod body ---
        self.body_rod = chrono.ChBody()
        self.body_rod.SetPos(chrono.ChVectorD(0, self.size_rod_y / 2, 0))
        self.body_rod.SetMass(self.mass_rod)
        # Inertia about X and Z are identical for a cylinder along Y.
        self.body_rod.SetInertiaXX(chrono.ChVectorD(
            self.inertia_rod_x, self.inertia_rod_y, self.inertia_rod_x))
        self.body_rod.SetMaterialSurface(self.rod_material)

        # Visualization shape: a cylinder spanning the rod's two ends.
        self.cyl_base1 = chrono.ChVectorD(0, -self.size_rod_y / 2, 0)
        self.cyl_base2 = chrono.ChVectorD(0, self.size_rod_y / 2, 0)
        self.body_rod_shape = chrono.ChCylinderShape()
        self.body_rod_shape.GetCylinderGeometry().p1 = self.cyl_base1
        self.body_rod_shape.GetCylinderGeometry().p2 = self.cyl_base2
        self.body_rod_shape.GetCylinderGeometry().rad = self.radius_rod
        self.body_rod.AddAsset(self.body_rod_shape)

        self.rev_pend_sys.Add(self.body_rod)

        # --- Fixed floor, well below the table ---
        self.body_floor = chrono.ChBody()
        self.body_floor.SetBodyFixed(True)
        self.body_floor.SetPos(chrono.ChVectorD(0, -5, 0))
        self.body_floor.SetMaterialSurface(self.rod_material)

        if self.render:
            self.body_floor_shape = chrono.ChBoxShape()
            self.body_floor_shape.GetBoxGeometry().Size = chrono.ChVectorD(3, 1, 3)
            self.body_floor.GetAssets().push_back(self.body_floor_shape)
            self.body_floor_texture = chrono.ChTexture()
            self.body_floor_texture.SetTextureFilename('../../../data/concrete.jpg')
            self.body_floor.GetAssets().push_back(self.body_floor_texture)

        self.rev_pend_sys.Add(self.body_floor)

        # --- Sliding table carrying the pendulum pivot ---
        self.body_table = chrono.ChBody()
        self.body_table.SetPos(chrono.ChVectorD(0, -self.size_table_y / 2, 0))
        self.body_table.SetMaterialSurface(self.rod_material)

        if self.render:
            self.body_table_shape = chrono.ChBoxShape()
            self.body_table_shape.GetBoxGeometry().Size = chrono.ChVectorD(
                self.size_table_x / 2, self.size_table_y / 2, self.size_table_z / 2)
            self.body_table_shape.SetColor(chrono.ChColor(0.4, 0.4, 0.5))
            self.body_table.GetAssets().push_back(self.body_table_shape)
            self.body_table_texture = chrono.ChTexture()
            self.body_table_texture.SetTextureFilename('../../../data/concrete.jpg')
            self.body_table.GetAssets().push_back(self.body_table_texture)

        self.body_table.SetMass(0.1)
        self.rev_pend_sys.Add(self.body_table)

        # Prismatic joint: the table may only translate along X w.r.t. the
        # floor; the joint frame is rotated so its Z axis lies along X.
        self.link_slider = chrono.ChLinkLockPrismatic()
        z2x = chrono.ChQuaternionD()
        z2x.Q_from_AngAxis(-chrono.CH_C_PI / 2, chrono.ChVectorD(0, 1, 0))
        self.link_slider.Initialize(
            self.body_table, self.body_floor,
            chrono.ChCoordsysD(chrono.ChVectorD(0, 0, 0), z2x))
        self.rev_pend_sys.Add(self.link_slider)
        self.link_slider.SetMotion_axis(chrono.ChVectorD(1, 0, 0))

        # Linear force actuator between table and floor.
        self.act_initpos = chrono.ChVectorD(0, 0, 0)
        self.actuator = chrono.ChLinkMotorLinearForce()
        self.actuator.Initialize(
            self.body_table, self.body_floor, chrono.ChFrameD(self.act_initpos))
        self.rev_pend_sys.Add(self.actuator)

        # Revolute joint between rod and table, pinned at the origin.
        self.rod_pin = chrono.ChMarker()
        self.body_rod.AddMarker(self.rod_pin)
        self.rod_pin.Impose_Abs_Coord(chrono.ChCoordsysD(chrono.ChVectorD(0, 0, 0)))
        self.table_pin = chrono.ChMarker()
        self.body_table.AddMarker(self.table_pin)
        self.table_pin.Impose_Abs_Coord(chrono.ChCoordsysD(chrono.ChVectorD(0, 0, 0)))
        self.pin_joint = chrono.ChLinkLockRevolute()
        self.pin_joint.Initialize(self.rod_pin, self.table_pin)
        self.rev_pend_sys.Add(self.pin_joint)

        if self.render:
            # Bind every visualization asset to an Irrlicht proxy and
            # convert them into 3D meshes so they can be drawn.
            self.myapplication.AssetBindAll()
            self.myapplication.AssetUpdateAll()

        self.isdone = False
        self.steps = 0
        # Take one zero-force step so cached values (e.g. self.omega)
        # are valid before the first get_ob() call.
        self.step(np.array([[0]]))

        return self.get_ob()

    def step(self, ac):
        """Apply the 1x1 force action `ac` and advance one timestep.

        :returns: (observation, reward, done flag, info dict)
        """
        action = float(ac[0])
        self.steps += 1
        self.ac = chrono.ChFunction_Const(action)
        self.actuator.SetForceFunction(self.ac)
        self.omega = self.pin_joint.GetRelWvel().Length()

        if self.render:
            self.myapplication.GetDevice().run()
            self.myapplication.BeginScene()
            self.myapplication.DrawAll()
            self.myapplication.DoStep()
        else:
            # Headless stepping (fixed in __init__: self.timestep is now set).
            self.rev_pend_sys.DoStepDynamics(self.timestep)

        # Survival reward: +1 per step while the pendulum stays upright.
        self.rew = 1.0

        self.obs = self.get_ob()
        if self.render:
            self.myapplication.EndScene()
        self.is_done()

        return self.obs, self.rew, self.isdone, self.info

    def get_ob(self):
        """Return [slider pos, slider vel, pendulum angle, angular vel]."""
        self.state = [
            self.link_slider.GetDist(),
            self.link_slider.GetDist_dt(),
            self.pin_joint.GetRelAngle(),
            self.omega,
        ]
        return np.asarray(self.state)

    def is_done(self):
        """Flag the episode done when the table or pendulum leaves bounds."""
        if (abs(self.link_slider.GetDist()) > 2 or self.steps > 100000
                or abs(self.pin_joint.GetRelAngle()) > 0.2):
            self.isdone = True

    def ScreenCapture(self, interval):
        """Enable periodic video-frame saving (render mode only)."""
        try:
            self.myapplication.SetVideoframeSave(True)
            self.myapplication.SetVideoframeSaveInterval(interval)
        except Exception:
            # Headless runs have no ChIrrApp; narrowed from a bare except.
            print('No ChIrrApp found. Cannot save video frames.')

    def __del__(self):
        # Only the rendered variant owns an Irrlicht device to close;
        # guard so headless instances do not raise in the destructor.
        if getattr(self, 'render', False) and hasattr(self, 'myapplication'):
            self.myapplication.GetDevice().closeDevice()
            print('Destructor called, Device deleted.')
|
from copy import deepcopy
import jsonschema
from django.contrib.postgres.fields.jsonb import JSONField
from django.core import exceptions
from django.db import DataError, connection, models
from prettytable import from_db_cursor
from rest_framework import exceptions
from .utils import DefaultValidatingDraft4Validator, values_from_json
class Ruleset(models.Model):
    """An eligibility rules program for one (program, entity) pair.

    Holds the tree of Nodes/Rules (via node_set) and the JSON syntax
    schemas (via syntaxschema_set) used to validate incoming payloads.
    """
    program = models.TextField(null=False, blank=False)
    entity = models.TextField(null=False, blank=False)
    sample_input = JSONField(null=True, blank=True)
    # Bug fix: use the `dict` callable, not a `{}` literal, so every row
    # gets its own default instead of sharing one mutable dict.
    null_sources = JSONField(null=True, blank=True, default=dict)

    class Meta:
        unique_together = (("program", "entity"), )

    def validate(self, applications):
        """
        Validate payload against this ruleset's syntax schemas.

        Includes using it to fill in default values from the schema.

        Returns the validated payload.
        """
        for syntax_schema in self.syntaxschema_set.all():
            try:
                DefaultValidatingDraft4Validator(
                    syntax_schema.code).validate(applications)
            except jsonschema.ValidationError as valerr:
                # Surface schema violations as API parse errors.
                raise exceptions.ParseError(str(valerr))
        return applications

    @property
    def schema(self):
        """First syntax schema attached to this ruleset, or None."""
        return self.syntaxschema_set.first()

    def flattened(self, payload):
        """Yield one merged dict per applicant.

        Each yielded dict is the shared (non-applicant) payload fields
        plus that applicant's own fields. Note: pops 'applicants' from
        `payload`, mutating the caller's dict.
        """
        applicants = payload.pop('applicants')
        for applicant in applicants:
            applicant_info = deepcopy(payload)
            applicant_info.update(applicant)
            yield applicant_info

    def null_source_sql(self, raw):
        """Yield CTE fragments for configured sources absent from `raw`."""
        for (key, val) in self.null_sources.items():
            if key not in raw:
                yield " %s as ( select * from %s ) " % (key, val)

    def source_sql_statements(self, raw):
        """Yield each source SQL statement followed by its tabulated rows.

        Debugging helper: interleaves the rendered SQL text with a
        prettytable dump of its result set.
        """
        with connection.cursor() as cursor:
            for (source_sql, source_data) in values_from_json(
                    raw, self.schema):
                table_name = source_sql.split()[0]
                source_sql = "with " + source_sql + " select * from " + table_name
                source_sql = source_sql.replace("%s", "'%s'") % source_data
                yield (source_sql)
                cursor.execute(source_sql)
                yield str(from_db_cursor(cursor))

    def values_from_json(self, raw):
        """Build the WITH clause and parameter tuple for payload `raw`.

        Combines the data-driven sources with any configured null
        sources that the payload did not supply.
        """
        (source_sql,
         source_data) = zip(*(values_from_json(raw, schema=self.schema)))
        source_sql += tuple(self.null_source_sql(raw))
        source_clause = 'WITH ' + ',\n'.join(source_sql)
        return (source_clause, source_data)

    def calc(self, application):
        """Evaluate every top-level node for every applicant.

        Returns {applicant_id: result} where result carries 'eligible',
        per-node 'requirements' and the derived 'categories'.
        """
        overall_result = {}

        for applicant in self.flattened(application):
            eligibility = True
            result = {'requirements': {}}
            (source_clause, source_data) = self.values_from_json(applicant)

            for node in self.node_set.filter(parent__isnull=True):
                node_result = node.calc(source_clause, source_data)
                result['requirements'][node.name] = node_result
                # The special 'categories' node classifies applicants;
                # it does not gate overall eligibility.
                if node.name != 'categories':
                    eligibility &= node_result['eligible']

            result['eligible'] = eligibility

            categories = result['requirements'].pop('categories', {})
            category_names = [
                key
                for (key, val) in categories.get('subfindings', {}).items()
                if val['eligible']
            ]
            result['categories'] = {
                'applicable': category_names,
                'findings': categories.get('subfindings', {})
            }

            # Single assignment per applicant (a redundant earlier
            # assignment of the same dict was removed).
            overall_result[int(applicant['id'])] = result

        return overall_result

    def sql(self, application):
        """Yield the SQL of every node's rules for every applicant."""
        for applicant in self.flattened(application):
            (source_clause, source_data) = self.values_from_json(applicant)
            for node in self.node_set.all():
                yield from node.sql(source_clause, source_data)
class Node(models.Model):
    """A node in a ruleset's requirement tree.

    A node aggregates the results of its child nodes and its directly
    attached rules; `requires_all` selects AND vs OR aggregation.
    """
    name = models.TextField(null=False, blank=False)
    # Self-referential FK; null for top-level nodes.
    parent = models.ForeignKey('self', on_delete=models.CASCADE, null=True)
    # Set only on top-level nodes; descendants reach it via get_ruleset.
    ruleset = models.ForeignKey(Ruleset, null=True, on_delete=models.CASCADE)
    # True => all children/rules must pass (AND); False => any may (OR).
    requires_all = models.BooleanField(null=False, blank=False, default=False)
    class Meta:
        unique_together = (("name", "parent", "ruleset"), )
    @property
    def get_ruleset(self):
        # Walk up the tree to the nearest ancestor with a ruleset set.
        return self.ruleset or self.parent.get_ruleset
    def sql(self, source_clause, source_data):
        # Yield the generated SQL of each rule attached to this node.
        for rule in self.rule_set.all():
            yield rule.sql(source_clause, source_data)
    def calc(self, source_clause, source_data):
        """Evaluate this node against the given source data.

        Returns a dict with 'eligible', accumulated 'explanation' and
        'limitation' lists, and per-child 'subfindings'.
        """
        # Start from the identity element of the chosen aggregation:
        # True for AND (requires_all), False for OR.
        if self.requires_all:
            eligibility = True
        else:
            eligibility = False
        node_result = {'limitation': [], 'explanation': [], 'subfindings': {}}
        # Fold in child nodes first (recursive evaluation)...
        for child_node in self.node_set.all():
            child_node_result = child_node.calc(source_clause, source_data)
            if self.requires_all:
                eligibility &= child_node_result['eligible']
            else:
                eligibility |= child_node_result['eligible']
            node_result['explanation'].append(child_node_result['explanation'])
            # Limitations are only carried up from branches that passed.
            if child_node_result['eligible'] and child_node_result['limitation']:
                node_result['limitation'].append(
                    child_node_result['limitation'])
            node_result['subfindings'][child_node.name] = child_node_result
        # ...then the rules attached directly to this node.
        for rule in self.rule_set.all():
            rule_result = rule.calc(source_clause, source_data)
            node_result['explanation'].append(rule_result['explanation'])
            if self.requires_all:
                eligibility &= rule_result['eligible']
            else:
                eligibility |= rule_result['eligible']
            if rule_result['eligible'] and rule_result['limitation']:
                node_result['limitation'].append(rule_result['limitation'])
            node_result['subfindings'][rule.name] = rule_result
        node_result['eligible'] = eligibility
        return node_result
class Rule(models.Model):
    """A single eligibility rule: a SQL snippet (`code`) evaluated against
    the applicant's data CTEs, producing (eligible, explanation, limitation)."""
    name = models.TextField(null=False, blank=False)
    code = models.TextField(null=True, blank=True)
    node = models.ForeignKey(Node, on_delete=models.CASCADE)

    class Meta:
        unique_together = (("name", "node"), )

    @property
    def ruleset(self):
        # Walk up through the owning node to its ruleset.
        return self.node.get_ruleset

    # First %s: applicant data CTEs; second %s: this rule's `code`.
    # Unpacks the composite `result` column produced by the rule.
    _SQL = """with source as (%s %s)
select (source.result).eligible,
    (source.result).explanation,
    ((source.result).limitation).end_date,
    ((source.result).limitation).normal,
    ((source.result).limitation).description,
    ((source.result).limitation).explanation AS limitation_explanation
from source"""

    def calc(self, source_clause, source_data):
        """Execute the rule and return a findings dict.

        Raises DataError (with the offending SQL embedded) on failure.
        """
        with connection.cursor() as cursor:
            sql = self._SQL % (source_clause, self.code)
            try:
                cursor.execute(sql, tuple(source_data))
            except Exception as exc:
                msg = ("Error executing rule %s\n" % self.name + str(exc) +
                       '\n\n in sql:\n\n' + sql)
                # FIX: chain the original exception so the root cause
                # (syntax error, bad cast, ...) survives in tracebacks.
                raise DataError(msg) from exc
            findings = cursor.fetchone()
        limitation = dict(
            zip(('end_date', 'normal', 'description', 'explanation'),
                findings[2:]))
        # A limitation with neither end date nor description is "no limitation".
        if (not limitation['end_date']) and (not limitation['description']):
            limitation = None
        return {
            'eligible': findings[0],
            'explanation': findings[1],
            'limitation': limitation
        }

    def sql(self, source_clause, source_data):
        """Render fully-interpolated SQL for debugging.

        Uses naive literal quoting — NOT injection-safe; never execute the
        result against untrusted input (calc() binds parameters properly).
        """
        result = self._SQL % (source_clause, self.code)
        result = result.replace("%s", "'%s'")
        return result % source_data
class SyntaxSchema(models.Model):
    """A (JSON) schema attached to a ruleset, used to derive column types."""
    ruleset = models.ForeignKey(Ruleset, on_delete=models.CASCADE)
    type = models.TextField(null=False, blank=False, default='jsonschema')
    code = JSONField(null=False, blank=False)

    def walk(self, node=None):
        """Yields all the dictionaries in a nested structure."""
        # FIX: was `node = node or self.code`, which restarted the walk at
        # the root for ANY falsy node (e.g. an empty dict reached during
        # recursion), risking infinite recursion. Default only when omitted.
        if node is None:
            node = self.code
        if isinstance(node, list):
            for itm in node:
                yield from self.walk(itm)
        else:
            yield node
            for val in node.values():
                if isinstance(val, dict):
                    yield from self.walk(val)

    # JSON-schema scalar type -> PostgreSQL column type.
    _JSONSCHEMA_TO_PG_TYPES = {
        'integer': 'integer',
        'number': 'numeric',
        'string': 'text',
        'date': 'date',
        'boolean': 'boolean',
    }

    def _col_data_type(self, col_data):
        """Postgres type for one schema property, or None if unmapped."""
        if col_data.get('format') == 'date-time':
            return 'date'
        elif col_data.get('$ref') == '#/definitions/ynexception':
            return 'text'
        else:
            data_type = col_data.get('type', 'text')
            if isinstance(data_type, list):
                # e.g. ["string", "null"]: drop 'null'; genuinely mixed
                # types fall back to text.
                data_type = [dt for dt in data_type if dt != 'null']
                data_type = 'text' if len(data_type) > 1 else data_type[0]
            return self._JSONSCHEMA_TO_PG_TYPES.get(data_type)

    def data_types(self):
        """Map column name -> Postgres type for every 'properties' entry."""
        result = {}
        for node in self.walk():
            for (col_name, col_data) in node.get('properties', {}).items():
                col_type = self._col_data_type(col_data)
                if col_type:
                    # FIX: reuse the computed type instead of recomputing.
                    result[col_name] = col_type
        return result
# todo: this should be one-to-one, or sorted so that the
# type-determiner comes first?
|
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv2d
from theano.tensor.signal import downsample
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import sys
import glob
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
import random
# Shared random stream for any stochastic Theano ops.
srng = RandomStreams()
# ZCA-whitened image arrays; labels are plain-text integers.
# Presumably CIFAR-10 (10 classes, 32x32 crops used in train()) — confirm.
X_train = np.load('X_train_zca.npy')
y_train = np.genfromtxt('../data/y_train.txt')
X_test = np.load('X_test_zca.npy')
# Architecture: 6 conv layers of 3x3 filters (pooling after layers 2 and 4),
# then one 2000-unit feedforward layer into 10 output classes.
convolutional_layers = 6
feature_maps = [3,80,80,160,160,320,320]
filter_shapes = [(3,3),(3,3),(3,3),(3,3),(3,3),(3,3)]
feedforward_layers = 1
feedforward_nodes = [2000]
classes = 10
class convolutional_layer(object):
    """Theano conv layer: 'half'-padded conv2d, optional max-pool, ELU."""

    def __init__(self, input, output_maps, input_maps, filter_height, filter_width, maxpool=None):
        self.input = input
        self.w = theano.shared(self.ortho_weights(output_maps,input_maps,filter_height,filter_width),borrow=True)
        self.b = theano.shared(np.zeros((output_maps,), dtype=theano.config.floatX),borrow=True)
        # border_mode='half' keeps spatial dims for odd filter sizes
        self.conv_out = conv2d(input=self.input, filters=self.w, border_mode='half')
        if maxpool:
            # NOTE(review): downsample.max_pool_2d is deprecated in later
            # Theano versions (pool_2d); kept for this codebase's version.
            self.conv_out = downsample.max_pool_2d(self.conv_out, ds=maxpool, ignore_border=True)
        self.output = T.nnet.elu(self.conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))

    def ortho_weights(self,chan_out,chan_in,filter_h,filter_w):
        """Orthogonal-ish init: SVD of a uniform random matrix, reshaped to
        (chan_out, chan_in, filter_h, filter_w)."""
        # FIX: removed an unused Glorot `bound` computation.
        W = np.random.random((chan_out, chan_in * filter_h * filter_w))
        u, s, v = np.linalg.svd(W,full_matrices=False)
        # Pick whichever SVD factor has chan_out rows.
        if u.shape[0] != u.shape[1]:
            W = u.reshape((chan_out, chan_in, filter_h, filter_w))
        else:
            W = v.reshape((chan_out, chan_in, filter_h, filter_w))
        return W.astype(theano.config.floatX)

    def get_params(self):
        """Trainable parameters: (weights, biases)."""
        return self.w,self.b
class feedforward_layer(object):
    """Fully-connected Theano layer with sigmoid(-(Wx + b)) activation."""

    def __init__(self,input,features,nodes):
        self.input = input
        # NOTE(review): self.bound is computed but never used anywhere
        # visible (ortho_weights derives its own bound) — candidate for removal.
        self.bound = np.sqrt(1.5/(features+nodes))
        self.w = theano.shared(self.ortho_weights(features,nodes),borrow=True)
        self.b = theano.shared(np.zeros((nodes,), dtype=theano.config.floatX),borrow=True)
        # sigmoid of the NEGATED affine map, i.e. 1 - sigmoid(Wx + b).
        self.output = T.nnet.sigmoid(-T.dot(self.input,self.w)-self.b)

    def ortho_weights(self,fan_in,fan_out):
        """Orthogonal init: SVD of a scaled Gaussian matrix."""
        bound = np.sqrt(2./(fan_in+fan_out))
        W = np.random.randn(fan_in,fan_out)*bound
        u, s, v = np.linalg.svd(W,full_matrices=False)
        # Pick whichever SVD factor matches (fan_in, fan_out).
        if u.shape[0] != u.shape[1]:
            W = u
        else:
            W = v
        return W.astype(theano.config.floatX)

    def get_params(self):
        """Trainable parameters: (weights, biases)."""
        return self.w,self.b
class neural_network(object):
    """Conv net (6 conv + FC layers) trained with Adam on one-hot targets.

    Compiles two Theano functions: `propogate` (one SGD step, returns the
    mean cross-entropy cost) and `classify` (forward pass probabilities).
    """

    def __init__(self,convolutional_layers,feature_maps,filter_shapes,feedforward_layers,feedforward_nodes,classes):
        self.input = T.tensor4()
        self.convolutional_layers = []
        self.convolutional_layers.append(convolutional_layer(self.input,feature_maps[1],feature_maps[0],filter_shapes[0][0],filter_shapes[0][1]))
        for i in range(1,convolutional_layers):
            # 2x2 max-pooling only after the 3rd and 5th conv layers
            if i==2 or i==4:
                self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output,feature_maps[i+1],feature_maps[i],filter_shapes[i][0],filter_shapes[i][1],maxpool=(2,2)))
            else:
                self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output,feature_maps[i+1],feature_maps[i],filter_shapes[i][0],filter_shapes[i][1]))
        self.feedforward_layers = []
        # 20480 = 320 maps * 8 * 8 spatial (32x32 input pooled twice) — presumably; confirm
        self.feedforward_layers.append(feedforward_layer(self.convolutional_layers[-1].output.flatten(2),20480,feedforward_nodes[0]))
        for i in range(1,feedforward_layers):
            self.feedforward_layers.append(feedforward_layer(self.feedforward_layers[i-1].output,feedforward_nodes[i-1],feedforward_nodes[i]))
        self.output_layer = feedforward_layer(self.feedforward_layers[-1].output,feedforward_nodes[-1],classes)
        self.params = []
        for l in self.convolutional_layers + self.feedforward_layers:
            self.params.extend(l.get_params())
        self.params.extend(self.output_layer.get_params())
        self.target = T.matrix()
        self.output = self.output_layer.output
        # element-wise binary cross-entropy, averaged over batch and classes
        self.cost = -self.target*T.log(self.output)-(1-self.target)*T.log(1-self.output)
        self.cost = self.cost.mean()
        self.updates = self.adam(self.cost, self.params)
        # NOTE: 'propogate' (sic) is the public name; keep for callers.
        self.propogate = theano.function([self.input,self.target],self.cost,updates=self.updates,allow_input_downcast=True)
        self.classify = theano.function([self.input],self.output,allow_input_downcast=True)

    def adam(self, cost, params, lr=0.0002, b1=0.1, b2=0.01, e=1e-8):
        """Build Adam update pairs; b1/b2 are (1 - decay) conventions."""
        updates = []
        grads = T.grad(cost, params)
        # shared step counter, bias-corrects the moment estimates
        self.i = theano.shared(np.float32(0.))
        i_t = self.i + 1.
        fix1 = 1. - (1. - b1)**i_t
        fix2 = 1. - (1. - b2)**i_t
        lr_t = lr * (T.sqrt(fix2) / fix1)
        for p, g in zip(params, grads):
            # per-parameter first/second moment accumulators
            self.m = theano.shared(p.get_value() * 0.)
            self.v = theano.shared(p.get_value() * 0.)
            m_t = (b1 * g) + ((1. - b1) * self.m)
            v_t = (b2 * T.sqr(g)) + ((1. - b2) * self.v)
            g_t = m_t / (T.sqrt(v_t) + e)
            p_t = p - (lr_t * g_t)
            updates.append((self.m, m_t))
            updates.append((self.v, v_t))
            updates.append((p, p_t))
        updates.append((self.i, i_t))
        return updates

    def train(self,X,y,batch_size=None):
        """One training step; with batch_size set, samples a random batch
        and applies random shift-crop (±5 px) and horizontal-flip augmentation."""
        if batch_size:
            indices = np.random.permutation(X.shape[0])[:batch_size]
            crop1 = np.random.randint(-5,6)
            crop2 = np.random.randint(-5,6)
            X = X[indices,:,:,:]
            # shift vertically by crop1, zero-padding the vacated rows
            if crop1 > 0:
                X = np.concatenate((X[:,:,crop1:,:],np.zeros((batch_size,3,crop1,32))),axis=2)
            elif crop1 < 0:
                X = np.concatenate((np.zeros((batch_size,3,-crop1,32)),X[:,:,:crop1,:]),axis=2)
            # shift horizontally by crop2
            if crop2 > 0:
                X = np.concatenate((X[:,:,:,crop2:],np.zeros((batch_size,3,32,crop2))),axis=3)
            elif crop2 < 0:
                X = np.concatenate((np.zeros((batch_size,3,32,-crop2)),X[:,:,:,:crop2]),axis=3)
            y = y[indices]
        y = np.concatenate((y,np.arange(10))) #make sure y includes all possible labels
        target = np.zeros((y.shape[0],len(np.unique(y))))
        for i in range(len(np.unique(y))):
            target[y==i,i] = 1
        target = target[:-10,:] #drop extra labels inserted at end
        # random horizontal flip (50%)
        if random.random() < .5:
            X = X[:,:,:,::-1]
        return self.propogate(X,target)

    def predict(self,X):
        """Class index with the highest predicted probability, per row."""
        prediction = self.classify(X)
        return np.argmax(prediction,axis=1)
# --- training driver (Python 2 print syntax) ---
print "building neural network"
nn = neural_network(convolutional_layers,feature_maps,filter_shapes,feedforward_layers,feedforward_nodes,classes)
batch_size = 100
for i in range(80000):
    cost = nn.train(X_train,y_train,batch_size)
    # \r keeps the progress line in place
    sys.stdout.write("step %i loss: %f \r" % (i+1, cost))
    sys.stdout.flush()
    # every 5000 steps: predict the test set in batches and dump to disk
    if (i+1)%5000 == 0:
        preds = []
        for j in range(0,X_test.shape[0],batch_size):
            preds.append(nn.predict(X_test[j:j+batch_size,:]))
        pred = np.concatenate(preds)
        np.savetxt('prediction.txt',pred,fmt='%.0f')
|
<reponame>mjq11302010044/TATT
import math
import torch
import torch.nn.functional as F
from torch import nn
from collections import OrderedDict
import sys
from torch.nn import init
import numpy as np
from IPython import embed
sys.path.append('./')
sys.path.append('../')
from .recognizer.tps_spatial_transformer import TPSSpatialTransformer
from .recognizer.stn_head import STNHead
from . import AttentionalImageLoss
class TextZoom(nn.Module):
    """Scene-text super-resolution net: conv head, `rrb_nums` recurrent
    residual blocks, a BN bottleneck with global skip, then pixel-shuffle
    upsampling by `scale_factor` (must be a power of two) and tanh output.

    With STN=True, a TPS spatial transformer rectifies the input first
    (training mode only).
    """

    def __init__(self, scale_factor=2, width=128, height=32, STN=False, rrb_nums=6):
        super(TextZoom, self).__init__()
        assert math.log(scale_factor, 2) % 1 == 0
        upsample_block_num = int(math.log(scale_factor, 2))
        self.block1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=9, padding=4),
            nn.PReLU()
        )
        self.rrb_nums = rrb_nums
        # blocks 2..rrb_nums+1: recurrent residual blocks
        for i in range(rrb_nums):
            setattr(self, 'block%d' % (i + 2), RecurrentResidualBlock(64))
        # block rrb_nums+2: conv+BN bottleneck before the global skip
        setattr(self, 'block%d' % (rrb_nums + 2),
                nn.Sequential(
                    nn.Conv2d(64, 64, kernel_size=3, padding=1),
                    nn.BatchNorm2d(64)
                ))
        # block rrb_nums+3: upsampling chain + final 3-channel conv
        block_ = [UpsampleBLock(64, 2) for _ in range(upsample_block_num)]
        block_.append(nn.Conv2d(64, 3, kernel_size=9, padding=4))
        setattr(self, 'block%d' % (rrb_nums + 3), nn.Sequential(*block_))
        self.tps_inputsize = [height//scale_factor, width//scale_factor]
        tps_outputsize = [height//scale_factor, width//scale_factor]
        num_control_points = 20
        tps_margins = [0.05, 0.05]
        self.stn = STN
        if self.stn:
            self.tps = TPSSpatialTransformer(
                output_image_size=tuple(tps_outputsize),
                num_control_points=num_control_points,
                margins=tuple(tps_margins))
            self.stn_head = STNHead(
                in_planes=3,
                num_ctrlpoints=num_control_points,
                activation='none')

    def forward(self, x):
        # Rectify with TPS only during training (self.training from nn.Module)
        if self.stn and self.training:
            # x = F.interpolate(x, self.tps_inputsize, mode='bilinear', align_corners=True)
            _, ctrl_points_x = self.stn_head(x)
            x, _ = self.tps(x, ctrl_points_x)
        # Run blocks sequentially, keeping each output keyed by its index.
        block = {'1': self.block1(x)}
        for i in range(self.rrb_nums + 1):
            block[str(i + 2)] = getattr(self, 'block%d' % (i + 2))(block[str(i + 1)])
        # Global residual: head output + bottleneck output feed the upsampler.
        block[str(self.rrb_nums + 3)] = getattr(self, 'block%d' % (self.rrb_nums + 3)) \
            ((block['1'] + block[str(self.rrb_nums + 2)]))
        output = torch.tanh(block[str(self.rrb_nums + 3)])
        return output
class RecurrentResidualBlock(nn.Module):
    """Residual block whose trunk is conv-BN-mish-conv-BN followed by a GRU
    sweep over the width axis; a second GRU mixes the skip-added result."""

    def __init__(self, channels):
        super(RecurrentResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(channels)
        self.gru1 = GruBlock(channels, channels)
        self.prelu = mish()  # mish activation despite the legacy attr name
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(channels)
        self.gru2 = GruBlock(channels, channels)

    def forward(self, x):
        trunk = self.prelu(self.bn1(self.conv1(x)))
        trunk = self.bn2(self.conv2(trunk))
        # gru1 scans the transposed spatial axes, then restores them
        trunk = self.gru1(trunk.transpose(-1, -2)).transpose(-1, -2)
        return self.gru2(x + trunk)
class UpsampleBLock(nn.Module):
    """Sub-pixel upsampler: conv to C*r^2 channels, PixelShuffle(r), mish."""

    def __init__(self, in_channels, up_scale):
        super(UpsampleBLock, self).__init__()
        self.conv = nn.Conv2d(in_channels, in_channels * up_scale ** 2, kernel_size=3, padding=1)
        self.pixel_shuffle = nn.PixelShuffle(up_scale)
        self.prelu = mish()  # mish activation despite the legacy attr name

    def forward(self, x):
        return self.prelu(self.pixel_shuffle(self.conv(x)))
class mish(nn.Module):
    """Mish activation: x * tanh(softplus(x)); identity when deactivated."""

    def __init__(self, ):
        super(mish, self).__init__()
        # toggle kept for compatibility with callers that disable activation
        self.activated = True

    def forward(self, x):
        return x * (torch.tanh(F.softplus(x))) if self.activated else x
class GruBlock(nn.Module):
    """1x1 conv then a bidirectional GRU along the last spatial axis.

    Rows (batch*height) become GRU batches; the two GRU directions each
    produce out_channels//2 features, so channel count is preserved.
    """

    def __init__(self, in_channels, out_channels):
        super(GruBlock, self).__init__()
        assert out_channels % 2 == 0  # two directions split the channels
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
        self.gru = nn.GRU(out_channels, out_channels // 2, bidirectional=True, batch_first=True)

    def forward(self, x):
        x = self.conv1(x)
        n, c, h, w = x.size()
        # (n, c, h, w) -> (n*h, w, c): one GRU sequence per image row
        seq = x.permute(0, 2, 3, 1).contiguous().view(n * h, w, c)
        seq, _ = self.gru(seq)
        # back to NCHW
        return seq.view(n, h, w, c).permute(0, 3, 1, 2)
if __name__ == '__main__':
    # Interactive smoke test: build a dummy low-res batch, drop into IPython.
    img = torch.zeros(7, 3, 16, 64)
    embed()
|
import pandas as pd
import random as rd
rd.seed(2020)  # fixed seed so the random splits/shuffles are reproducible
def read_csv(filename):
    """Load a CSV and replace missing values with a single-space string."""
    frame = pd.read_csv(filename)
    frame.fillna(" ", inplace=True)
    return frame
def save_csv(filename, data):
    """Write *data* to *filename* as CSV, overwriting; row index dropped."""
    data.to_csv(filename, index=False, mode="w")
def train_shuffle_before_after(data):
    """Double the training set with label-flipped copies.

    Every row appears twice: once as-is (keeping its LABEL) and once with
    the BEFORE_*/AFTER_* column pairs swapped and LABEL forced to 1. The
    combined frame is returned in random row order.
    Note: mutates *data* by adding an INDEX column.
    """
    columns = ["INDEX", "ORIGINAL_INDEX", "LABEL", "AFTER_HEADLINE",
               "AFTER_BODY", "BEFORE_HEADLINE", "BEFORE_BODY"]
    data["INDEX"] = range(len(data))
    data.reset_index(inplace=True, drop=True)

    kept = data.copy()[columns]

    swapped = data.copy()
    swapped["LABEL"] = 1
    swapped = swapped[columns]
    swapped.rename(columns={"AFTER_HEADLINE": "BEFORE_HEADLINE",
                            "AFTER_BODY": "BEFORE_BODY",
                            "BEFORE_HEADLINE": "AFTER_HEADLINE",
                            "BEFORE_BODY": "AFTER_BODY"}, inplace=True)

    # frac=1 shuffles all rows of the concatenated frame
    return pd.concat([swapped, kept]).sample(frac=1)
def shuffle_before_after(data):
    """Flip BEFORE_*/AFTER_* on a random half of the rows, labelling them 1.

    The other half keeps its original columns and LABEL. Mutates *data*
    by adding an INDEX column.
    """
    columns = ["INDEX", "ORIGINAL_INDEX", "LABEL", "AFTER_HEADLINE",
               "AFTER_BODY", "BEFORE_HEADLINE", "BEFORE_BODY"]
    data_size = len(data)
    data["INDEX"] = range(data_size)
    data.reset_index(inplace=True, drop=True)

    # pick half of the rows (by INDEX) to receive the swap
    flip = data["INDEX"].isin(rd.sample(range(data_size), k=data_size//2))

    kept = data[~flip][columns]

    swapped = data[flip]
    swapped["LABEL"] = 1
    swapped = swapped[columns]
    swapped.rename(columns={"AFTER_HEADLINE": "BEFORE_HEADLINE",
                            "AFTER_BODY": "BEFORE_BODY",
                            "BEFORE_HEADLINE": "AFTER_HEADLINE",
                            "BEFORE_BODY": "AFTER_BODY"}, inplace=True)

    return pd.concat([swapped, kept])
def split_train_valid_test(data, ratio=(0.75, 0.20, 0.05)):
    """Randomly partition *data* into (train, valid, test) frames.

    ratio gives the train and valid fractions; test receives the remainder.
    Renames INDEX -> ORIGINAL_INDEX (in place on the caller's frame) and
    initialises LABEL to 0 on the shuffled copy.
    """
    total = len(data)
    print("Number of dataset : {}".format(total))
    data.rename(columns={"INDEX": "ORIGINAL_INDEX"}, inplace=True)
    # reindex with a random permutation = row shuffle
    data = data.reindex(rd.sample(range(total), total))
    data['LABEL'] = 0
    n_train = int(total * ratio[0])
    n_valid = int(total * ratio[1])
    return (data.iloc[:n_train],
            data.iloc[n_train:n_train + n_valid],
            data.iloc[n_train + n_valid:])
def main():
    """Split the raw training CSV into train/dev/test and write a
    before/after-flipped shuffle of each split to disk."""
    DATA_PATH = "data_in/training.csv"
    TRAIN_SAVE_PATH = "data_in/train.csv"
    VALID_SAVE_PATH = "data_in/dev.csv"
    TEST_SAVE_PATH = "data_in/test.csv"
    data = read_csv(DATA_PATH)
    train_data, valid_data, test_data = split_train_valid_test(data)
    # train keeps every row twice (original + flipped); dev/test flip half
    shuffled_train_data = train_shuffle_before_after(train_data)
    save_csv(TRAIN_SAVE_PATH, shuffled_train_data)
    shuffled_valid_data = shuffle_before_after(valid_data)
    save_csv(VALID_SAVE_PATH, shuffled_valid_data)
    shuffled_test_data = shuffle_before_after(test_data)
    save_csv(TEST_SAVE_PATH, shuffled_test_data)
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python3
import sys
import socket
from time import sleep
import threading
import socket
from enum import Enum, auto
SYNCWORD_H = 0xBE  # first byte of the 0xBEEF frame sync word
SYNCWORD_L = 0xEF  # second byte of the frame sync word
class ParseState(Enum):
    """States of the byte-wise frame parser (see Radio.recv)."""
    SYNC_H = 0  # expecting sync high byte (0xBE)
    SYNC_L = 1  # expecting sync low byte (0xEF)
    FLAGS = 2  # expecting the flags byte
    CMD = 3  # expecting the command byte
    PAYLOAD_LEN = 4  # expecting the payload length byte
    PAYLOAD = 5  # consuming payload bytes
    CHKSUM_H = 6  # expecting checksum high byte
    CHKSUM_L = 7  # expecting checksum low byte
class Command(Enum):
    """Protocol command codes; replies OR in the REPLY bit (0x80)."""
    NOP = 0x00
    RESET = 0x01
    TXDATA = 0x02  # host -> radio: transmit payload
    GET_TXPWR = 0x03
    SET_TXPWR = 0x04
    RXDATA = 0x10  # radio -> host: received payload
    ERROR = 0x7F  # reply payload carries an error code
    REPLY = 0x80  # flag bit marking a response
def fletcher(chksum, byte):
    """Fold one byte value into a 16-bit Fletcher-style running checksum.

    The high byte accumulates the input, the low byte accumulates the
    high byte; both wrap at 8 bits. Returns the new 16-bit checksum.
    """
    msb = ((chksum >> 8) + byte) & 0xFF
    lsb = ((chksum & 0xFF) + msb) & 0xFF
    return (msb << 8) | lsb
def compute_chksum(data):
    """Fletcher-16 checksum of a str whose characters carry byte values."""
    acc = 0
    for ch in data:
        acc = fletcher(acc, ord(ch))
    return acc
def create_tx_pkt(data):
    """Frame *data* as a TXDATA packet.

    Layout: 0xBE 0xEF, cmd, payload length, payload, Fletcher-16 checksum
    (high then low byte) computed over cmd..payload.
    NOTE: duplicates the framing logic in Radio.tx.
    """
    # FIX: use the Command enum instead of a hard-coded '\x02' so the
    # opcode stays consistent with Radio.tx.
    pkt = chr(Command.TXDATA.value)
    pkt += chr(len(data))
    pkt += data
    chksum = compute_chksum(pkt)
    pkt += chr(chksum >> 8)
    pkt += chr(chksum & 0xFF)
    pkt = '\xBE\xEF' + pkt
    return pkt
class RadioException(Exception):
    """Error reported by the radio; maps a numeric code to a symbolic
    name (`error`) and a human-readable message (`msg`)."""

    # FIX: replaced a long if/elif ladder with a lookup table and removed
    # a stray trailing `pass` statement.
    _ERRORS = {
        0: ('ESUCCESS', 'command succeeded'),
        1: ('ETIMEOUT', 'timeout waiting for CTS'),
        2: ('EWRONGPART', 'unsupported part number'),
        3: ('EINVAL', 'invalid parameter'),
        4: ('EINVALSTATE', 'invalid internal state'),
        5: ('ETOOLONG', 'packet too long'),
        6: ('ECHKSUM', 'invalid checksum'),
        7: ('EBUSY', 'pending operation'),
        8: ('ERXTIMEOUT', 'Si446x RX timed out (zero len bug?)'),
    }

    def __init__(self, code):
        self.code = code
        # Unknown codes degrade gracefully rather than raising KeyError.
        self.error, self.msg = self._ERRORS.get(
            code, ('UNKNOWN', 'An unknown error occurred'))

    def __str__(self):
        return 'RadioException(' + self.error + '): ' + self.msg
class Radio:
    """Client for a BEEF-framed radio service over TCP.

    Frame layout: 0xBE 0xEF, flags, cmd, payload_len, payload, then a
    Fletcher-16 checksum (high, low) computed over flags..payload.
    """

    def __init__(self, host, port=2600):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((host, port))
        self.state = ParseState.SYNC_H

    def tx(self, data):
        """Send *data* as a TXDATA packet and wait for the reply.

        Retries transparently while the radio reports EBUSY; raises
        RadioException on other radio errors.
        """
        pkt = chr(Command.TXDATA.value)
        pkt += chr(len(data))
        pkt += data
        chksum = compute_chksum(pkt)
        pkt += chr(chksum >> 8)
        pkt += chr(chksum & 0xFF)
        pkt = '\xBE\xEF' + pkt
        self.sock.sendall(bytes([ord(x) for x in pkt]))
        (flags, cmd, pay) = self.recv()
        if cmd == Command.ERROR.value | Command.REPLY.value:
            err = RadioException(ord(pay[0]))
            if err.error == 'EBUSY':
                self.tx(data)  # radio busy -- retry
            else:
                raise err
        # BUG FIX: was `elif Command.TXDATA.value | Command.REPLY.value:`,
        # a bare always-true expression that silently accepted any reply.
        elif cmd == Command.TXDATA.value | Command.REPLY.value:
            return
        else:
            raise Exception('Unexpected response: ' + str((flags, cmd, pay)))

    def rx(self):
        """Block until a packet arrives; return its payload string."""
        (flags, cmd, pay) = self.recv()
        if cmd == Command.ERROR.value | Command.REPLY.value:
            # BUG FIX: pass the numeric code (as tx() does); the raw char
            # previously always mapped to the UNKNOWN error.
            raise RadioException(ord(pay[0]))
        # BUG FIX: was a bare always-true expression, see tx().
        elif cmd == Command.RXDATA.value | Command.REPLY.value:
            return pay
        else:
            raise Exception('Unexpected response: ' + str((flags, cmd, pay)))

    def recv(self):
        """Run the byte-wise parser until one full frame is consumed.

        Returns (flags, cmd, payload). Checksum mismatches are currently
        swallowed (TODO) and the frame is returned regardless.
        """
        payload = ''
        while True:
            c = self.sock.recv(1)[0]
            if self.state is ParseState.SYNC_H:
                if c == SYNCWORD_H:
                    self.state = ParseState.SYNC_L
            elif self.state is ParseState.SYNC_L:
                if c == SYNCWORD_L:
                    self.state = ParseState.FLAGS
                elif c == SYNCWORD_H:
                    # a repeated 0xBE may still start a frame
                    self.state = ParseState.SYNC_L
                else:
                    self.state = ParseState.SYNC_H
            elif self.state is ParseState.FLAGS:
                flags = c
                self.state = ParseState.CMD
            elif self.state is ParseState.CMD:
                cmd = c
                self.state = ParseState.PAYLOAD_LEN
            elif self.state is ParseState.PAYLOAD_LEN:
                length = c
                # TODO: Validate len for cmd
                if (length):
                    self.state = ParseState.PAYLOAD
                else:
                    # empty payload: checksum covers header only
                    chksum = compute_chksum(''.join([chr(flags), chr(cmd), chr(0)]))
                    self.state = ParseState.CHKSUM_H
            elif self.state is ParseState.PAYLOAD:
                payload += chr(c)
                length -= 1
                if (length == 0):
                    chksum = compute_chksum(''.join([chr(flags), chr(cmd), chr(len(payload))]) + payload)
                    self.state = ParseState.CHKSUM_H
            elif self.state is ParseState.CHKSUM_H:
                if (c == chksum >> 8):
                    # BUG FIX: the SYNC_H reset below used to run
                    # unconditionally, so CHKSUM_L was unreachable and the
                    # low checksum byte was left for the next sync scan.
                    self.state = ParseState.CHKSUM_L
                else:
                    # TODO: Handle error (bad checksum high byte)
                    self.state = ParseState.SYNC_H
                    break
            elif self.state is ParseState.CHKSUM_L:
                if (c != chksum & 0xFF):
                    # TODO: Handle error (bad checksum low byte)
                    pass
                self.state = ParseState.SYNC_H
                break
        return (flags, cmd, payload)
def main():
    """CLI driver: `host port` (or `host port rx n`) receives forever;
    `host port tx n` sends n fixed-size test packets."""
    if (len(sys.argv) == 3 or (len(sys.argv) == 5 and sys.argv[3] == 'rx')):
        radio = Radio(sys.argv[1], int(sys.argv[2]))
        numPackets = 0
        while True:
            pkt = radio.rx()
            print('RX>', pkt)
            numPackets += 1
            print('Received ' + str(numPackets) + ' packet(s)')
    elif (len(sys.argv) == 5 and sys.argv[3] == 'tx'):
        radio = Radio(sys.argv[1], int(sys.argv[2]))
        # give the link a moment before transmitting
        sleep(1)
        for i in range(int(sys.argv[4])):
            # 104-byte payload: callsign + counter, padded with 'x'
            data = ('KC2QOL ' + str(i + 1) + ' ').ljust(104, 'x')
            print('TX>', data)
            radio.tx(data)
            print('Sent ' + str(len(data)) + ' byte(s)')
        # Look ma, no sleep!
    else:
        print('Usage: python3', sys.argv[0], 'hostname port [rx | tx n]')
if __name__ == '__main__':
    main()
|
import itertools
import random
import collections
import operator
import functools
from scipy import ndimage
import numpy as np
import matplotlib
from panoptic_parts.utils.format import decode_uids
from panoptic_parts.utils.utils import _sparse_ids_mapping_to_dense_ids_mapping
# Functions that start with underscore (_) should be considered as internal.
# All other functions belong to the public API.
# Arguments and functions defined with the prefix experimental_ may be changed
# and are not backward-compatible.
# PUBLIC_API = [random_colors, uid2color]
# TODO(panos): Make VALIDATE_ARGS global. Exhaustive validation is usually computationally
# intensive, so we want to have a global switch (which late will default to False),
# that during debugging only turns argument validation on
VALIDATE_ARGS = True  # module-wide switch for (expensive) argument validation
# For Cityscapes Panoptic Parts the previously defined parula colormap slightly differs from
# PARULA99. This is done so vehicle chassis is colored with blue shades and thus resemble the
# original colors. This flag is enabled by default, although, if it is not possible to use the
# legacy colormap PARULA99 colormap is used. Otherwise, use set_use_legacy_cpp_parts_colormap.
# This flag will be disabled by default in the future.
USE_LEGACY_CPP_PARTS_COLORMAP = True  # see note above; will default to False in the future
def set_use_legacy_cpp_parts_colormap(boolean):
    """Globally enable/disable the legacy Cityscapes Panoptic Parts colormap.

    Args:
      boolean: Python bool, the new value of the module-level flag.
    """
    global USE_LEGACY_CPP_PARTS_COLORMAP
    assert isinstance(boolean, bool)
    USE_LEGACY_CPP_PARTS_COLORMAP = boolean
# same as parula99_cm(np.linspace(0, 1, 6)), but with second color (id=1) moved to the end
# First entry is forced to black; the original parula color it replaced is kept inline.
LEGACY_PARULA6 = [
    (0, 0, 0), #(61, 38, 168),
    (27, 170, 222), (71, 203, 134), (234, 186, 48), (249, 250, 20), (67, 102, 253)]
# MATLAB® PARULA99 colormap, generated with Matlab 2019a: uint8(floor(parula(99)*255))
# This colormap is used for colorizing up to 99 parts pids
PARULA99_INT = [
(61, 38, 168), (63, 40, 176), (64, 43, 183), (65, 46, 190), (66, 48, 197),
(67, 51, 205), (68, 54, 211), (69, 57, 217), (70, 60, 223), (70, 64, 227),
(71, 67, 231), (71, 71, 235), (71, 75, 238), (71, 78, 241), (71, 82, 244),
(71, 85, 246), (71, 89, 248), (70, 93, 250), (69, 96, 251), (68, 100, 252),
(66, 104, 253), (64, 108, 254), (61, 112, 254), (57, 116, 254), (53, 120, 253),
(49, 124, 252), (47, 127, 250), (46, 131, 248), (45, 134, 246), (45, 138, 244),
(44, 141, 241), (43, 145, 238), (40, 148, 236), (38, 151, 234), (37, 154, 231),
(36, 157, 230), (34, 160, 228), (32, 163, 227), (30, 166, 225), (28, 169, 223),
(25, 172, 220), (22, 174, 217), (17, 177, 214), (11, 179, 210), (4, 181, 206),
(1, 183, 202), (0, 185, 198), (2, 187, 193), (9, 188, 189), (17, 190, 185),
(26, 191, 180), (33, 192, 175), (39, 194, 171), (44, 195, 166), (47, 197, 161),
(51, 198, 156), (55, 199, 151), (59, 201, 145), (65, 202, 139), (73, 203, 133),
(81, 203, 126), (89, 204, 119), (97, 204, 112), (106, 204, 105), (115, 204, 98),
(125, 204, 91), (134, 203, 84), (144, 202, 76), (153, 201, 69), (162, 200, 62),
(171, 198, 56), (179, 197, 51), (188, 195, 46), (196, 193, 42), (204, 192, 39),
(211, 190, 39), (218, 189, 40), (225, 187, 42), (231, 186, 46), (237, 185, 51),
(243, 185, 57), (248, 186, 61), (252, 188, 61), (254, 191, 58), (254, 195, 56),
(254, 199, 53), (253, 202, 50), (252, 207, 48), (250, 211, 46), (248, 215, 44),
(247, 219, 42), (245, 223, 40), (244, 227, 38), (244, 231, 36), (244, 235, 34),
(245, 239, 31), (246, 243, 28), (247, 247, 24), (249, 250, 20)]
# PARULA99_INT rescaled to floats in [0, 1] for matplotlib
PARULA99_FLOAT = list(map(lambda t: tuple(map(lambda c: c/255, t)), PARULA99_INT))
# parula_cm(x), x can be float in [0.0, 1.0] or int in [0, 99) to return a color
PARULA99_CM = matplotlib.colors.LinearSegmentedColormap.from_list('parula99', PARULA99_FLOAT, 99)
def random_colors(num):
    """
    Returns a list of `num` random Python int RGB color tuples in range [0, 255].
    Colors can be repeated. This is desired behavior so we don't run out of colors.
    Args:
      num: Python int, the number of colors to produce
    Returns:
      colors: a list of tuples representing RGB colors in range [0, 255]
    """
    if not isinstance(num, int) or num < 0:
        raise ValueError('Provide a correct, Python int number of colors.')
    rgb = np.random.choice(256, size=(num, 3))
    return [tuple(int(channel) for channel in row) for row in rgb]
def _generate_shades(center_color, deltas, num_of_shades):
# center_color: (R, G, B)
# deltas: (R ± ΔR, G ± ΔG, B ± ΔB)
# returns a list of rgb color tuples
# TODO: move all checks to the first API-visible function
if num_of_shades <= 0:
raise ValueError(f"num_of_shades must be a positive integer (was {num_of_shades}).")
if num_of_shades == 1:
return [center_color]
# TODO: enable d=0
if not all(map(lambda d: 0 < d <= 255, deltas)):
raise ValueError(f"deltas were not valid ({deltas}).")
center_color = np.array(center_color)
deltas = np.array(deltas)
starts = np.maximum(0, center_color - deltas)
stops = np.minimum(center_color + deltas + 1, 255)
# in order to generate num_of_shades colors we divide the range
# by the cardinality of the cartesian product |R × G × B| = |R| · |G| · |B|,
# i.e. cbrt(num_of_shades)
steps = np.floor((stops - starts) / np.ceil(np.cbrt(num_of_shades)))
shades = itertools.product(*map(np.arange, starts, stops, steps))
# convert to int
shades = list(map(lambda shade: tuple(map(int, shade)), shades))
# sanity check
assert len(shades) >= num_of_shades, (
f"_generate_shades: Report case with provided arguments as an issue.")
return list(sorted(random.sample(shades, num_of_shades), key=lambda t: np.linalg.norm(t, ord=2)))
def _num_instances_per_sid(uids):
    """Count, per semantic id, the distinct instance ids present in `uids`."""
    # Note: instances in Cityscapes are not always labeled with continuous iids,
    # e.g. one image can have instances with iids: 000, 001, 003, 007
    # TODO(panos): move this functionality to utils.format
    # np.array is needed since uids are Python ints
    # and np.unique implicitly converts them to np.int64
    # TODO(panos): remove this need when np.int64 is supported in decode_uids
    uids_unique = np.unique(np.array(uids, dtype=np.int32))
    _, _, _, sids_iids = decode_uids(uids_unique, return_sids_iids=True)
    sids_iids_unique = np.unique(sids_iids)
    sid2Ninstances = collections.defaultdict(lambda : 0)
    for sid_iid in sids_iids_unique:
        sid, iid, _ = decode_uids(sid_iid)
        # iid == -1 marks pixels without instance-level labels (per decode_uids)
        if iid >= 0:
            sid2Ninstances[sid] += 1
    return sid2Ninstances
def _num_parts_per_sid(uids):
    """Count, per semantic id, the distinct part ids present in `uids`."""
    assert isinstance(uids, list)
    # TODO(panos): add the list decoding functionality in decode_uids
    sids_pids_unique = set(
        map(operator.itemgetter(3),
            map(functools.partial(decode_uids, return_sids_pids=True), uids)))
    sid2Nparts = collections.defaultdict(lambda : 0)
    for sid_pid in sids_pids_unique:
        # normalize bare sids (<= 99) to the sid*100 + pid encoding
        sid_pid_full = sid_pid * 100 if sid_pid <= 99 else sid_pid
        sid = sid_pid_full // 100
        pid = sid_pid_full % 100
        # pid == 0 means "no part label" for that sid
        if pid > 0:
            sid2Nparts[sid] += 1
    return sid2Nparts
def _sid2iids(uids):
    """Map each semantic id to the set of its instance ids found in `uids`."""
    # a dict mapping a sid to a set of all its iids
    # or in other words a mapping from a semantic class to all object ids it has
    # uids: a list of Python int uids
    # iids do not need to be consecutive numbers
    # TODO(panos): move this functionality to utils.format
    sid2iids = collections.defaultdict(set)
    for uid in set(uids):
        sid, iid, _ = decode_uids(uid)
        # decode_uids returns iid = -1 for pixels that don't have instance-level labels
        if iid >= 0:
            sid2iids[sid].add(iid)
    return sid2iids
def _sid2pids(uids):
    """Map each semantic id to the set of its part ids found in `uids`."""
    # a dict mapping a sid to a set of all its pids
    # uids: a list of Python int uids
    # TODO(panos): move this functionality to utils.format
    assert isinstance(uids, list)
    sid2pids = collections.defaultdict(set)
    for uid in set(uids):
        sid, _, pid = decode_uids(uid)
        # decode_uids returns pid = -1 for pixels that don't have part-level labels
        if pid >= 0:
            sid2pids[sid].add(pid)
    return sid2pids
def _validate_uid2color_args(uids, sid2color, experimental_deltas, experimental_alpha):
    """Validate uid2color() arguments; raises ValueError on invalid input."""
    # TODO(panos): add more checks for type, dtype, range
    # TODO(panos): optimize performance by minimizing overlapping functionality
    # uids checks
    if not isinstance(uids, (list, np.ndarray)):
        raise ValueError(f"Provide a list or np.ndarray of uids. Given {type(uids)}.")
    if isinstance(uids, np.ndarray):
        uids = list(map(int, np.unique(uids)))
    if not all(map(isinstance, uids, [int]*len(uids))):
        raise ValueError(f"Provide a list of Python ints as uids. Given {uids}.")
    if not all(map(lambda uid: 0 <= uid <= 99_999_99, uids)):
        raise ValueError(f'There are uids that are not in the correct range. Given {uids}.')
    # sid2color checks
    if not isinstance(sid2color, dict) and sid2color is not None:
        raise ValueError(f"sid2color must be a dict. Given {type(sid2color)}.")
    # BUG FIX: the subset check used to run even when sid2color is None
    # (uid2color's documented random-colors mode), raising a TypeError
    # from set.issubset(None).
    if sid2color is not None:
        sids_unique_from_uids = set(map(operator.itemgetter(0), map(decode_uids, uids)))
        if not sids_unique_from_uids.issubset(sid2color):
            raise ValueError(f"Not all sids in uids have a matching color in sid2color.")
    # experimental_deltas checks
    if not isinstance(experimental_deltas, tuple):
        raise ValueError(f"experimental_deltas must be a tuple. Given {type(experimental_deltas)}.")
    # experimental_alpha checks
    if experimental_alpha < 0 or experimental_alpha > 1:
        raise ValueError('experimental_alpha must be in [0, 1].')
def uid2color(uids,
              sid2color=None,
              experimental_deltas=(60, 60, 60),
              experimental_alpha=0.5):
    """
    Generate an RGB palette for all unique uids in `uids`. The palette is a dictionary mapping
    each uid from `uids` to an RGB color tuple, with values in range [0, 255].
    A uid is an up to 7-digit integer that is interpreted according to our panoptic parts format
    (see README), i.e., decode_uids(uid) = (sid, iid, pid).
    The colors are generated in the following way:
      - if uid represents a semantic-level label, i.e. uid = (sid, N/A, N/A),
        then `sid2color`[sid] is used.
      - if uid represents a semantic-instance-level label, i.e. uid = (sid, iid, N/A),
        then a random shade of `sid2color`[sid] is generated, controlled by `experimental_deltas`.
        The shades are generated so they are as diverse as possible and the variability depends
        on the number of iids per sid. The more the instances per sid in the `uids`, the less
        discriminable the shades are.
      - if uid represents a semantic-instance-parts-level label, i.e. uid = (sid, iid, pid),
        then a random shade is generated as in the semantic-instance-level case above and then
        it is mixed with a single color from the parula colormap, controlled by `experimental_alpha`.
        A different parula colormap is generated for each sid to achieve best discriminability
        of parts colors per sid.
    If `sid2color` is not provided (is None) then random colors are used. If `sid2color`
    is provided but does not contain all the sids of `uids` an error is raised.
    Example usage in {cityscapes, pascal}_panoptic_parts/visualize_from_paths.py.
    Args:
      uids: a list of Python int, or a np.int32 np.ndarray, with elements following the panoptic
        parts format (see README)
      sid2color: a dict mapping each sid of uids to an RGB color tuple of Python ints
        with values in range [0, 255], sids that are not present in uids will be ignored
      experimental_deltas: the range per color (Red, Green, Blue) in which to create shades, a small
        range provides shades that are close to the sid color but makes instance colors to have less
        contrast, a higher range provides better contrast but may create similar colors between
        different sid instances
      experimental_alpha: the mixing coeffient of the shade and the parula color, a higher value
        will make the semantic-instance-level shade more dominant over the parula color
    Returns:
      uid2color: a dict mapping each uid to a color tuple of Python int in range [0, 255]
    """
    # Optional argument validation, toggled by the module-level VALIDATE_ARGS flag.
    if VALIDATE_ARGS:
        _validate_uid2color_args(uids, sid2color, experimental_deltas, experimental_alpha)
    # Normalize the input to a list of unique Python ints.
    if isinstance(uids, np.ndarray):
        uids = list(map(int, np.unique(uids)))
    ## generate semantic-level colors
    if sid2color is None:
        # TODO(panos): add the list decoding functionality in decode_uids
        sids_unique = set(map(operator.itemgetter(0), map(decode_uids, uids)))
        random_sids_palette = random_colors(len(sids_unique))
        sid2color = {sid: tuple(map(int, color))
                     for sid, color in zip(sids_unique, random_sids_palette)}
    ## generate instance shades
    sid2num_instances = _num_instances_per_sid(uids)
    # TODO(panos): experimental_deltas must be large for sids with many iids and small for
    #   sids with few iids, maybe automate this?
    sid2shades = {sid: _generate_shades(sid2color[sid], experimental_deltas, Ninstances)
                  for sid, Ninstances in sid2num_instances.items()}
    ## generate discriminable per-sid parula colormap for parts
    # For best part-level color discriminability we generate a colormap per-sid,
    # this creates a discrininable colormap per-sid irrespectible of the number of parts.
    sid2num_parts = _num_parts_per_sid(uids)
    # Legacy CPP colormap is only applicable when every sid has at most 5 parts.
    is_maybe_cpp = (USE_LEGACY_CPP_PARTS_COLORMAP and
                    all(map(lambda n: n <= 5, sid2num_parts.values())))
    # Per-sid part colormap: either the fixed legacy palette or a parula slice,
    # scaled from [0, 1] floats to integer RGB in [0, 255].
    sid2parulaX = {
        sid: LEGACY_PARULA6 if is_maybe_cpp
             else (PARULA99_CM(np.linspace(0, 1, num=Nparts))*255)[:, :3].astype(np.int32)
        for sid, Nparts in sid2num_parts.items()}
    ## generate the uid to colors mappings
    # convert sets to lists so they are indexable, the .index() is needed since iids and
    # pids do not need be to be continuous (otherwise sid2shades[sid][iid] is enough)
    # TODO(panos): sid_2_* have overlapping functionality, consider merging them
    sid_2_iids = {sid: list(iids) for sid, iids in _sid2iids(set(uids)).items()}
    def _remove_all_no_error(lst, el):
        # Remove el from lst if present; list.remove drops the first occurrence only,
        # so the assert holds under the assumption that lst has no duplicates
        # (pids per sid appear to be unique — NOTE(review): confirm upstream).
        if el in lst:
            lst.remove(el)
        assert el not in lst
        return lst
    # pid 0 denotes "no part", so it is excluded from the part colormap indices.
    sid_2_non_zero_pids = {sid: list(range(6)) if is_maybe_cpp
                           else _remove_all_no_error(list(pids), 0)
                           for sid, pids in _sid2pids(uids).items()}
    uid_2_color = dict()
    for uid in set(uids):
        sid, iid, pid = decode_uids(uid)
        # Semantic-level uid (up to 2 digits): plain sid color.
        if uid <= 99:
            uid_2_color[uid] = sid2color[sid]
            continue
        index_iid = sid_2_iids[sid].index(iid)
        sem_inst_level_color = sid2shades[sid][index_iid]
        # Semantic-instance-level uid (up to 5 digits) or explicit pid==0: the shade alone.
        if uid <= 99_999 or pid == 0:
            uid_2_color[uid] = sem_inst_level_color
            continue
        # Semantic-instance-parts-level uid: blend the instance shade with the
        # sid-specific parula part color using experimental_alpha.
        if pid >= 1:
            index_pid = sid_2_non_zero_pids[sid].index(pid)
            uid_2_color[uid] = tuple(map(int,
                experimental_alpha * np.array(sem_inst_level_color) +
                (1 - experimental_alpha) * np.array(sid2parulaX[sid][index_pid])))
        # catch any possible errors
        assert uid in uid_2_color.keys()
    return uid_2_color
def experimental_colorize_label(label, *,
                                sid2color=None,
                                return_sem=False,
                                return_sem_inst=False,
                                emphasize_instance_boundaries=True,
                                return_uid2color=False,
                                experimental_deltas=(60, 60, 60),
                                experimental_alpha=0.5):
    """
    Colorizes a `label` with semantic-instance-parts-level colors based on sid2color.
    Optionally, semantic-level and semantic-instance-level colorings can be returned.
    The option emphasize_instance_boundaries will draw a 4-pixel white line around instance
    boundaries for the semantic-instance-level and semantic-instance-parts-level outputs.
    If a sid2color dict is provided colors from that will be used otherwise random colors
    will be generated.
    See panoptic_parts.utils.visualization.uid2color for how colors are generated.
    Args:
      label: 2-D, np.int32, np.ndarray with up to 7-digit uids, according to format in README
      sid2color: a dictionary mapping sids to RGB color tuples in [0, 255], all sids in `labels`
        must be in `sid2color`, otherwise provide None to use random colors
      return_sem: if True returns `sem_colored`
      return_sem_inst: if True returns `sem_inst_colored`
      return_uid2color: if True also returns the uid -> color dict used for colorizing
      emphasize_instance_boundaries: if True draws white boundaries around instances
      experimental_deltas: forwarded to uid2color (per-channel shade ranges)
      experimental_alpha: forwarded to uid2color (shade/parula mixing coefficient)
    Returns:
      sem_inst_parts_colored: 3-D, np.ndarray with RGB colors in [0, 255],
        colorized `label` with colors that distinguish scene-level semantics, part-level semantics,
        and instance-level ids
      sem_colored: 3-D, np.ndarray with RGB colors in [0, 255], returned if return_sem=True,
        colorized `label` with colors that distinguish scene-level semantics
      sem_inst_colored: 3-D, np.ndarray with RGB colors in [0, 255], returned if return_sem_inst=True,
        colorized `label` with colors that distinguish scene-level semantics and part-level semantics
    """
    if not isinstance(label, np.ndarray):
        raise ValueError(f"label is type: {type(label)}, only np.ndarray is supported.")
    if not all([label.ndim == 2, label.dtype == np.int32]):
        raise ValueError(
            f"label has: {label.ndim} dims and {label.dtype} dtype, only 2 dims"
            " and np.int32 are supported.")
    # We visualize labels on three levels: semantic, semantic-instance, semantic-instance-parts.
    # We want to colorize same instances with the same shades across levels for easier comparison
    # so we create ids_all_levels_unique and call uid2color() once to achieve that.
    # sids, iids, sids_iids shapes: (height, width)
    sids, iids, _, sids_iids = decode_uids(label, return_sids_iids=True)
    ids_all_levels_unique = np.unique(np.stack([sids, sids_iids, label]))
    uid2color_dict = uid2color(ids_all_levels_unique, sid2color=sid2color,
        experimental_deltas=experimental_deltas, experimental_alpha=experimental_alpha)
    # We colorize ids using numpy advanced indexing (gathering). This needs an array palette, thus we
    # convert the dictionary uid2color_dict to an array palette with shape (Ncolors, 3) and
    # values in range [0, 255] (RGB).
    # uids_*_colored shapes: (height, width, 3)
    palette = _sparse_ids_mapping_to_dense_ids_mapping(uid2color_dict, (0, 0, 0), dtype=np.uint8)
    uids_sem_colored = palette[sids]
    uids_sem_inst_colored = palette[sids_iids]
    uids_sem_inst_parts_colored = palette[label]
    # optionally add boundaries to the colorized labels uids_*_colored
    # TODO(panos): instance boundaries are found by the iids, if two iids are the same
    #   then an instance boundary is not drawn between different semantic-level classes
    # TODO(panos): same iids islands, that are occluded, must not have closed boundaries
    #   investigate if a solution to that is easy
    edge_option = 'sobel'  # or 'erosion'
    if emphasize_instance_boundaries:
        # Create concatenation of sids and iids.
        # Keep only ids that actually carry an instance part (sids_iids > 100).
        sids_iids_concat = np.where(sids_iids > 100, sids_iids, np.zeros_like(sids_iids))
        # TODO(panos): simplify this algorithm
        # create per-instance binary masks
        sids_iids_unique = np.unique(sids_iids_concat)
        boundaries = np.full(sids_iids.shape, False)
        edges = np.full(sids_iids.shape, False)
        for sid_iid in sids_iids_unique:
            if sid_iid != 0:
                # Last three decimal digits of sid_iid hold the instance id.
                iid = sid_iid % 1000
                if 0 <= iid <= 999:
                    sid_iid_mask = np.equal(sids_iids, sid_iid)
                    if edge_option == 'sobel':
                        # Gradient magnitude of the binary mask marks the instance outline.
                        edge_horizont = ndimage.sobel(sid_iid_mask, 0)
                        edge_vertical = ndimage.sobel(sid_iid_mask, 1)
                        edges = np.logical_or(np.hypot(edge_horizont, edge_vertical), edges)
                    elif edge_option == 'erosion':
                        # Boundary = mask minus its 4x4-eroded interior.
                        boundary = np.logical_xor(sid_iid_mask,
                            ndimage.binary_erosion(sid_iid_mask, structure=np.ones((4, 4))))
                        boundaries = np.logical_or(boundaries, boundary)
        # Expand the boolean edge map to a white RGB image for compositing.
        if edge_option == 'sobel':
            boundaries_image = np.uint8(edges)[..., np.newaxis] * np.uint8([[[255, 255, 255]]])
        elif edge_option == 'erosion':
            boundaries_image = np.uint8(boundaries)[..., np.newaxis] * np.uint8([[[255, 255, 255]]])
        # Overwrite boundary pixels with white in the instance-aware outputs.
        uids_sem_inst_colored = np.where(boundaries_image,
                                         boundaries_image,
                                         uids_sem_inst_colored)
        uids_sem_inst_parts_colored = np.where(boundaries_image,
                                               boundaries_image,
                                               uids_sem_inst_parts_colored)
    # Assemble the return tuple according to the requested flags; a single
    # result is returned unwrapped for convenience.
    returns = (uids_sem_inst_parts_colored,)
    if return_sem:
        returns += (uids_sem_colored,)
    if return_sem_inst:
        returns += (uids_sem_inst_colored,)
    if return_uid2color:
        returns += (uid2color_dict,)
    if len(returns) == 1:
        return returns[0]
    return returns
|
<filename>src/main/app-resources/node_flood_extraction/run.py
#!/opt/anaconda/bin/python
# -*- coding: utf-8 -*-
# The runSnap class reads the files contained in a single folder,
# sorts them by date in descending order and builds
# the pairs used to start SNAP;
# finally it builds the file name to associate with SNAP's output.
import subprocess
import os,sys
import cioppy
import string
import datetime
import flood_cd
ciop = cioppy.Cioppy()
# define the exit codes - need to be better assessed
SUCCESS = 0
ERR_FAILED = 134
# add a trap to exit gracefully
def clean_exit(exit_code):
    """Log the run outcome corresponding to exit_code via ciop.

    Logs at INFO level for SUCCESS and at ERROR level for any other code.

    Args:
        exit_code: one of the module-level exit codes (SUCCESS, ERR_FAILED, ...).
    """
    log_level = 'INFO'
    if exit_code != SUCCESS:
        log_level = 'ERROR'
    msg = {SUCCESS: 'Download successfully concluded',
           ERR_FAILED: 'Unable to complete the download'}
    # BUG FIX: msg[exit_code] raised KeyError for any code other than the two
    # known ones; fall back to a generic message instead.
    ciop.log(log_level, msg.get(exit_code, 'Exited with code %s' % exit_code))
def main():
    """Collect amplitude/coherence rasters from stdin, run flood extraction
    and publish the resulting rasters plus a .properties metadata file each.

    Python 2 script body: reads one HDFS path per stdin line, copies each file
    locally via ciop, splits them into image ('ampl') and coherence ('cohe')
    lists sorted by acquisition date, then feeds both lists to
    flood_cd.flood_cd_body().
    """
    print "cominciamo!!"
    outdir = ciop.tmp_dir
    cohe_list = []
    image_list = []
    # NOTE(review): 'input' shadows the builtin of the same name.
    input = sys.stdin.readlines()
    # Strip whitespace and surrounding single quotes from each path.
    input_files_hdfs = [x.strip().strip("'") for x in input]
    #print "input file hdfs: ", input_files_hdfs
    for input_file in input_files_hdfs:
        #print "sys.stdin ", input
        # Build one list of images and one of coherences; these files feed
        # the flood-extraction processor.
        print "input: ", input_file
        print "vai con la data!"
        #print input_file[-38:-34], input_file[-34:-32], input_file[-32:-30]
        # Acquisition date is parsed from fixed positions in the file name
        # (YYYY at [-48:-44], MM at [-44:-42], DD at [-42:-40]) —
        # assumes a fixed file-naming convention; TODO confirm with producers.
        date_img = datetime.date(int(input_file[-48:-44]), int(input_file[-44:-42]), int(input_file[-42:-40]))
        local_infile = ciop.copy(input_file, outdir, extract=False)
        print "date_img: ", date_img
        print "local file : %s" % local_infile
        if (input_file.find('ampl') > -1):
            image_list.append((date_img, local_infile))
        if (input_file.find('cohe') > -1):
            cohe_list.append((date_img, local_infile))
    print image_list
    print cohe_list
    # Tuples sort by date first, so both lists end up in chronological order.
    image_list.sort()
    cohe_list.sort()
    print image_list
    print cohe_list
    outfile_list = flood_cd.flood_cd_body(amp_list=[x[1] for x in image_list], cohe_long_list=[x[1] for x in cohe_list], window="", minimum_images=1, maximum_images=20, outdir=outdir, smallest_flood_pixels=9)
    print os.path.isfile(outfile_list[0])
    print outfile_list
    res = ciop.publish(outfile_list, metalink=True)
    print 'result from publish string: ', res
    ##preview and metadata file generation
    #PREVIEW
    for outfile in outfile_list:
        #outfile_png = outfile.replace("tif","png")
        #cmd = 'gdal_translate -scale 0 1 -of "PNG" -co WORLDFILE=YES '+outfile+' '+outfile_png
        #print cmd
        #res=subprocess.call(cmd, shell=True)
        #worldfile=outfile.replace("tif","wld")
        #outfile_pngw=worldfile.replace("wld","pngw")
        #os.rename(worldfile, outfile_pngw)
        #res = ciop.publish(outfile_png, metalink=True)
        #res = ciop.publish(outfile_pngw, metalink=True)
        #METADATA FILE
        # Write a simple key=value .properties side-car next to each output
        # raster and publish it. Dates in the title are sliced from fixed
        # positions of the file name — assumes the same naming convention as
        # above; TODO confirm.
        outfile_properties = outfile.replace("tif", "properties")
        file_properties = open(outfile_properties, "w")
        file_properties.write("date=" + datetime.datetime.now().isoformat() + '\n')
        file_properties.write("output=" + os.path.basename(outfile) + '\n')
        file_properties.write("title=Flood map extent of " + os.path.basename(outfile)[17:25] + ' relative to the situation of ' + os.path.basename(outfile)[68:76] + '\n')
        file_properties.write("copyrigth=e-Geos")
        file_properties.close()
        res = ciop.publish(outfile_properties, metalink=True)
    #output_file = ciop.publish(res, mode='silent', metalink=True)
    #print "output: ", output_file
# Run the pipeline and, on sys.exit(code), log the outcome before
# propagating the SystemExit so the wrapper still sees the exit status.
try:
    main()
except SystemExit as e:
    # BUG FIX: guard against SystemExit raised without arguments
    # (plain sys.exit()), where e.args[0] raised IndexError.
    if e.args and e.args[0]:
        clean_exit(e.args[0])
    raise
|
# Copyright (c) 2018 Midokura SARL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from tempest.lib.common import ssh
from tempest.lib import exceptions as lib_exc
from networking_fortinet.tests.tempest_plugin.tests import fwaas_client
from networking_fortinet.tests.tempest_plugin.tests.scenario import manager
CONF = cfg.CONF
class FWaaSScenarioTest(fwaas_client.FWaaSClientMixin,
                        manager.NetworkScenarioTest):
    """Scenario-test base for FWaaS on Fortinet: connectivity probing helpers
    plus router lookup/creation utilities."""

    def check_connectivity(self, ip_address, username=None, private_key=None,
                           should_connect=True,
                           check_icmp=True, check_ssh=True,
                           check_reverse_icmp_ip=None,
                           should_reverse_connect=True,
                           check_reverse_curl=False):
        """Assert (non-)reachability of ip_address via ICMP and/or SSH.

        When SSH succeeds, optionally verifies reverse connectivity from the
        guest: ping to check_reverse_icmp_ip and/or outbound curl checks.

        Args:
            ip_address: address to probe.
            username / private_key: SSH credentials.
            should_connect: expected forward reachability.
            check_icmp / check_ssh: which probes to run.
            check_reverse_icmp_ip: if set, ping this address from the guest.
            should_reverse_connect: expected reverse reachability.
            check_reverse_curl: also run curl-based reverse checks, including
                an EICAR download that is expected to be blocked.
        """
        # if default allow is enabled as default by fgt fwaas, reverse
        # connection should always be available.
        if self._default_allow():
            should_reverse_connect = True
        if should_connect:
            msg = "Timed out waiting for %s to become reachable" % ip_address
        else:
            msg = "ip address %s is reachable" % ip_address
        if check_icmp:
            ok = self.ping_ip_address(ip_address,
                                      should_succeed=should_connect)
            self.assertTrue(ok, msg=msg)
        if check_ssh:
            connect_timeout = CONF.validation.connect_timeout
            kwargs = {}
            if not should_connect:
                # Use a shorter timeout for negative case
                kwargs['timeout'] = 1
            try:
                client = ssh.Client(ip_address, username, pkey=private_key,
                                    channel_timeout=connect_timeout,
                                    **kwargs)
                client.test_connection_auth()
                # Reaching this line means SSH worked; fail if we expected not to.
                self.assertTrue(should_connect, "Unexpectedly reachable")
                if check_reverse_icmp_ip:
                    cmd = 'ping -c1 -w2 %s' % check_reverse_icmp_ip
                    try:
                        client.exec_command(cmd)
                        self.assertTrue(should_reverse_connect,
                                        "Unexpectedly reachable (reverse)")
                    except lib_exc.SSHExecCommandFailed:
                        # Command failing is only an error if reverse
                        # connectivity was expected.
                        if should_reverse_connect:
                            raise
                if check_reverse_curl:
                    cmd1 = 'curl -i https://www.google.com |grep "200 OK"'
                    # NOTE: the backslash-newline below is INSIDE the string
                    # literal — cmd2 is one shell pipeline.
                    cmd2 = 'curl http://www.eicar.org/download/eicar.com|\
grep EICAR-STANDARD-ANTIVIRUS-TEST-FILE'
                    try:
                        client.exec_command(cmd1)
                        self.assertTrue(should_reverse_connect,
                                        "Unexpectedly reachable (reverse)")
                    except lib_exc.SSHExecCommandFailed:
                        if should_reverse_connect:
                            raise
                    # test virus file download should be blocked by default
                    # security profile enabled.
                    try:
                        client.exec_command(cmd2)
                        self.assertFalse(should_reverse_connect,
                                         "Unexpectedly reachable (reverse)")
                        # NOTE(review): bare `raise` here has no active
                        # exception, so it raises RuntimeError, which is not
                        # caught by the handler below — confirm intent.
                        raise
                    except lib_exc.SSHExecCommandFailed:
                        # Download blocked as expected; nothing to do.
                        if should_reverse_connect:
                            pass
            except lib_exc.SSHTimeout:
                # Timeout is the expected outcome for the negative case.
                if should_connect:
                    raise

    def _get_router(self, client=None, tenant_id=None):
        """Retrieve a router for the given tenant id.
        If a public router has been configured, it will be returned.
        If a public router has not been configured, but a public
        network has, a tenant router will be created and returned that
        routes traffic to the public network.
        """
        if not client:
            client = self.routers_client
        if not tenant_id:
            tenant_id = client.tenant_id
        router_id = CONF.network.public_router_id
        network_id = CONF.network.public_network_id
        if router_id:
            body = client.show_router(router_id)
            return body['router']
        elif network_id:
            # tsinghuanet plugin only allow one router per tenant, so if
            # a router already exists, use it.
            routers_list = client.list_routers(tenant_id=tenant_id)
            if len(routers_list['routers']) == 1:
                router = routers_list['routers'][0]
            else:
                router = self._create_router(client, tenant_id)
                kwargs = {'external_gateway_info': dict(network_id=network_id)}
                router = client.update_router(router['id'], **kwargs)['router']
            return router
        else:
            raise Exception("Neither of 'public_router_id' or "
                            "'public_network_id' has been defined.")

    def _default_allow(self):
        """Return True when the plugin's default firewall rule is disabled,
        i.e. traffic is allowed by default."""
        if CONF.tsinghuanet.enable_default_fwrule:
            return False
        else:
            return True
|
<reponame>YU-Zhejian/HERVfinder
import argparse
import os
import sys
from typing import List
from herv_finder import blast
from herv_finder.blast import indexer, search
PROG = "HERVfinder"
BANNER = """
=========================================================
= HERVfinder =
=========================================================
"""
def _parse_args(args: List[str]) -> argparse.Namespace:
    """Parse HERVfinder command-line arguments.

    Args:
        args: the argument vector (without the program name), e.g. sys.argv[1:].

    Returns:
        The parsed argparse.Namespace; `action` is one of 'index' or 'search'.
    """
    parser = argparse.ArgumentParser(prog=PROG)
    parser.add_argument('action', choices=['index', 'search'], help='Action to do')
    parser.add_argument('-H', '--herv_fasta', type=str, help="[search] Fasta for HERV consensus sequence",
                        required=False)
    parser.add_argument('-R', '--reference_fasta', type=str,
                        help="Fasta for (a subset of) human reference genome sequence", required=True)
    parser.add_argument('-I', '--index', type=str, help="Basename of HERVfinder Blast index", required=True)
    parser.add_argument('-P', '--pool_len', type=int, help="Number of process to use", required=False,
                        default=os.cpu_count())
    parser.add_argument('--prefix_len', type=int, help="Prefix length of splitted index", required=False,
                        default=blast.DEFAULT_PREFIX_LEN)
    parser.add_argument('--word_len', type=int, help="Word length of index", required=False,
                        default=blast.DEFAULT_WORD_LEN)
    parser.add_argument('--chunk_len', type=int, help="[index] Chunk length of each process", required=False,
                        default=blast.DEFAULT_CHUNK_LEN)
    parser.add_argument('-O', '--output', type=str, help="[search] Basename of the output", required=False)
    # BUG FIX: help text said "Scope"; the option is the slope of the linear
    # score cutoff (cf. score_cutoff_intersect below).
    parser.add_argument('--score_cutoff_slope', type=float, help="[search] Slope of post-alignment score cutoff",
                        required=False, default=blast.DEFAULT_SCORE_CUTOFF_SLOPE)
    parser.add_argument('--score_cutoff_intersect', type=float,
                        help="[search] Intersect of post-alignment score cutoff",
                        required=False, default=blast.DEFAULT_SCORE_CUTOFF_INTERSECT)
    parser.add_argument('--extend_batch_size', type=int, help="[search] Number of anchors to submit to each process",
                        required=False, default=blast.DEFAULT_EXTENDER_BATCH_SIZE)
    return parser.parse_args(args)
def _search(
        herv_fasta: str,
        reference_fasta: str,
        index: str,
        output: str,
        score_cutoff_slope: float,
        score_cutoff_intersect: float,
        pool_len: int,
        extend_batch_size: int,
        prefix_len: int,
        word_len: int
):
    """Run the search stage: align HERV consensus sequences against an
    on-disk reference index and write the merged results.

    Builds an in-memory index of the HERV fasta (the "needle"), attaches the
    reference fasta to the pre-built on-disk index (the "haystack"), then
    drives the anchor -> extend -> merge pipeline to completion.
    """
    # In-memory index of the query (HERV consensus) sequences.
    query_index = indexer.InMemorySimpleBlastIndex(word_len=word_len, prefix_len=prefix_len)
    query_index.attach_fasta(herv_fasta)
    query_index.create_index()

    # On-disk index of the reference genome, built by the 'index' action.
    target_index = indexer.BlastIndex(index, word_len=word_len, prefix_len=prefix_len)
    target_index.attach_fasta(reference_fasta)

    engine = search.BlastIndexSearcher(
        needle_index=query_index,
        haystack_index=target_index,
        output_basename=output,
        pool_len=pool_len,
        extend_batch_size=extend_batch_size
    )
    extended = engine.extend(
        engine.generate_raw_anchors(),
        score_cutoff_slope=score_cutoff_slope,
        score_cutoff_intersect=score_cutoff_intersect
    )
    # Drain the merging generator so the whole pipeline actually executes;
    # the results themselves are written via output_basename.
    for _ in engine.merge_overlapping_anchors(extended):
        pass
def _index(
        reference_fasta: str,
        index: str,
        pool_len: int,
        prefix_len: int,
        word_len: int,
        chunk_len: int
):
    """Run the index stage: build an on-disk Blast index of the reference.

    Attaches reference_fasta to a BlastIndex rooted at `index` and writes
    the index files using `pool_len` worker processes.
    """
    blast_index = indexer.BlastIndex(
        basename=index,
        prefix_len=prefix_len,
        word_len=word_len,
        chunk_len=chunk_len,
        pool_len=pool_len
    )
    blast_index.attach_fasta(reference_fasta)
    blast_index.create_index()
def main(args: List[str]) -> int:
    """HERVfinder entry point: parse arguments and dispatch to the
    requested action.

    Args:
        args: argument vector without the program name (sys.argv[1:]).

    Returns:
        Process exit status: 0 on success, 1 on an unrecognized action.
    """
    ns = _parse_args(args)
    if ns.action == 'index':
        _index(
            reference_fasta=ns.reference_fasta,
            index=ns.index,
            pool_len=ns.pool_len,
            prefix_len=ns.prefix_len,
            word_len=ns.word_len,
            chunk_len=ns.chunk_len
        )
    elif ns.action == 'search':
        _search(
            herv_fasta=ns.herv_fasta,
            reference_fasta=ns.reference_fasta,
            index=ns.index,
            output=ns.output,
            score_cutoff_slope=ns.score_cutoff_slope,
            score_cutoff_intersect=ns.score_cutoff_intersect,
            pool_len=ns.pool_len,
            extend_batch_size=ns.extend_batch_size,
            prefix_len=ns.prefix_len,
            word_len=ns.word_len,
        )
    else:
        # Unreachable in practice (argparse restricts choices), but if it does
        # happen, report failure instead of silently returning success.
        # BUG FIX: previously this path still returned 0.
        print('Undefined behaviour -- Your argparse module may not be working.')
        return 1
    return 0
if __name__ == '__main__':
    # Show the banner and echo the raw argv (useful when invoked by a wrapper),
    # then dispatch to main() and propagate its status as the process exit code.
    print(BANNER)
    print("RECV:" + " ".join(sys.argv))
    sys.exit(main(sys.argv[1:]))
|
<reponame>UnnamedMoose/serialMonitor<gh_stars>1-10
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Sep 19 2018)
## http://www.wxformbuilder.org/
##
## PLEASE DO *NOT* EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
import wx.richtext
import SerialMonitor as sm
parseOutputsTimerID = 1000
###########################################################################
## Class mainFrame
###########################################################################
class mainFrame ( wx.Frame ):
    """Main serialMonitor window (wxFormBuilder-generated; do not hand-edit
    the layout code — regenerate from the .fbp project instead).

    Left panel: serial-port/baud/read-delay controls and output options.
    Right side: read-only log view and a command input line. Event handlers
    are virtual stubs meant to be overridden in a derived class.
    """

    def __init__( self, parent ):
        wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"serialMonitor v{}".format(sm.__version__), pos = wx.DefaultPosition, size = wx.Size( 600,550 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )

        self.SetSizeHints( wx.Size( 600,550 ), wx.DefaultSize )

        bSizer1 = wx.BoxSizer( wx.HORIZONTAL )

        # Left-hand control panel (port selection, connection, display options).
        self.m_panel1 = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
        bSizer2 = wx.BoxSizer( wx.VERTICAL )

        self.serialPortText = wx.StaticText( self.m_panel1, wx.ID_ANY, u"Serial port:", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.serialPortText.Wrap( -1 )
        bSizer2.Add( self.serialPortText, 0, wx.ALL|wx.EXPAND, 5 )

        portChoiceChoices = []
        self.portChoice = wx.Choice( self.m_panel1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, portChoiceChoices, 0 )
        self.portChoice.SetSelection( 0 )
        self.portChoice.SetMinSize( wx.Size( 120,-1 ) )
        bSizer2.Add( self.portChoice, 0, wx.ALL|wx.EXPAND, 5 )

        self.updatePortsButton = wx.Button( self.m_panel1, wx.ID_ANY, u"Update ports", wx.DefaultPosition, wx.DefaultSize, 0 )
        bSizer2.Add( self.updatePortsButton, 0, wx.ALL|wx.EXPAND, 5 )

        self.disconnectButton = wx.Button( self.m_panel1, wx.ID_ANY, u"Disconnect", wx.DefaultPosition, wx.DefaultSize, 0 )
        bSizer2.Add( self.disconnectButton, 0, wx.ALL|wx.EXPAND, 5 )

        self.baudRateText = wx.StaticText( self.m_panel1, wx.ID_ANY, u"Baud rate:", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.baudRateText.Wrap( -1 )
        bSizer2.Add( self.baudRateText, 0, wx.ALL|wx.EXPAND, 5 )

        self.baudRateTxtCtrl = wx.TextCtrl( self.m_panel1, wx.ID_ANY, u"19200", wx.DefaultPosition, wx.DefaultSize, wx.TE_PROCESS_ENTER )
        bSizer2.Add( self.baudRateTxtCtrl, 0, wx.ALL|wx.EXPAND, 5 )

        self.readDelayText = wx.StaticText( self.m_panel1, wx.ID_ANY, u"Read delay [ms]", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.readDelayText.Wrap( -1 )
        bSizer2.Add( self.readDelayText, 0, wx.ALL|wx.EXPAND, 5 )

        self.readDelayTxtCtrl = wx.TextCtrl( self.m_panel1, wx.ID_ANY, u"1000", wx.DefaultPosition, wx.DefaultSize, wx.TE_PROCESS_ENTER )
        bSizer2.Add( self.readDelayTxtCtrl, 0, wx.ALL|wx.EXPAND, 5 )

        self.clearButton = wx.Button( self.m_panel1, wx.ID_ANY, u"Clear console", wx.DefaultPosition, wx.DefaultSize, 0 )
        bSizer2.Add( self.clearButton, 0, wx.ALL|wx.EXPAND, 5 )

        self.rawOutputCheckbox = wx.CheckBox( self.m_panel1, wx.ID_ANY, u"Raw output", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.rawOutputCheckbox.SetToolTip( u"Toggle between displaying complete lines terminated with an EOL char, or all received bytes as they arrive." )
        bSizer2.Add( self.rawOutputCheckbox, 0, wx.ALL|wx.EXPAND, 5 )

        self.hexOutputCheckbox = wx.CheckBox( self.m_panel1, wx.ID_ANY, u"Hex output", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.hexOutputCheckbox.SetToolTip( u"Tick to show hex codes of the received bytes. Only works with \"Raw output\"." )
        bSizer2.Add( self.hexOutputCheckbox, 0, wx.ALL|wx.EXPAND, 5 )

        self.fileLogCheckbox = wx.CheckBox( self.m_panel1, wx.ID_ANY, u"Log to file", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.fileLogCheckbox.SetToolTip( u"Tick to stream the log output to a chosen file." )
        bSizer2.Add( self.fileLogCheckbox, 0, wx.ALL|wx.EXPAND, 5 )

        self.loggingLevelText = wx.StaticText( self.m_panel1, wx.ID_ANY, u"Logging level:", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.loggingLevelText.Wrap( -1 )
        bSizer2.Add( self.loggingLevelText, 0, wx.ALL|wx.EXPAND, 5 )

        loggingLevelChoiceChoices = [ u"ERROR", u"WARNING", u"INFO", u"DEBUG" ]
        self.loggingLevelChoice = wx.Choice( self.m_panel1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, loggingLevelChoiceChoices, 0 )
        self.loggingLevelChoice.SetSelection( 0 )
        bSizer2.Add( self.loggingLevelChoice, 0, wx.ALL, 5 )

        bSizer2.Add( ( 0, 0), 1, wx.EXPAND, 5 )

        self.commandLineLabel = wx.StaticText( self.m_panel1, wx.ID_ANY, u"Type command:", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.commandLineLabel.Wrap( -1 )
        bSizer2.Add( self.commandLineLabel, 0, wx.ALL|wx.EXPAND, 5 )

        self.m_panel1.SetSizer( bSizer2 )
        self.m_panel1.Layout()
        bSizer2.Fit( self.m_panel1 )
        bSizer1.Add( self.m_panel1, 0, wx.EXPAND |wx.ALL, 5 )

        # Right-hand side: read-only log console above a command input line.
        bSizer10 = wx.BoxSizer( wx.VERTICAL )

        self.logFileTextControl = wx.richtext.RichTextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_READONLY|wx.VSCROLL|wx.HSCROLL|wx.WANTS_CHARS|wx.BORDER_NONE )
        bSizer10.Add( self.logFileTextControl, 1, wx.EXPAND |wx.ALL, 5 )

        self.inputTextControl = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_PROCESS_ENTER )
        bSizer10.Add( self.inputTextControl, 0, wx.ALL|wx.EXPAND, 5 )

        bSizer1.Add( bSizer10, 1, wx.EXPAND, 5 )

        self.SetSizer( bSizer1 )
        self.Layout()
        # Timer that drives periodic parsing of serial output (see onParseOutputs).
        self.parseOutputsTimer = wx.Timer()
        self.parseOutputsTimer.SetOwner( self, parseOutputsTimerID )

        # Menu bar: File (exit) and serial-connection editing.
        self.m_menubar1 = wx.MenuBar( 0 )
        self.fileMenu = wx.Menu()
        self.exitMenuItem = wx.MenuItem( self.fileMenu, wx.ID_ANY, u"Exit", wx.EmptyString, wx.ITEM_NORMAL )
        self.fileMenu.Append( self.exitMenuItem )

        self.m_menubar1.Append( self.fileMenu, u"File" )

        self.serialMenu = wx.Menu()
        self.serialMenuItem = wx.MenuItem( self.serialMenu, wx.ID_ANY, u"Edit serial details", wx.EmptyString, wx.ITEM_NORMAL )
        self.serialMenu.Append( self.serialMenuItem )

        self.m_menubar1.Append( self.serialMenu, u"Edit serial connection" )

        self.SetMenuBar( self.m_menubar1 )

        self.Centre( wx.BOTH )

        # Connect Events
        self.Bind( wx.EVT_CLOSE, self.onClose )
        self.portChoice.Bind( wx.EVT_CHOICE, self.onChoseSerialPort )
        self.updatePortsButton.Bind( wx.EVT_BUTTON, self.onUpdatePorts )
        self.disconnectButton.Bind( wx.EVT_BUTTON, self.onDisconnect )
        self.baudRateTxtCtrl.Bind( wx.EVT_KILL_FOCUS, self.onUpdateBaudRate )
        self.baudRateTxtCtrl.Bind( wx.EVT_TEXT_ENTER, self.onUpdateBaudRate )
        self.readDelayTxtCtrl.Bind( wx.EVT_KILL_FOCUS, self.onUpdateReadDelay )
        self.readDelayTxtCtrl.Bind( wx.EVT_TEXT_ENTER, self.onUpdateReadDelay )
        self.clearButton.Bind( wx.EVT_BUTTON, self.onClearConsole )
        self.rawOutputCheckbox.Bind( wx.EVT_CHECKBOX, self.onRawOutputTicked )
        self.fileLogCheckbox.Bind( wx.EVT_CHECKBOX, self.onToggleLogFile )
        self.loggingLevelChoice.Bind( wx.EVT_CHOICE, self.onLoggingLevelChosen )
        self.inputTextControl.Bind( wx.EVT_TEXT_ENTER, self.onSendInput )
        self.Bind( wx.EVT_TIMER, self.onParseOutputs, id=parseOutputsTimerID )
        self.Bind( wx.EVT_MENU, self.onClose, id = self.exitMenuItem.GetId() )
        self.Bind( wx.EVT_MENU, self.onEditSerialPort, id = self.serialMenuItem.GetId() )

    def __del__( self ):
        pass

    # Virtual event handlers, overide them in your derived class
    def onClose( self, event ):
        event.Skip()

    def onChoseSerialPort( self, event ):
        event.Skip()

    def onUpdatePorts( self, event ):
        event.Skip()

    def onDisconnect( self, event ):
        event.Skip()

    def onUpdateBaudRate( self, event ):
        event.Skip()

    def onUpdateReadDelay( self, event ):
        event.Skip()

    def onClearConsole( self, event ):
        event.Skip()

    def onRawOutputTicked( self, event ):
        event.Skip()

    def onToggleLogFile( self, event ):
        event.Skip()

    def onLoggingLevelChosen( self, event ):
        event.Skip()

    def onSendInput( self, event ):
        event.Skip()

    def onParseOutputs( self, event ):
        event.Skip()

    def onEditSerialPort( self, event ):
        event.Skip()
###########################################################################
## Class serialDetailsDialog
###########################################################################
class serialDetailsDialog ( wx.Dialog ):
def __init__( self, parent ):
wx.Dialog.__init__ ( self, parent, id = wx.ID_ANY, title = u"Edit serial connection details", pos = wx.DefaultPosition, size = wx.Size( 300,250 ), style = wx.DEFAULT_DIALOG_STYLE )
self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
bSizer4 = wx.BoxSizer( wx.VERTICAL )
bSizer5 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText5 = wx.StaticText( self, wx.ID_ANY, u"Stop bits", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText5.Wrap( -1 )
bSizer5.Add( self.m_staticText5, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
bSizer5.Add( ( 0, 0), 1, wx.EXPAND, 5 )
stopBitsChoiceChoices = []
self.stopBitsChoice = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, stopBitsChoiceChoices, 0 )
self.stopBitsChoice.SetSelection( 0 )
bSizer5.Add( self.stopBitsChoice, 1, wx.ALL|wx.EXPAND, 5 )
bSizer4.Add( bSizer5, 0, wx.EXPAND, 5 )
bSizer51 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText51 = wx.StaticText( self, wx.ID_ANY, u"Parity", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText51.Wrap( -1 )
bSizer51.Add( self.m_staticText51, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
bSizer51.Add( ( 0, 0), 1, wx.EXPAND, 5 )
parityChoiceChoices = []
self.parityChoice = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, parityChoiceChoices, 0 )
self.parityChoice.SetSelection( 0 )
bSizer51.Add( self.parityChoice, 1, wx.ALL|wx.EXPAND, 5 )
bSizer4.Add( bSizer51, 0, wx.EXPAND, 5 )
bSizer52 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText52 = wx.StaticText( self, wx.ID_ANY, u"Byte size (bits)", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText52.Wrap( -1 )
bSizer52.Add( self.m_staticText52, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
bSizer52.Add( ( 0, 0), 1, wx.EXPAND, 5 )
byteSizeChoiceChoices = []
self.byteSizeChoice = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, byteSizeChoiceChoices, 0 )
self.byteSizeChoice.SetSelection( 0 )
bSizer52.Add( self.byteSizeChoice, 1, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, 5 )
bSizer4.Add( bSizer52, 0, wx.EXPAND, 5 )
self.m_staticline1 = wx.StaticLine( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL )
bSizer4.Add( self.m_staticline1, 0, wx.EXPAND |wx.ALL, 5 )
bSizer13 = wx.BoxSizer( wx.VERTICAL )
bSizer13.Add( ( 0, 0), 1, wx.EXPAND, 5 )
bSizer11 = wx.BoxSizer( wx.HORIZONTAL )
bSizer11.Add( ( 0, 0), 1, wx.EXPAND, 5 )
self.okButton = wx.Button( self, wx.ID_OK, u"Okay", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer11.Add( self.okButton, 0, wx.ALL, 5 )
self.cancelButton = wx.Button( self, wx.ID_CANCEL, u"Cancel", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer11.Add( self.cancelButton, 0, wx.ALL, 5 )
bSizer11.Add( ( 0, 0), 1, wx.EXPAND, 5 )
bSizer13.Add( bSizer11, 1, wx.EXPAND, 5 )
bSizer13.Add( ( 0, 0), 1, wx.EXPAND, 5 )
bSizer4.Add( bSizer13, 1, wx.EXPAND, 5 )
self.SetSizer( bSizer4 )
self.Layout()
self.Centre( wx.BOTH )
    def __del__( self ):
        # wxFormBuilder-generated destructor stub; no resources to release
        # beyond what wx handles itself.
        pass
|
# Copyright (c) 2016-2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import six
import threading
import time
import opentracing
from opentracing.ext import tags as ext_tags
from . import codecs, thrift
from .constants import SAMPLED_FLAG, DEBUG_FLAG
class Span(opentracing.Span):
    """Implements opentracing.Span.

    Tags and logs are stored as Thrift objects (via ``thrift.make_tag`` /
    ``thrift.make_log``) instead of plain dicts to avoid extra allocations
    at report time.  Mutations of span state are serialized through
    ``update_lock``; flag reads (``is_sampled`` / ``is_debug``) are
    lock-free.
    """

    __slots__ = ['_tracer', '_context',
                 'operation_name', 'start_time', 'end_time',
                 'logs', 'tags', 'update_lock']

    def __init__(self, context, tracer, operation_name,
                 tags=None, start_time=None, references=None):
        """
        :param context: span context (trace/span ids, flags, baggage)
        :param tracer: owning tracer; used for reporting and config lookups
        :param operation_name: name of the operation this span represents
        :param tags: optional dict of initial tags, applied via set_tag()
        :param start_time: explicit start timestamp per time.time();
            defaults to the current time
        :param references: optional span references
        """
        super(Span, self).__init__(context=context, tracer=tracer)
        self.operation_name = operation_name
        self.start_time = start_time or time.time()
        self.end_time = None
        self.update_lock = threading.Lock()
        # NOTE(review): 'references' is absent from __slots__, so this
        # assignment relies on a base class providing storage -- confirm.
        self.references = references
        # we store tags and logs as Thrift objects to avoid extra allocations
        self.tags = []
        self.logs = []
        if tags:
            for k, v in six.iteritems(tags):
                self.set_tag(k, v)

    def set_operation_name(self, operation_name):
        """
        Set or change the operation name.

        :param operation_name: the new operation name
        :return: Returns the Span itself, for call chaining.
        """
        with self.update_lock:
            self.operation_name = operation_name
        return self

    def finish(self, finish_time=None):
        """Indicate that the work represented by this span has been completed
        or terminated, and is ready to be sent to the Reporter.

        If any tags / logs need to be added to the span, it should be done
        before calling finish(), otherwise they may be ignored.

        :param finish_time: an explicit Span finish timestamp as a unix
            timestamp per time.time()
        """
        # Unsampled spans are dropped entirely rather than reported.
        if not self.is_sampled():
            return

        self.end_time = finish_time or time.time()  # no locking
        self.tracer.report_span(self)

    def set_tag(self, key, value):
        """
        Attach a key/value tag to the span.

        The special tag ext_tags.SAMPLING_PRIORITY is intercepted by
        _set_sampling_priority and may flip the sampling/debug flags.
        Tags are only recorded while the span is sampled.

        :param key: tag name
        :param value: tag value
        :return: Returns the Span itself, for call chaining.
        """
        with self.update_lock:
            if key == ext_tags.SAMPLING_PRIORITY and not self._set_sampling_priority(value):
                return self
            if self.is_sampled():
                tag = thrift.make_tag(
                    key=key,
                    value=value,
                    max_length=self.tracer.max_tag_value_length, )
                self.tags.append(tag)
        return self

    def _set_sampling_priority(self, value):
        """
        Update the sampled/debug flags from a sampling.priority tag value.

        N.B. Caller must be holding update_lock.

        :return: True if the sampling.priority tag should also be recorded
            on the span, False otherwise.
        """
        # Ignore debug spans trying to re-enable debug.
        if self.is_debug() and value:
            return False

        # NOTE(review): only ValueError is caught; int(None) raises
        # TypeError, which would propagate -- confirm callers never pass
        # a non-numeric, non-string value.
        try:
            value_num = int(value)
        except ValueError:
            return False
        if value_num == 0:
            # Priority 0 clears both the sampled and debug bits.
            self.context.flags &= ~(SAMPLED_FLAG | DEBUG_FLAG)
            return False

        if self.tracer.is_debug_allowed(self.operation_name):
            # Non-zero priority upgrades the span to sampled + debug,
            # subject to the tracer's debug throttling.
            self.context.flags |= SAMPLED_FLAG | DEBUG_FLAG
            return True

        return False

    def log_kv(self, key_values, timestamp=None):
        """
        Record a structured key/value log entry on the span.

        No-op for unsampled spans.

        :param key_values: dict of log fields
        :param timestamp: explicit timestamp per time.time(); defaults to now
        :return: Returns the Span itself, for call chaining.
        """
        if self.is_sampled():
            timestamp = timestamp if timestamp else time.time()
            # TODO handle exception logging, 'python.exception.type' etc.
            # NOTE(review): timestamp was already normalized above; the
            # second `if timestamp` check below is redundant but harmless.
            log = thrift.make_log(
                timestamp=timestamp if timestamp else time.time(),
                fields=key_values,
                max_length=self._tracer.max_tag_value_length,
            )
            with self.update_lock:
                self.logs.append(log)
        return self

    def set_baggage_item(self, key, value):
        """
        Store a baggage item by swapping in a new immutable span context.

        When sampled, the change is also recorded as a 'baggage' log event
        (with an 'override' marker if a previous value existed).

        :return: Returns the Span itself, for call chaining.
        """
        prev_value = self.get_baggage_item(key=key)
        new_context = self.context.with_baggage_item(key=key, value=value)
        with self.update_lock:
            self._context = new_context
        if self.is_sampled():
            logs = {
                'event': 'baggage',
                'key': key,
                'value': value,
            }
            if prev_value:
                # TODO add metric for this
                logs['override'] = 'true'
            self.log_kv(key_values=logs)
        return self

    def get_baggage_item(self, key):
        """Return the baggage value for `key`, or None if absent."""
        return self.context.baggage.get(key)

    def is_sampled(self):
        # True when the sampled bit is set in the context flags.
        return self.context.flags & SAMPLED_FLAG == SAMPLED_FLAG

    def is_debug(self):
        # True when the debug bit is set in the context flags.
        return self.context.flags & DEBUG_FLAG == DEBUG_FLAG

    def is_rpc(self):
        """Return True if a span.kind tag marks this as an RPC span."""
        for tag in self.tags:
            if tag.key == ext_tags.SPAN_KIND:
                return tag.vStr == ext_tags.SPAN_KIND_RPC_CLIENT or \
                       tag.vStr == ext_tags.SPAN_KIND_RPC_SERVER
        return False

    def is_rpc_client(self):
        """Return True if a span.kind tag marks this as an RPC client."""
        for tag in self.tags:
            if tag.key == ext_tags.SPAN_KIND:
                return tag.vStr == ext_tags.SPAN_KIND_RPC_CLIENT
        return False

    @property
    def trace_id(self):
        """Trace id from the span context."""
        return self.context.trace_id

    @property
    def span_id(self):
        """Span id from the span context."""
        return self.context.span_id

    @property
    def parent_id(self):
        """Parent span id from the span context."""
        return self.context.parent_id

    @property
    def flags(self):
        """Sampling/debug flags bitmask from the span context."""
        return self.context.flags

    def __repr__(self):
        c = codecs.span_context_to_string(
            trace_id=self.context.trace_id, span_id=self.context.span_id,
            parent_id=self.context.parent_id, flags=self.context.flags)
        return '%s %s.%s' % (c, self.tracer.service_name, self.operation_name)

    def info(self, message, payload=None):
        """DEPRECATED: emits an event-style log entry; prefer log_kv()."""
        if payload:
            self.log(event=message, payload=payload)
        else:
            self.log(event=message)
        return self

    def error(self, message, payload=None):
        """DEPRECATED: tags the span as errored and emits an event log."""
        self.set_tag('error', True)
        if payload:
            self.log(event=message, payload=payload)
        else:
            self.log(event=message)
        return self
|
from os import path
from typing import List, Optional
import pandas as pd
from code_base.excess_mortality.base_eurostat_bulk import (SaveFileMixin,
UN_LOC_VARS,
UN_DECODE_AGE_GROUPS,
UN_DECODE_SEX_GROUPS)
from code_base.excess_mortality.folder_constants import source_eu_population
from code_base.excess_mortality.get_excess_mortality import BaseBulkEurostatData
class GetEUPopulation(BaseBulkEurostatData):
    """Eurostat bulk dataset: EU population broken down by age and sex."""

    def __init__(self):
        # Dataset key understood by BaseBulkEurostatData; identifies the
        # (unzipped) Eurostat bulk file to load.
        self.eurostat_data = 'europe_population_by_age_and_sex'
        super().__init__(self.eurostat_data, zipped=False)

    def clean_up_df(self) -> None:
        """Normalize the raw Eurostat frame in place.

        Keeps only rows with a usable 2020 population figure, drops rows
        without a Location, strips value qualifiers, and coerces the
        population column to int.
        """
        super().clean_up_df()
        # TODO: Add comment explanations to the code.
        # The 2020 year column is the population measure of interest.
        self.eurostat_df.rename(columns={'2020': 'Population'}, inplace=True)
        # Eurostat encodes missing values as ':'.  The explicit '== False'
        # comparison (rather than '~mask') also drops rows where
        # str.contains produced NaN.
        remove_missing_vals_mask = self.eurostat_df['Population'].str.contains(":") == False
        self.eurostat_df = self.eurostat_df[remove_missing_vals_mask]
        self.eurostat_df.dropna(how='any', subset=['Location'], axis=0, inplace=True)
        # 'p' and 'e' -- presumably Eurostat "provisional"/"estimated" value
        # flags appended to figures; strip them so int() parsing works.
        self.replace_symbols(symbol_to_replace='p', replace_with='', apply_to_cols=['Population'])
        self.replace_symbols(symbol_to_replace='e', replace_with='', apply_to_cols=['Population'])
        self.eurostat_df['Population'] = self.eurostat_df['Population'].map(int)
        return

    def get_agg_sex_cntry_pop(self, sex: Optional[List] = None, age: Optional[List] = None) -> pd.DataFrame:
        """Aggregate population per (Sex, Location).

        :param sex: Sex groups to keep; defaults to ['Total'].
        :param age: Age groups to keep; defaults to ['Total'].
        :return: DataFrame of summed Population per Sex and Location.
        """
        sex = ['Total'] if not sex else sex
        age = ['Total'] if not age else age
        filt_mask = self.eurostat_df['Sex'].isin(sex) & self.eurostat_df['Age'].isin(age)
        df = self.eurostat_df[filt_mask].copy()
        df.drop('Age', axis=1, inplace=True)
        # NOTE(review): 'Population' is passed positionally to GroupBy.sum
        # (the numeric_only slot in older pandas) -- confirm this is intended
        # rather than df.groupby(...)['Population'].sum().
        df = df.groupby(['Sex', 'Location'], as_index=False).sum('Population')
        return df
class GetPopUN(SaveFileMixin):
    """UN population-by-age/sex dataset (2019 CSV extract)."""

    def __init__(self):
        # CSV shipped with the repository; utf-8-sig strips a leading BOM.
        self.file_name = 'UNDATA_Population by age, sex and urban-rural residence_2019.csv'
        self.file_loc = path.join(source_eu_population, self.file_name)
        self.pop_df = pd.read_csv(self.file_loc, encoding='utf-8-sig')

    def clean_up_df(self) -> pd.DataFrame:
        """Filter and normalize the raw UN frame.

        Keeps only known locations and decodable age/sex groups for the
        'Total' area, drops metadata columns, renames to the common
        Location/Population schema, and decodes the UN age/sex codes.

        :return: The cleaned DataFrame (also stored on self.pop_df).
        """
        filt_age_cntry_sex_area = (self.pop_df['Country or Area'].isin(UN_LOC_VARS)) \
                                  & (self.pop_df['Age'].isin(UN_DECODE_AGE_GROUPS.keys())) \
                                  & (self.pop_df['Sex'].isin(UN_DECODE_SEX_GROUPS.keys())) \
                                  & (self.pop_df['Area'] == 'Total')
        self.pop_df = self.pop_df[filt_age_cntry_sex_area]

        drop_cols = ['Value Footnotes', 'Record Type', 'Reliability', 'Area', 'Year', 'Source Year']
        self.pop_df.drop(columns=drop_cols, inplace=True)

        cols = {'Country or Area': 'Location', 'Value': 'Population'}
        self.pop_df.rename(columns=cols, inplace=True)

        # Rows were already filtered to decodable keys above, so a dict-based
        # Series.map is equivalent to the previous row-wise DataFrame.apply
        # and avoids a Python-level loop over rows.
        self.pop_df['Sex'] = self.pop_df['Sex'].map(UN_DECODE_SEX_GROUPS)
        self.pop_df['Age'] = self.pop_df['Age'].map(UN_DECODE_AGE_GROUPS)
        return self.pop_df

    def get_agg_sex_cntry_pop(self, sex: Optional[List] = None, age: Optional[List] = None,
                              drop_age: bool = True) -> pd.DataFrame:
        """Aggregate population per country (and optionally per age group).

        :param sex: Sex groups to keep; defaults to ['Total'].
        :param age: Age groups to keep; defaults to ['Total'].
        :param drop_age: If True, drop the Age column and aggregate over it.
        :return: DataFrame of summed Population per grouping.
        """
        # Fix: the previous signature used mutable list defaults
        # (sex=['Total']), which are shared across calls; use None sentinels
        # instead, mirroring GetEUPopulation.get_agg_sex_cntry_pop.
        sex = ['Total'] if not sex else sex
        age = ['Total'] if not age else age
        filt_mask = (self.pop_df['Sex'].isin(sex)) & (self.pop_df['Age'].isin(age))
        df = self.pop_df[filt_mask].copy()
        if drop_age:
            df.drop('Age', axis=1, inplace=True)
            grouping = ['Sex', 'Location']
        else:
            grouping = ['Age', 'Sex', 'Location']
        df = df.groupby(grouping, as_index=False).sum('Population')
        return df
import warnings
import numpy as np
import pandas as pd
import sklearn
from sklearn import metrics
class MetricCatalog:
    """Catalog of evaluation metric configurations.

    Each entry maps a metric name to its callable ('func'), fixed keyword
    arguments ('params'), whether it consumes probability scores rather
    than hard labels ('require_score'), and flags for the task types it
    supports ('binary', 'multi', 'regression').
    """

    catalog_dict = {
        'accuracy': {'func': metrics.accuracy_score,
                     'params': {},
                     'require_score': False,
                     'binary': True,
                     'multi': True},
        # Average precision is not straightforward to apply to multiclass.
        'average_precision': {'func': metrics.average_precision_score,
                              'params': {},
                              'require_score': True,
                              'binary': True,
                              'multi': False},
        # sklearn's default f1 configuration only handles binary
        # classification, so macro averaging is forced here.
        'f1': {'func': metrics.f1_score,
               'params': {'average': 'macro'},
               'require_score': False,
               'binary': True,
               'multi': True},
        'f1_micro': {'func': metrics.f1_score,
                     'params': {'average': 'micro'},
                     'require_score': False,
                     'binary': True,
                     'multi': True},
        'f1_macro': {'func': metrics.f1_score,
                     'params': {'average': 'macro'},
                     'require_score': False,
                     'binary': True,
                     'multi': True},
        # log_loss returns a "loss" value, so it is negated to keep
        # the higher-is-better convention.
        'neg_log_loss': {'func': lambda y_true, y_pred: - metrics.log_loss(y_true, y_pred),
                         'params': {},
                         'require_score': True,
                         'binary': True,
                         'multi': True},
        # Same macro-averaging caveat as f1_score.
        'precision': {'func': metrics.precision_score,
                      'params': {'average': 'macro'},
                      'require_score': False,
                      'binary': True,
                      'multi': True},
        'precision_micro': {'func': metrics.precision_score,
                            'params': {'average': 'micro'},
                            'require_score': False,
                            'binary': True,
                            'multi': True},
        'precision_macro': {'func': metrics.precision_score,
                            'params': {'average': 'macro'},
                            'require_score': False,
                            'binary': True,
                            'multi': True},
        # Same macro-averaging caveat as f1_score.
        'recall': {'func': metrics.recall_score,
                   'params': {'average': 'macro'},
                   'require_score': False,
                   'binary': True,
                   'multi': True},
        'recall_micro': {'func': metrics.recall_score,
                         'params': {'average': 'micro'},
                         'require_score': False,
                         'binary': True,
                         'multi': True},
        'recall_macro': {'func': metrics.recall_score,
                         'params': {'average': 'macro'},
                         'require_score': False,
                         'binary': True,
                         'multi': True},
        'roc_auc': {'func': metrics.roc_auc_score,
                    'params': {},
                    'require_score': True,
                    'binary': True,
                    'multi': False},
        # Regression metrics below; losses are negated as above.
        'explained_variance': {'func': metrics.explained_variance_score,
                               'params': {},
                               'require_score': False,
                               'regression': True},
        'neg_mean_absolute_error': {'func': lambda y_true, y_pred: - metrics.mean_absolute_error(
                                        y_true, y_pred),
                                    'params': {},
                                    'require_score': False,
                                    'regression': True},
        'neg_mean_squared_error': {'func': lambda y_true, y_pred: - metrics.mean_squared_error(
                                       y_true, y_pred),
                                   'params': {},
                                   'require_score': False,
                                   'regression': True},
        'neg_median_absolute_error': {'func': lambda y_true, y_pred: - metrics.median_absolute_error(
                                          y_true, y_pred),
                                      'params': {},
                                      'require_score': False,
                                      'regression': True},
        'r2': {'func': metrics.r2_score,
               'params': {},
               'require_score': False,
               'regression': True}}

    @classmethod
    def get_basic_metrics(cls,
                          task_type="classification"):
        """Return a small default subset of catalog_dict for task_type.

        Returns None for unrecognized task types (matching the implicit
        fall-through of the original implementation).
        """
        if task_type in ("classification", "binary", "multi"):
            basic_names = ("accuracy", "precision", "recall")
        elif task_type in ("regression", "reg"):
            basic_names = ("neg_mean_absolute_error",
                           "neg_mean_squared_error",
                           "r2")
        else:
            return None
        # Iterate catalog_dict to preserve its insertion order, as the
        # previous filter(...) over items() did.
        return {name: spec for name, spec in cls.catalog_dict.items()
                if name in basic_names}
class ErrorSummary(object):
    """Container bundling the artifacts of an error-analysis run."""

    def __init__(self,
                 error_dist=None,
                 diversity=None,
                 errors=None):
        """Store the analysis tables.

        Args:
            error_dist (pd.DataFrame): Error distribution table
            diversity (pd.DataFrame): Diversity metric table
            errors (pd.DataFrame): Misclassified examples
        """
        # Plain value holder; the three tables are independent, so the
        # assignment order is arbitrary.
        self.errors = errors
        self.diversity = diversity
        self.error_dist = error_dist
class Evaluate():
    """Model evaluation helper.

    Computes performance metrics, permutation-based column importance,
    misprediction extraction/stratification, and per-row explanations for
    either an AutoLearn instance or a bare scikit-learn estimator,
    optionally paired with an AutoConverter holding X/y and feature names.
    """

    def __init__(self,
                 alearn,
                 ac=None,
                 feature_names=None,
                 random_state=7):
        """Data evaluation class

        Args:
            alearn (AutoLearn or sklearn classifier instance):
                Trained model instance
            ac (AutoConverter instance): Autoconverter for converting column
                data to feature matrix
            feature_names (list): List of feature names (str)
                If ac is given, the parameter will be disregarded.
                If not, feature_names becomes mandatory.
            random_state (int): random seed for pandas.sample. Default: 7
        """
        if ac is None:
            if feature_names is None:
                # Fix: the original passed two strings to ValueError, which
                # makes the exception args a tuple; use one message string.
                raise ValueError("Either AutoConverter or feature_names must "
                                 "be given.")
            self.feature_names = feature_names
            self.ac = None
        else:
            self.ac = ac
            if feature_names is not None:
                # BUG FIX: warnings.warn() was called with two strings; the
                # second positional argument is the warning *category* and
                # must be a Warning subclass, so the original call raised
                # TypeError at runtime.  Join into a single message.
                warnings.warn("AutoConverter instance is given so "
                              "feature_names will be discarded.")
            self.feature_names = None

        # TODO(Yoshi): Need to modify when it incorporates regression type
        assert hasattr(alearn, "predict")
        assert hasattr(alearn, "predict_proba")
        if alearn.__class__.__name__ == "AutoLearn":
            assert alearn.trained
        else:
            # scikit-learn classifiers do not have "fitted" flag
            # A solution would be calling predict()/predict_proba()
            # to see if it returns exception.
            pass
        self.alearn = alearn
        self.rs = random_state
        # Cached result of evaluate_performance(); lazily filled.
        self.orig_eval_s = None

    def _task_type(self):
        """Extract task_type from alearn (could be sklearn clf) instance."""
        if hasattr(self.alearn, 'task'):
            # AutoLearn instance passed
            if self.alearn.task == 'regression':
                task_type = 'regression'
            elif hasattr(self.alearn.learner, "task_type"):
                task_type = self.alearn.learner.task_type
            else:
                raise ValueError("wrong task_type passed to evaluate")
        else:
            # in this case we have scikit-learn classifier passed
            if isinstance(self.alearn, sklearn.base.ClassifierMixin):
                if len(self.alearn.classes_) == 2:
                    task_type = "binary"
                else:
                    task_type = "multi"
            elif isinstance(self.alearn, sklearn.base.RegressorMixin):
                task_type = "regression"
            else:
                raise ValueError("Unknown instance type: {}".format(
                    type(self.alearn)))
        return task_type

    def _pos_label(self):
        """Return the positive-class column index for predict_proba."""
        if hasattr(self.alearn, "pos_label"):
            return self.alearn.pos_label
        else:
            # Assume that the second index is positive
            return 1

    def get_feature_indexes(self):
        """Map each table..column to its [begin, end) feature-matrix range.

        Returns:
            table_colname_pos_dict =
            {"main..Ticket": [0, 20], "main..Age": [21, 30], ...}
        """
        if self.ac is not None:
            all_feature_names = self.ac.feature_names
        else:
            all_feature_names = self.feature_names

        # table_feature_names_cols =
        #   ["main..Ticket", "main..Ticket", ...]
        # Feature names look like "<table>..<column>.<suffix>"; keep only
        # the "<table>..<column>" prefix.
        table_feature_name_cols = list(map(
            lambda x: x.split('..')[0] + ".." + x.split('..')[1].split('.')[0],
            all_feature_names))

        table_colname_pos_dict = {}
        begin = 0
        table_colname = table_feature_name_cols[0]
        counter = 0
        for i, feature_name in enumerate(table_feature_name_cols):
            if feature_name == table_colname:
                counter += 1
            else:
                # end is not included to the interval
                table_colname_pos_dict[table_colname] = [begin, i]
                begin = i
                counter = 1
                table_colname = feature_name
        table_colname_pos_dict[table_colname] = [begin,
                                                 len(table_feature_name_cols)]
        return table_colname_pos_dict

    @classmethod
    def run_metric_functions(cls,
                             y,
                             y_pred,
                             y_prob,
                             metric_func_dict,
                             task_type):
        """Run metric functions

        Args:
            y (np.ndarray): True label vector
            y_pred (np.ndarray): Predicted label vector
            y_prob (np.ndarray): Probability vector
                None if task_type == "regression"
            metric_func_dict (dict): metric func dictionary
                see MetricCatalog for details
            task_type (str): task type {"binary", "multi", "regression"}

        Returns:
            orig_eval_s (pd.Series)
        """
        if task_type not in ["binary", "multi", "regression"]:
            raise ValueError('task_type must be {"binary", "multi",'
                             '"regression"}')
        if task_type == "regression" and y_prob is not None:
            warnings.warn("y_prob will be disregarded for"
                          "task_type=regression")

        # Only use evaluation metric that supports task_type
        sorted_metric_names = sorted(
            filter(lambda x: (task_type in metric_func_dict[x] and
                              metric_func_dict[x][task_type]),
                   metric_func_dict.keys()))

        # Evaluate prediction
        eval_list = []
        for metric_name in sorted_metric_names:
            metric_info = metric_func_dict[metric_name]
            metric_func = metric_info['func']
            metric_params = metric_info['params']
            assert metric_info[task_type]
            if metric_info["require_score"]:
                score = metric_func(y, y_prob, **metric_params)
            else:
                # Evaluation metrics for regression use y_pred
                score = metric_func(y, y_pred, **metric_params)
            eval_list.append(score)
        orig_eval_s = pd.Series(eval_list, index=sorted_metric_names)
        return orig_eval_s

    def evaluate_performance(self,
                             X=None,
                             y=None,
                             metric_func_dict=None):
        """Evaluate prediction performance.

        Args:
            X (np.array): Test feature matrix; falls back to self.ac.X
            y (np.array): Test label vector; falls back to self.ac.y
            metric_func_dict (dict): if None, it will use MetricCatalog
                {"metric_name": {"func": func,
                                 "params": {},
                                 "require_score": True,
                                 "binary": True,
                                 "multi": True}}

        Returns:
            orig_eval_s (pd.Series): Evaluation values
        """
        if metric_func_dict is None:
            metric_func_dict = MetricCatalog.catalog_dict

        if (X is None) or (y is None):
            if self.ac is None:
                raise ValueError(
                    "X and y are missing since AutoConverter instance was "
                    "not given.")
            if not self.ac.hasdata:
                raise RuntimeError(
                    "AutoConverter instance does not store X and y.")
            X = self.ac.X
            y = self.ac.y

        # 1. pure prediction
        y_pred = self.alearn.predict(X)
        if self._task_type() in ["binary", "multi"]:
            y_prob = self.alearn.predict_proba(X)
            if self._task_type() == "binary":
                y_prob = y_prob[:, self._pos_label()]
        else:
            # y_prob is empty for regression
            y_prob = None

        # y_pred, y_prob, metric_func_dict
        self.orig_eval_s = Evaluate.run_metric_functions(y,
                                                         y_pred,
                                                         y_prob,
                                                         metric_func_dict,
                                                         self._task_type())
        return self.orig_eval_s

    def calculate_column_importance(self,
                                    X=None,
                                    y=None,
                                    target=None,
                                    metric_func_dict=None):
        """Evaluate column importance scores via permutation.

        Each column's features are shuffled and the relative metric drop
        (orig - shuffled) / orig is reported per metric.

        Args:
            X (np.array): Test feature matrix
            y (np.array): Test label vector
            target (str): target column name (ignored if self.ac is set)
            metric_func_dict (dict): if None, it will use MetricCatalog
                {"metric_name": {"func": func,
                                 "params": {},
                                 "require_score": True,
                                 "binary": True,
                                 "multi": True}}

        Returns:
            col_imp_df (pd.DataFrame):
                              accuracy  average_precision        f1  ...
            tablename colname
            main      Age     0.012240           0.007844  0.013407  ...
                      Sex     0.227662           0.236873  0.244964  ...
        """
        if metric_func_dict is None:
            metric_func_dict = MetricCatalog.catalog_dict
        if (X is None) or (y is None):
            if self.ac is None:
                raise ValueError(
                    "X and y must be given since it has no AutoConverter "
                    "instance.")
            if not self.ac.hasdata:
                raise RuntimeError(
                    "AutoConverter instance does not store X and y.")
            X = self.ac.X
            y = self.ac.y

        if self.ac is None:
            if target is None:
                raise ValueError("target parameter must be given since "
                                 "it has no AutoConverter instance.")
        else:
            target = self.ac.target
            if target is not None:
                warnings.warn("Given target will be discarded.")

        if self.orig_eval_s is None:
            self.evaluate_performance(X=X,
                                      y=y,
                                      metric_func_dict=metric_func_dict)
        assert self.orig_eval_s is not None

        # feature_indexes_dict[table_colname] = [begin, end]
        feature_indexes_dict = self.get_feature_indexes()

        # Only use evaluation metric that supports task_type
        sorted_metric_names = sorted(
            filter(lambda x: (self._task_type() in metric_func_dict[x] and
                              metric_func_dict[x][self._task_type()]),
                   metric_func_dict.keys()))

        # Column importance
        col_importance_list = []
        col_imp_index_list = []
        for table_colname in sorted(feature_indexes_dict.keys()):
            tablename, colname = table_colname.split('..')
            if tablename == 'main' and colname == target:
                continue
            col_imp_index_list.append(table_colname)
            # Get needed feature columns range and spoil them
            beg_idx, end_idx = feature_indexes_dict[table_colname]
            X_shuf = X.copy()
            np.random.shuffle(X_shuf[:, beg_idx:end_idx])

            # Permuted prediction
            y_shuf_pred = self.alearn.predict(X_shuf)
            if self._task_type() in ["binary", "multi"]:
                y_shuf_prob = self.alearn.predict_proba(X_shuf)
                if self._task_type() == 'binary':
                    y_shuf_prob = y_shuf_prob[:, self._pos_label()]

            # Calculate evaluation
            metric_list = []
            for metric_name in sorted_metric_names:
                metric_info = metric_func_dict[metric_name]
                metric_func = metric_info['func']
                metric_params = metric_info['params']
                assert metric_info[self._task_type()]
                if metric_info["require_score"]:
                    # orig_score = metric_func(y, y_prob)
                    orig_score = self.orig_eval_s[metric_name]
                    shuf_score = metric_func(y, y_shuf_prob, **metric_params)
                else:
                    # orig_score = metric_func(y, y_pred)
                    orig_score = self.orig_eval_s[metric_name]
                    shuf_score = metric_func(y, y_shuf_pred, **metric_params)

                # TODO(Yoshi): Double check if there is no problem
                #              for neg_log_loss
                if orig_score == 0:
                    metric_list.append(0.0)
                else:
                    metric_list.append((orig_score - shuf_score) / orig_score)
            col_importance_list.append(metric_list)

        col_imp_df = pd.DataFrame(col_importance_list)
        col_imp_df.columns = sorted_metric_names
        tablename_list = list(map(lambda x: x.split('..')[0],
                                  col_imp_index_list))
        colname_list = list(map(lambda x: x.split('..')[1],
                                col_imp_index_list))
        assert len(tablename_list) == len(col_imp_df)
        assert len(tablename_list) == len(colname_list)
        assert "tablename" not in sorted_metric_names
        assert "colname" not in sorted_metric_names
        col_imp_df["tablename"] = tablename_list
        col_imp_df["colname"] = colname_list
        col_imp_df.set_index(["tablename", "colname"], inplace=True)
        return col_imp_df

    def get_top_columns(self, n=3):
        """Returns n most important columns in the DataFrame

        Args:
            n (integer): number of columns returned

        Returns:
            list of [tablename..columname, ...] of most
            important columns, sorted in descending order
        """
        col_imp_df = self.calculate_column_importance()
        # NOTE(review): for regression neither metric below exists in
        # col_imp_df, so this would raise KeyError -- confirm intended scope.
        if self._task_type() == 'binary':
            metric = 'roc_auc'
        else:
            metric = 'neg_log_loss'
        new_df = col_imp_df[metric].sort_values(ascending=False).head(n)
        return list(map(lambda x: x[0] + '..' + x[1], new_df.index.values))

    def get_mispredictions(self, df):
        """Get mispredicted examples based on the classifier

        Args:
            df (pd.DateFrame): dataset to evaluate.

        Returns:
            mispred_df (pd.DataFrame):

        TODO(Yoshi): subtable support
        """
        # Assume AutoConverter is mandatory for the function
        if self.ac is None:
            raise ValueError("AutoConverter instance is required to call "
                             "get_mispredictions()")
        # TODO(Yoshi): This is not accurate.
        # AutoConverter also should have "fitted" flag or something like that.
        assert self.ac.hasdata
        X, y = self.ac.transform(df)
        pred_y = self.alearn.predict(X)
        # TODO(Yoshi): Add some columns such as ==prediction== column,
        #              ==confidence==. To be discussed and will be another
        #              ticket.
        # Fix: .ix was deprecated and removed in pandas 1.0; boolean-mask
        # selection via .loc is the equivalent replacement.
        return df.loc[y != pred_y]

    def stratify_errors(self,
                        df,
                        max_numcat=5):
        """Stratify mispredicted examples.

        TODO(Yoshi): Will avoid hand-crafted configuration

        Args:
            df (pd.DataFrame):
            max_numcat (int): skip columns with more than this many groups

        Returns:
            es (ErrorSummary), or None if no column could be grouped
        """
        # Assume AutoConverter is mandatory for the function
        if self.ac is None:
            raise ValueError("AutoConverter instance is required to call "
                             "stratify_errors()")

        def calc_diversity(s):
            """Calculate entropy as a diversity metric."""
            probs = s / s.sum()
            return (probs * np.log(1.0 / probs)).sum()

        assert self.ac.hasdata
        error_df = self.get_mispredictions(df)

        # Conduct for loop for each column
        colname_list = []
        error_dist_df_list = []
        diversity_list = []
        sorted_colnames = sorted(error_df.columns.tolist())
        for colname in sorted_colnames:
            if colname not in self.ac.colname_type_dict:
                continue
            error_count_s = error_df[colname].value_counts()
            total_count_s = df[colname].value_counts()
            error_dist_df = pd.concat([error_count_s, total_count_s], axis=1)
            error_dist_df.columns = ["error_count", "total_count"]
            error_dist_df["error_rate"] = (error_dist_df["error_count"] /
                                           error_dist_df["total_count"])
            if len(error_dist_df) > max_numcat:
                continue
            error_dist_df.index.name = "group"
            error_dist_df = error_dist_df.reset_index()

            # Calculate diversity score
            diversity_score = calc_diversity(error_dist_df["error_rate"])
            error_dist_df.loc[:, 'colname'] = colname
            error_dist_df_list.append(error_dist_df)
            diversity_list.append(diversity_score)
            colname_list.append(colname)

        if len(error_dist_df_list) < 1:
            # No grouped result found
            # TODO(Yoshi): Output any message?
            return None

        error_dist_concat_df = pd.concat(error_dist_df_list, axis=0)
        error_dist_concat_df.set_index(["colname", "group"], inplace=True)
        diversity_df = pd.DataFrame({"diversity": diversity_list},
                                    index=colname_list)
        return ErrorSummary(error_dist=error_dist_concat_df,
                            diversity=diversity_df,
                            errors=error_df)

    def get_explanations(self,
                         test_df,
                         X=None,
                         topk=3,
                         max_candidates=10,
                         num_sampling=10,
                         spoil_method='random'):
        """Returns explanations (previously known as reason codes)

        V1 simply calculates the average difference of class probabilities
        no matter whether binary or multiclass

        Args:
            test_df (pd.DataFrame): Original DataFrame
            X (np.array): Test feature matrix
            topk (int): select top-k colnames for explanations
            max_candidates (int): At most <max_candidates> columns will be
                used for explanations (Default 10)
            num_sampling (int): Number of sampling iterations
                (Default 10)
            spoil_method (str): {"random"}

        Returns:
            explain_df (pd.DataFrame): one 'explanations' list per test row
        """
        # Assume AutoConverter is mandatory for the function
        if self.ac is None:
            raise ValueError("AutoConverter instance is required to call "
                             "get_explanations()")

        # TODO(Yoshi): spoil_method should be improved
        top_colnames = self.get_top_columns(n=max_candidates)
        # TODO(Yoshi): it's not straightforward to visualize representative
        #              values for subtables. Only focus on main table for now
        top_colnames = list(filter(lambda x: x.split('..')[0] == 'main',
                                   top_colnames))
        assert len(top_colnames) > 0
        table_colname_feature_pos_dict = self.get_feature_indexes()

        if X is None:
            assert self.ac.hasdata
            X = self.ac.X

        all_pred = self.alearn.predict_proba(X)
        table_colname_impact_dict = {}
        for table_colname in top_colnames:
            abs_diff_probs = np.zeros_like(all_pred)
            beg_idx, end_idx = table_colname_feature_pos_dict[table_colname]
            for _ in range(num_sampling):
                X_shuf = X.copy()
                np.random.shuffle(X_shuf[:, beg_idx:end_idx])
                all_pred_shuf = self.alearn.predict_proba(X_shuf)
                abs_diff_probs += np.abs(all_pred - all_pred_shuf)
            # <num_sample>-dimensional vector
            impact_scores = np.mean(abs_diff_probs, axis=1)
            table_colname_impact_dict[table_colname] = impact_scores

        impact_df = pd.DataFrame(table_colname_impact_dict)
        assert len(impact_df) == len(test_df)
        impact_df.index = test_df.index

        all_explanation_list = []
        for index, row in impact_df.iterrows():
            top_s = row.sort_values(ascending=False).head(topk)
            top_colnames = top_s.index.tolist()
            cur_explanation_list = []
            for table_colname in top_colnames:
                # split colname into tablename and colname
                tablename, colname = table_colname.split("..")
                # Fix: .ix was removed in pandas 1.0; label-based lookup
                # via .loc is the equivalent replacement.
                val = test_df.loc[index, colname]
                cur_explanation_list.append((colname, val))
            all_explanation_list.append(cur_explanation_list)

        explain_df = pd.DataFrame({"explanations": all_explanation_list})
        assert len(explain_df) == len(test_df)
        explain_df.index = test_df.index
        return explain_df
|
<filename>src/milannotations/datasets.py<gh_stars>1-10
"""PyTorch datasets that nicely wrap exemplars for every unit in a network."""
import collections
import csv
import pathlib
from typing import Any, Iterable, NamedTuple, Optional, Sequence, Union
from src.deps.netdissect import renormalize
from src.utils.typing import (Layer, PathLike, StrSequence, TransformStr,
TransformStrSeq, TransformTensor, Unit)
import numpy
import torch
from PIL import Image
from torch.utils import data
from torchvision import utils
from torchvision.transforms import functional
from tqdm.auto import tqdm
class TopImages(NamedTuple):
    """Top-activating exemplar images (and their masks) for one unit."""

    layer: str
    unit: int
    images: torch.Tensor
    masks: torch.Tensor

    def as_masked_images_tensor(self, opacity: float = .75) -> torch.Tensor:
        """Blend the masks into the images, forming a single tensor.

        Args:
            opacity (float, optional): Opacity for mask, with 1 meaning
                that the masked area is black, and 0 meaning that the masked
                area is shown as normal. Defaults to .75.

        Returns:
            torch.Tensor: Shape (len(self.images), 3, height, width) tensor
                containing images with masks applied.
        """
        if opacity < 0 or opacity > 1:
            raise ValueError(f'opacity must be in [0, 1], got {opacity}')
        # Pixels outside the mask (mask == 0) are dimmed by 1 - opacity;
        # pixels inside the mask (mask == 1) are left untouched.
        weights = self.masks.clone().float()
        weights[weights == 0] = 1 - opacity
        return self.images * weights

    def as_pil_images(self, opacity: float = .75) -> Sequence[Image.Image]:
        """Convert each masked image into an individual PIL image.

        Args:
            opacity (float, optional): See `as_masked_images_tensor`.
                Defaults to .75.

        Returns:
            Sequence[Image.Image]: One PIL Image per top image.
        """
        converted = []
        for masked in self.as_masked_images_tensor(opacity=opacity):
            converted.append(functional.to_pil_image(masked))
        return converted

    def as_pil_image_grid(self,
                          opacity: float = .75,
                          limit: Optional[int] = None,
                          **kwargs: Any) -> Image.Image:
        """Pack all masked images into a grid and return as a PIL Image.

        Keyword arguments are forwarded to `torchvision.utils.make_grid`.

        Args:
            opacity (float, optional): See `as_masked_images_tensor`.
                Defaults to .75.
            limit (Optional[int], optional): If set, only include first
                `limit` images in the grid. By default, all are included.

        Raises:
            ValueError: If limit is given but not positive.

        Returns:
            Image.Image: Image grid containing all top images.
        """
        if limit is None:
            limit = len(self.images)
        elif limit <= 0:
            raise ValueError(f'limit must be > 0, got {limit}')
        kwargs.setdefault('nrow', 5)
        subset = self.as_masked_images_tensor(opacity=opacity)[:limit]
        return functional.to_pil_image(utils.make_grid(subset, **kwargs))
class TopImagesDataset(data.Dataset):
    """Top-activating images for individual units."""

    def __init__(self,
                 root: PathLike,
                 name: Optional[str] = None,
                 layers: Optional[Iterable[Layer]] = None,
                 device: Optional[Union[str, torch.device]] = None,
                 transform_images: Optional[TransformTensor] = None,
                 transform_masks: Optional[TransformTensor] = None,
                 display_progress: bool = True):
        """Initialize the dataset.

        Args:
            root (PathLike): Root directory for the dataset. See
                `src.exemplars.compute` function for expected format.
            name (Optional[str], optional): Human-readable name for this
                dataset. Defaults to last two components of root directory.
            layers (Optional[Iterable[Layer]], optional): The layers to load.
                Layer data is assumed to be a subdirectory of the root.
                By default, all subdirectories of root are treated as layers.
            device (Optional[Union[str, torch.device]], optional): Send all
                tensors to this device.
            transform_images (Optional[TransformTensor], optional): Transform
                all dataset images with this function. Defaults to None.
            transform_masks (Optional[TransformTensor], optional): Transform
                all dataset masks with this function. Defaults to None.
            display_progress (bool, optional): Show a progress
                bar when reading images. Defaults to True.

        Raises:
            FileNotFoundError: If root directory does not exist or if layer
                directory is missing images or masks.
            ValueError: If no layers found or provided, or if units have
                different number of top images.
        """
        root = pathlib.Path(root)
        if not root.is_dir():
            raise FileNotFoundError(f'root directory not found: {root}')

        if layers is None:
            layers = [f.name for f in root.iterdir() if f.is_dir()]
        if not layers:
            raise ValueError('no layers given and root has no subdirectories')

        if name is None:
            name = f'{root.parent.name}/{root.name}'

        self.root = root
        self.name = name
        self.layers = layers = tuple(sorted(str(layer) for layer in layers))
        self.device = device
        self.transform_images = transform_images
        self.transform_masks = transform_masks

        progress = layers
        # BUG FIX: the original tested `display_progress is not None`, which
        # is always True for a bool parameter, so the progress bar could
        # never be disabled.  Test truthiness instead.
        if display_progress:
            progress = tqdm(progress,
                            desc=f'load {root.parent.name}/{root.name}')

        self.images_by_layer = {}
        self.masks_by_layer = {}
        self.units_by_layer = {}
        # Exemplars are stored as byte images on disk; renormalize them into
        # the float range torchvision expects.
        renormalizer = renormalize.renormalizer(source='byte', target='pt')
        for layer in progress:
            images_file = root / str(layer) / 'images.npy'
            masks_file = root / str(layer) / 'masks.npy'
            for file in (images_file, masks_file):
                if not file.exists():
                    raise FileNotFoundError(f'{layer} is missing {file.name}')
            images = torch.from_numpy(numpy.load(images_file))
            masks = torch.from_numpy(numpy.load(masks_file))
            # Renamed loop variable (was `name`) so it no longer shadows the
            # dataset-name parameter above; rendered messages are unchanged.
            for tensor_name, tensor in (('images', images), ('masks', masks)):
                if tensor.dim() != 5:
                    raise ValueError(f'expected 5D {tensor_name}, '
                                     f'got {tensor.dim()}D '
                                     f'in layer {layer}')
            if images.shape[:2] != masks.shape[:2]:
                raise ValueError(f'layer {layer} masks/images have '
                                 'different # unit/images: '
                                 f'{images.shape[:2]} vs. {masks.shape[:2]}')
            if images.shape[3:] != masks.shape[3:]:
                raise ValueError(f'layer {layer} masks/images have '
                                 'different height/width '
                                 f'{images.shape[3:]} vs. {masks.shape[3:]}')

            # Handle units separately, since they're optional.
            units_file = root / str(layer) / 'units.npy'
            if units_file.exists():
                units = torch.from_numpy(numpy.load(units_file))
                if units.dim() != 1:
                    raise ValueError(f'expected 1D units, got {units.dim()}D')
            else:
                units = torch.arange(len(images))

            images = images.float()
            masks = masks.float()

            # Flatten (units, k) into a single batch dim for renormalization,
            # then restore the original shape.
            shape = images.shape
            images = images.view(-1, *shape[2:])
            images = renormalizer(images)
            images = images.view(*shape)

            if device is not None:
                images = images.to(device)
                masks = masks.to(device)

            self.images_by_layer[layer] = images
            self.masks_by_layer[layer] = masks
            self.units_by_layer[layer] = units

        # Materialize one TopImages sample per (layer, unit).
        self.samples = []
        for layer in layers:
            for unit, images, masks in zip(self.units_by_layer[layer],
                                           self.images_by_layer[layer],
                                           self.masks_by_layer[layer]):
                if transform_images is not None:
                    images = transform_images(images)
                if transform_masks is not None:
                    masks = transform_masks(masks)
                sample = TopImages(layer=str(layer),
                                   unit=unit.item(),
                                   images=images,
                                   masks=masks)
                self.samples.append(sample)

    def __getitem__(self, index: int) -> TopImages:
        """Return the top images.

        Args:
            index (int): Sample index.

        Returns:
            TopImages: The sample.
        """
        return self.samples[index]

    def __len__(self) -> int:
        """Return the number of samples in the dataset."""
        return len(self.samples)

    def lookup(self, layer: Layer, unit: int) -> TopImages:
        """Lookup top images for given layer and unit.

        Args:
            layer (Layer): The layer name.
            unit (int): The unit number.

        Raises:
            KeyError: If no top images for given layer and unit.

        Returns:
            TopImages: The top images.
        """
        layer = str(layer)
        if layer not in self.images_by_layer:
            raise KeyError(f'layer "{layer}" does not exist')
        if unit >= len(self.images_by_layer[layer]):
            raise KeyError(f'layer "{layer}" has no unit {unit}')
        return TopImages(layer=layer,
                         unit=unit,
                         images=self.images_by_layer[layer][unit],
                         masks=self.masks_by_layer[layer][unit])

    def unit(self, index: int) -> Unit:
        """Return the unit at the given index.

        Args:
            index (int): Sample index.

        Returns:
            Unit: Layer and unit number.
        """
        sample = self[index]
        return sample.layer, sample.unit

    def units(self, indices: Sequence[int]) -> Sequence[Unit]:
        """Return the units at the given indices.

        Args:
            indices (Sequence[int]): Sample indices.

        Returns:
            Sequence[Unit]: Layer and unit numbers.
        """
        units = [self.unit(index) for index in indices]
        return tuple(units)

    @property
    def k(self) -> int:
        """Return the "k" in "top-k images"."""
        assert len(self) > 0, 'empty dataset?'
        return self.samples[0].images.shape[0]
# Default CSV column names and file name used by AnnotatedTopImagesDataset
# when reading per-unit annotations.
DEFAULT_LAYER_COLUMN = 'layer'
DEFAULT_UNIT_COLUMN = 'unit'
DEFAULT_ANNOTATION_COLUMN = 'summary'
DEFAULT_ANNOTATIONS_FILE_NAME = 'annotations.csv'
class AnnotatedTopImages(NamedTuple):
    """Top images for a unit, bundled with its human annotations."""

    layer: str
    unit: int
    images: torch.Tensor
    masks: torch.Tensor
    annotations: StrSequence

    def as_top_images(self) -> TopImages:
        """Strip the annotations, leaving plain `TopImages`."""
        return TopImages(layer=self.layer,
                         unit=self.unit,
                         images=self.images,
                         masks=self.masks)

    def as_masked_images_tensor(self, **kwargs: Any) -> torch.Tensor:
        """Delegate to `TopImages.as_masked_images_tensor`."""
        return self.as_top_images().as_masked_images_tensor(**kwargs)

    def as_pil_images(self, **kwargs: Any) -> Sequence[Image.Image]:
        """Delegate to `TopImages.as_pil_images`."""
        return self.as_top_images().as_pil_images(**kwargs)

    def as_pil_image_grid(self, **kwargs: Any) -> Image.Image:
        """Delegate to `TopImages.as_pil_image_grid`."""
        return self.as_top_images().as_pil_image_grid(**kwargs)
class AnnotatedTopImagesDataset(data.Dataset):
    """Same as `TopImagesDataset`, but each unit also has annotations."""

    def __init__(self,
                 root: PathLike,
                 *args: Any,
                 annotations_csv_file: Optional[PathLike] = None,
                 layer_column: str = DEFAULT_LAYER_COLUMN,
                 unit_column: str = DEFAULT_UNIT_COLUMN,
                 annotation_column: str = DEFAULT_ANNOTATION_COLUMN,
                 annotation_count: Optional[int] = None,
                 transform_annotation: Optional[TransformStr] = None,
                 transform_annotations: Optional[TransformStrSeq] = None,
                 **kwargs: Any):
        """Initialize the dataset.

        All *args and **kwargs are forwarded to TopImagesDataset.

        Args:
            root (PathLike): Dataset root; also forwarded to
                `TopImagesDataset`.
            annotations_csv_file (Optional[PathLike], optional): Path to
                annotations CSV file.
                Defaults to `root / DEFAULT_ANNOTATIONS_FILE_NAME`.
            layer_column (str, optional): CSV column containing layer name.
                Defaults to `DEFAULT_LAYER_COLUMN`.
            unit_column (str, optional): CSV column containing unit name.
                Defaults to `DEFAULT_UNIT_COLUMN`.
            annotation_column (str, optional): CSV column containing
                annotation. Defaults to `DEFAULT_ANNOTATION_COLUMN`.
            annotation_count (Optional[int], optional): Exact number of
                annotations to keep for each sample. If a sample has fewer
                than this many annotations, it will be excluded. If it has
                more, throw out the extras. If this value is None, no
                samples will be excluded. Defaults to None.
            transform_annotation (Optional[TransformStr], optional): Call
                this transform on each annotation in isolation. Applied
                before `transform_annotations`. Defaults to None.
            transform_annotations (Optional[TransformStrSeq], optional): Call
                this transform on each sample's annotations. Applied after
                `transform_annotation`, if that argument is set too.
                Defaults to None.

        Raises:
            FileNotFoundError: If annotations CSV file is not found.
            KeyError: If CSV is missing layer, unit, or annotation column.
        """
        root = pathlib.Path(root)
        if annotations_csv_file is None:
            annotations_csv_file = root / DEFAULT_ANNOTATIONS_FILE_NAME
        annotations_csv_file = pathlib.Path(annotations_csv_file)
        if not annotations_csv_file.is_file():
            raise FileNotFoundError(
                f'annotations_csv_file not found: {annotations_csv_file}')

        with annotations_csv_file.open('r') as handle:
            reader = csv.DictReader(handle)
            assert reader.fieldnames is not None, 'null columns?'
            fields = set(reader.fieldnames)
            rows = tuple(reader)

        for column in (layer_column, unit_column, annotation_column):
            if column not in fields:
                raise KeyError(f'annotations csv missing column: {column}')

        # Group annotations by (layer, unit). Key by the *string* layer
        # name: TopImages samples always store `layer` as a str, so the
        # previous int-conversion of digit-only layer names produced keys
        # that never matched any sample and silently dropped those
        # annotations. TopImagesDataset.lookup coerces to str itself, so
        # string keys work in both branches below.
        annotations_by_layer_unit = collections.defaultdict(list)
        for row in rows:
            layer = row[layer_column]
            unit = int(row[unit_column])
            annotation = row[annotation_column]
            if transform_annotation is not None:
                annotation = transform_annotation(annotation)
            annotations_by_layer_unit[layer, unit].append(annotation)

        samples = []
        top_images_dataset = TopImagesDataset(root, *args, **kwargs)
        if annotation_count is None:
            # Keep every top-images sample; units with no CSV rows simply
            # get an empty annotations tuple. Use .get so lookups do not
            # mutate the defaultdict.
            for top_images in top_images_dataset.samples:
                key = (top_images.layer, top_images.unit)
                annotations: StrSequence = annotations_by_layer_unit.get(
                    key, [])
                if transform_annotations is not None:
                    annotations = transform_annotations(annotations)
                samples.append(
                    AnnotatedTopImages(layer=top_images.layer,
                                       unit=top_images.unit,
                                       images=top_images.images,
                                       masks=top_images.masks,
                                       annotations=tuple(annotations)))
        else:
            # Keep only units with at least `annotation_count` annotations,
            # truncating extras so every sample has exactly that many.
            for key, annotations in annotations_by_layer_unit.items():
                if len(annotations) < annotation_count:
                    continue
                annotations = annotations[:annotation_count]
                if transform_annotations is not None:
                    annotations = transform_annotations(annotations)
                top_images = top_images_dataset.lookup(*key)
                samples.append(
                    AnnotatedTopImages(layer=top_images.layer,
                                       unit=top_images.unit,
                                       images=top_images.images,
                                       masks=top_images.masks,
                                       annotations=tuple(annotations)))

        self.samples = tuple(samples)
        self.samples_by_layer_unit = {(s.layer, s.unit): s for s in samples}
        self.name = top_images_dataset.name
        self.layers = top_images_dataset.layers

    def __getitem__(self, index: int) -> AnnotatedTopImages:
        """Return the annotated top images.

        Args:
            index (int): Sample index.

        Returns:
            AnnotatedTopImages: The sample.
        """
        return self.samples[index]

    def __len__(self) -> int:
        """Return the number of samples in the dataset."""
        return len(self.samples)

    def lookup(self, layer: Layer, unit: int) -> AnnotatedTopImages:
        """Lookup annotated top images for given layer and unit.

        Args:
            layer (Layer): The layer name; coerced to str before lookup.
            unit (int): The unit number.

        Raises:
            KeyError: If no top images for given layer and unit.

        Returns:
            AnnotatedTopImages: The annotated top images.
        """
        key = (str(layer), unit)
        if key not in self.samples_by_layer_unit:
            raise KeyError(f'no annotated top images for: {key}')
        return self.samples_by_layer_unit[key]

    def unit(self, index: int) -> Unit:
        """Return the (layer, unit) pair for the sample at `index`.

        Args:
            index (int): Sample index.

        Returns:
            Unit: Layer and unit number.
        """
        sample = self[index]
        return sample.layer, sample.unit

    def units(self, indices: Sequence[int]) -> Sequence[Unit]:
        """Return the (layer, unit) pairs at the given sample indices.

        Args:
            indices (Sequence[int]): Sample indices.

        Returns:
            Sequence[Unit]: Layer and unit numbers, one per index.
        """
        return tuple(self.unit(index) for index in indices)

    @property
    def k(self) -> int:
        """Return the "k" in "top-k images"."""
        assert len(self) > 0, 'empty dataset?'
        return self.samples[0].images.shape[0]
# Convenience unions covering both the plain and annotated variants, for
# callers that accept either.
AnyTopImages = Union[TopImages, AnnotatedTopImages]
AnyTopImagesDataset = Union[TopImagesDataset, AnnotatedTopImagesDataset]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.